src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      tonyp
date:        Wed, 25 Jan 2012 12:58:23 -0500
changeset:   3464:eff609af17d7
parent:      3461:6a78aa6ac1ff
child:       3539:a9647476d1a4
permissions: -rw-r--r--

7127706: G1: re-enable survivors during the initial-mark pause
Summary: Re-enable survivors during the initial-mark pause. Afterwards, the concurrent marking threads have to scan them and mark everything reachable from them. The next GC will have to wait for the survivors to be scanned.
Reviewed-by: brutisso, johnc

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
// Different defaults for different numbers of GC threads.
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
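// Usage sketch (illustrative values only): each indent level prepends
// INDENT_CHARS spaces, and nothing reaches the log until
// append_and_print_cr() emits the whole assembled line at once, e.g.:
//   LineBuffer buf(2);                               // starts with 6 spaces
//   buf.append("[%s (ms):", "Update RS");
//   buf.append_and_print_cr(" %5.1lf]", 1.2);
// prints "      [Update RS (ms):   1.2]" as a single call to
// gclog_or_tty, so per-phase lines are not interleaved mid-line.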
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),
  _mark_closure_time_ms(0.0),
  _root_region_scan_wait_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),
  _young_pause_num(0),
  _mixed_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _all_full_gc_times_ms(new NumberSeq()),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_young_gcs(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {
  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);
  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.
  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }
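  // Worked example (illustrative, using the defaults set above): with
  // neither flag on the command line we end up with
  // MaxGCPauseMillis = 200 and GCPauseIntervalMillis = 201, i.e., an MMU
  // goal of at most 200ms of pause time in any 201ms window - the most
  // permissive goal that still keeps pause time target < pause interval.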
  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;
  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
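  // Worked example (illustrative, assuming the G1 default of
  // GCTimeRatio = 9 established by set_g1_gc_flags()):
  // _gc_overhead_perc = 100 * (1 / (1 + 9)) = 10, i.e., we aim to spend
  // at most ~10% of total time doing GC work.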
  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
  assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
  assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
      _sizer_kind = SizerMaxAndNewSize;
      // The young gen size is only adaptive if the two bounds actually differ.
      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}
size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
  size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
  return MAX2((size_t) 1, default_value);
}

size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
  size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
  return MAX2((size_t) 1, default_value);
}
void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}
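// Worked example (illustrative numbers): with -XX:NewRatio=2 on a heap of
// 300 regions, the SizerNewRatio case pins the young gen at
// 300 / (2 + 1) = 100 regions (min == max, adaptive sizing disabled in the
// constructor). Under SizerDefaults, by contrast, the bounds track
// G1DefaultMinNewGenPercent/G1DefaultMaxNewGenPercent of the current
// region count and are recomputed on every heap resize.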
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  start_incremental_cset_building();
}
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(size_t young_length,
                                         double base_time_ms,
                                         size_t base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) (young_length - 1));
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}
void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (size_t) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}
size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     size_t base_min_length) {
  size_t desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}
void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    if (gcs_are_young()) {
      young_list_target_length = _young_list_fixed_length;
    } else {
      // A bit arbitrary: during mixed GCs we allocate half
      // the young regions to try to add old regions to the CSet.
      young_list_target_length = _young_list_fixed_length / 2;
      // We choose to accept that we might go under the desired min
      // length given that we intentionally ask for a smaller young gen.
      desired_min_length = absolute_min_length;
    }
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}
size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                   size_t base_min_length,
                                                   size_t desired_min_length,
                                                   size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }

  return base_min_length + min_young_length;
}
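// Worked trace of the binary search above (illustrative numbers): say
// min_young_length = 8 (known to fit) and max_young_length = 40 (known
// not to fit). diff starts at (40 - 8) / 2 = 16, so we probe 24; if 24
// fits, min becomes 24 and we probe 24 + 8 = 32, and so on, halving diff
// each round until it reaches 0, at which point min_young_length is the
// largest eden length predicted to fit in the pause target.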
double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee(adaptive_young_list_length(), "should not call this otherwise");

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee(surv_rate_group != NULL, "pre-condition");

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT
void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  _should_revert_to_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->updateAfterFullCollection();
}
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_satb_filtering_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
    _par_last_gc_worker_other_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  // This is initialized to zero here and is set during
  // the evacuation pause if marking is in progress.
  _cur_satb_drain_time_ms = 0.0;
  // This is initialized to zero here and is set during the evacuation
  // pause if we actually waited for the root region scanning to finish.
  _root_region_scan_wait_time_ms = 0.0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert(verify_young_ages(), "region age verification");
}
void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _should_revert_to_young_gcs = false;
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T) 0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}
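// Usage sketch (illustrative): sum_of treats sum_arr as a circular buffer
// of N slots and sums n consecutive entries starting at index start,
// wrapping modulo N. For example, sum_of(arr, 3, 4, 5) adds
// arr[3] + arr[4] + arr[0] + arr[1].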
void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < no_of_gc_threads(); ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append("  %3.1lf", val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) no_of_gc_threads();
  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
    avg, min, max, max - min);
}

void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < no_of_gc_threads(); ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) no_of_gc_threads();
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
    (int) total, (int) avg, (int) min, (int) max, (int) max - (int) min);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}
double G1CollectorPolicy::avg_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < no_of_gc_threads(); ++i) {
      ret += data[i];
    }
    return ret / (double) no_of_gc_threads();
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < no_of_gc_threads(); ++i) {
      if (data[i] > ret) {
        ret = data[i];
      }
    }
    return ret;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::sum_of_values(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < no_of_gc_threads(); i++) {
      sum += data[i];
    }
    return sum;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_sum(double* data1, double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < no_of_gc_threads(); ++i) {
      double data = data1[i] + data2[i];
      if (data > ret) {
        ret = data;
      }
    }
  }
  return ret;
}
bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}
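// Worked example (illustrative, assuming the default
// InitiatingHeapOccupancyPercent of 45): on an 8 GB heap the threshold is
// (8 GB / 100) * 45, roughly 3.6 GB. Once non-young occupancy plus the
// pending allocation crosses that, a young GC requests concurrent cycle
// initiation; during mixed GCs we hold off and keep reclaiming old
// regions first.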
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
  1183 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
  1184   double end_time_sec = os::elapsedTime();
  1185   double elapsed_ms = _last_pause_time_ms;
  1186   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1187   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
  1188          "otherwise, the subtraction below does not make sense");
  1189   size_t rs_size =
  1190             _cur_collection_pause_used_regions_at_start - cset_region_length();
  1191   size_t cur_used_bytes = _g1->used();
  1192   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1193   bool last_pause_included_initial_mark = false;
  1194   bool update_stats = !_g1->evacuation_failed();
  1195   set_no_of_gc_threads(no_of_gc_threads);
  1197 #ifndef PRODUCT
  1198   if (G1YoungSurvRateVerbose) {
  1199     gclog_or_tty->print_cr("");
  1200     _short_lived_surv_rate_group->print();
  1201     // do that for any other surv rate groups too
  1203 #endif // PRODUCT
  1205   last_pause_included_initial_mark = during_initial_mark_pause();
  1206   if (last_pause_included_initial_mark) {
  1207     record_concurrent_mark_init_end(0.0);
  1210   if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
  1211     // Note: this might have already been set, if during the last
  1212     // pause we decided to start a cycle but at the beginning of
  1213     // this pause we decided to postpone it. That's OK.
  1214     set_initiate_conc_mark_if_possible();
  1217   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
  1218                           end_time_sec, false);
  1220   // This assert is exempted when we're doing parallel collection pauses,
  1221   // because the fragmentation caused by the parallel GC allocation buffers
  1222   // can lead to more memory being used during collection than was used
  1223   // before. Best leave this out until the fragmentation problem is fixed.
  1224   // Pauses in which evacuation failed can also lead to negative
  1225   // collections, since no space is reclaimed from a region containing an
  1226   // object whose evacuation failed.
  1227   // Further, we're now always doing parallel collection.  But I'm still
  1228   // leaving this here as a placeholder for a more precise assertion later.
  1229   // (DLD, 10/05.)
  1230   assert((true || parallel) // Always using GC LABs now.
  1231          || _g1->evacuation_failed()
  1232          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
  1233          "Negative collection");
  1235   size_t freed_bytes =
  1236     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  1237   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
  1239   double survival_fraction =
  1240     (double)surviving_bytes/
  1241     (double)_collection_set_bytes_used_before;
  1243   // These values are used to update the summary information that is
  1244   // displayed when TraceGen0Time is enabled, and are output as part
  1245   // of the PrintGCDetails output, in the non-parallel case.
  1247   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  1248   double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
  1249   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  1250   double update_rs_processed_buffers =
  1251     sum_of_values(_par_last_update_rs_processed_buffers);
  1252   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  1253   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  1254   double termination_time = avg_value(_par_last_termination_times_ms);
  1256   double known_time = ext_root_scan_time +
  1257                       satb_filtering_time +
  1258                       update_rs_time +
  1259                       scan_rs_time +
  1260                       obj_copy_time;
  1262   double other_time_ms = elapsed_ms;
  1264   // Subtract the SATB drain time. It's initialized to zero at the
  1265   // start of the pause and is updated during the pause if marking
  1266   // is in progress.
  1267   other_time_ms -= _cur_satb_drain_time_ms;
  1269   // Subtract the root region scanning wait time. It's initialized to
  1270   // zero at the start of the pause.
  1271   other_time_ms -= _root_region_scan_wait_time_ms;
  1273   if (parallel) {
  1274     other_time_ms -= _cur_collection_par_time_ms;
  1275   } else {
  1276     other_time_ms -= known_time;
  1279   // Subtract the time taken to clean the card table from the
  1280   // current value of "other time"
  1281   other_time_ms -= _cur_clear_ct_time_ms;
  1283   // Subtract the time spent completing marking in the collection
  1284   // set. Note if marking is not in progress during the pause
  1285   // the value of _mark_closure_time_ms will be zero.
  1286   other_time_ms -= _mark_closure_time_ms;
  1288   // TraceGen0Time and TraceGen1Time summary info updating.
  1289   _all_pause_times_ms->add(elapsed_ms);
  1291   if (update_stats) {
  1292     _summary->record_total_time_ms(elapsed_ms);
  1293     _summary->record_other_time_ms(other_time_ms);
  1295     MainBodySummary* body_summary = _summary->main_body_summary();
  1296     assert(body_summary != NULL, "should not be null!");
  1298     // This will be non-zero iff marking is currently in progress (i.e.
  1299     // _g1->mark_in_progress() == true) and the currrent pause was not
  1300     // an initial mark pause. Since the body_summary items are NumberSeqs,
  1301     // however, they have to be consistent and updated in lock-step with
  1302     // each other. Therefore we unconditionally record the SATB drain
  1303     // time - even if it's zero.
  1304     body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
  1305     body_summary->record_root_region_scan_wait_time_ms(
  1306                                                _root_region_scan_wait_time_ms);
  1308     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
  1309     body_summary->record_satb_filtering_time_ms(satb_filtering_time);
  1310     body_summary->record_update_rs_time_ms(update_rs_time);
  1311     body_summary->record_scan_rs_time_ms(scan_rs_time);
  1312     body_summary->record_obj_copy_time_ms(obj_copy_time);
  1314     if (parallel) {
  1315       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
  1316       body_summary->record_termination_time_ms(termination_time);
  1318       double parallel_known_time = known_time + termination_time;
  1319       double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
  1320       body_summary->record_parallel_other_time_ms(parallel_other_time);
  1321     }
  1323     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
  1324     body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
  1326     // We exempt parallel collection from this check because Alloc Buffer
  1327     // fragmentation can produce negative collections.  Same with evac
  1328     // failure.
  1329     // Further, we're now always doing parallel collection.  But I'm still
  1330     // leaving this here as a placeholder for a more precise assertion later.
  1331     // (DLD, 10/05.)
  1332     assert((true || parallel)
  1333            || _g1->evacuation_failed()
  1334            || surviving_bytes <= _collection_set_bytes_used_before,
  1335            "Or else negative collection!");
  1337     // this is where we update the allocation rate of the application
  1338     double app_time_ms =
  1339       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1340     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1341       // This usually happens due to the timer not having the required
  1342       // granularity. Some Linuxes are the usual culprits.
  1343       // We'll just set it to something (arbitrarily) small.
  1344       app_time_ms = 1.0;
  1345     }
  1346     // We maintain the invariant that all objects allocated by mutator
  1347     // threads will be allocated out of eden regions. So, we can use
  1348     // the eden region number allocated since the previous GC to
  1349     // calculate the application's allocate rate. The only exception
  1350     // to that is humongous objects that are allocated separately. But
  1351     // given that humongous object allocations do not really affect
  1352     // either the pause's duration nor when the next pause will take
  1353     // place we can safely ignore them here.
  1354     size_t regions_allocated = eden_cset_region_length();
  1355     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1356     _alloc_rate_ms_seq->add(alloc_rate_ms);
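           // The rate is in eden regions per ms of mutator time; e.g., 5
           // eden regions allocated over 2000ms of mutator time gives an
           // alloc_rate_ms of 0.0025.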
  1358     double interval_ms =
  1359       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1360     update_recent_gc_times(end_time_sec, elapsed_ms);
  1361     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
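           // This approximates the fraction of recent wall-clock time spent
           // in GC pauses; e.g., 120ms of GC time over a 1000ms interval
           // gives 0.12. Out-of-range values are clipped to [0.0, 1.0] below.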
  1362     if (recent_avg_pause_time_ratio() < 0.0 ||
  1363         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1364 #ifndef PRODUCT
  1365       // Dump info to allow post-facto debugging
  1366       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1367       gclog_or_tty->print_cr("-------------------------------------------");
  1368       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1369       _recent_gc_times_ms->dump();
  1370       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1371       _recent_prev_end_times_for_all_gcs_sec->dump();
  1372       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1373                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1374       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1375       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1376 #endif  // !PRODUCT
  1377       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1378       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1379       if (_recent_avg_pause_time_ratio < 0.0) {
  1380         _recent_avg_pause_time_ratio = 0.0;
  1381       } else {
  1382         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1383         _recent_avg_pause_time_ratio = 1.0;
  1384       }
  1385     }
  1386   }
  1388   for (int i = 0; i < _aux_num; ++i) {
  1389     if (_cur_aux_times_set[i]) {
  1390       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1391     }
  1392   }
  1394   // PrintGCDetails output
  1395   if (PrintGCDetails) {
  1396     bool print_marking_info =
  1397       _g1->mark_in_progress() && !last_pause_included_initial_mark;
  1399     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1400                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1401                            elapsed_ms / 1000.0);
  1403     if (_root_region_scan_wait_time_ms > 0.0) {
  1404       print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  1405     }
  1406     if (parallel) {
  1407       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1408       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
  1409       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
  1410       if (print_marking_info) {
  1411         print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
  1412       }
  1413       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1414       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  1415       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1416       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1417       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1418       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  1419       print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
  1421       for (int i = 0; i < _parallel_gc_threads; i++) {
  1422         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
  1424         double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
  1425                                    _par_last_satb_filtering_times_ms[i] +
  1426                                    _par_last_update_rs_times_ms[i] +
  1427                                    _par_last_scan_rs_times_ms[i] +
  1428                                    _par_last_obj_copy_times_ms[i] +
  1429                                    _par_last_termination_times_ms[i];
  1431         _par_last_gc_worker_other_times_ms[i] = _cur_collection_par_time_ms - worker_known_time;
  1432       }
  1433       print_par_stats(2, "GC Worker", _par_last_gc_worker_times_ms);
  1434       print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
  1435     } else {
  1436       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1437       if (print_marking_info) {
  1438         print_stats(1, "SATB Filtering", satb_filtering_time);
  1439       }
  1440       print_stats(1, "Update RS", update_rs_time);
  1441       print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
  1442       print_stats(1, "Scan RS", scan_rs_time);
  1443       print_stats(1, "Object Copying", obj_copy_time);
  1444     }
  1445     if (print_marking_info) {
  1446       print_stats(1, "Complete CSet Marking", _mark_closure_time_ms);
  1447     }
  1448     print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1449 #ifndef PRODUCT
  1450     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1451     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1452     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1453     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1454     if (_num_cc_clears > 0) {
  1455       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1456     }
  1457 #endif
  1458     print_stats(1, "Other", other_time_ms);
  1459     print_stats(2, "Choose CSet",
  1460                    (_recorded_young_cset_choice_time_ms +
  1461                     _recorded_non_young_cset_choice_time_ms));
  1462     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  1463     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  1464     print_stats(2, "Free CSet",
  1465                    (_recorded_young_free_cset_time_ms +
  1466                     _recorded_non_young_free_cset_time_ms));
  1468     for (int i = 0; i < _aux_num; ++i) {
  1469       if (_cur_aux_times_set[i]) {
  1470         char buffer[96];
  1471         sprintf(buffer, "Aux%d", i);
  1472         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1473       }
  1474     }
  1475   }
  1477   // Update the efficiency-since-mark vars.
  1478   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1479   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1480     // This usually happens due to the timer not having the required
  1481     // granularity. Some Linuxes are the usual culprits.
  1482     // We'll just set it to something (arbitrarily) small.
  1483     proc_ms = 1.0;
  1484   }
  1485   double cur_efficiency = (double) freed_bytes / proc_ms;
  1487   bool new_in_marking_window = _in_marking_window;
  1488   bool new_in_marking_window_im = false;
  1489   if (during_initial_mark_pause()) {
  1490     new_in_marking_window = true;
  1491     new_in_marking_window_im = true;
  1492   }
  1494   if (_last_young_gc) {
  1495     if (!last_pause_included_initial_mark) {
  1496       ergo_verbose2(ErgoMixedGCs,
  1497                     "start mixed GCs",
  1498                     ergo_format_byte_perc("known garbage"),
  1499                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1500       set_gcs_are_young(false);
  1501     } else {
  1502       ergo_verbose0(ErgoMixedGCs,
  1503                     "do not start mixed GCs",
  1504                     ergo_format_reason("concurrent cycle is about to start"));
  1505     }
  1506     _last_young_gc = false;
  1507   }
  1509   if (!_last_gc_was_young) {
  1510     if (_should_revert_to_young_gcs) {
  1511       ergo_verbose2(ErgoMixedGCs,
  1512                     "end mixed GCs",
  1513                     ergo_format_reason("mixed GCs end requested")
  1514                     ergo_format_byte_perc("known garbage"),
  1515                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1516       set_gcs_are_young(true);
  1517     } else if (_known_garbage_ratio < 0.05) {
  1518       ergo_verbose3(ErgoMixedGCs,
  1519                "end mixed GCs",
  1520                ergo_format_reason("known garbage percent lower than threshold")
  1521                ergo_format_byte_perc("known garbage")
  1522                ergo_format_perc("threshold"),
  1523                _known_garbage_bytes, _known_garbage_ratio * 100.0,
  1524                0.05 * 100.0);
  1525       set_gcs_are_young(true);
  1526     } else if (adaptive_young_list_length() &&
  1527               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
  1528       ergo_verbose5(ErgoMixedGCs,
  1529                     "end mixed GCs",
  1530                     ergo_format_reason("current GC efficiency lower than "
  1531                                        "predicted young GC efficiency")
  1532                     ergo_format_double("GC efficiency factor")
  1533                     ergo_format_double("current GC efficiency")
  1534                     ergo_format_double("predicted young GC efficiency")
  1535                     ergo_format_byte_perc("known garbage"),
  1536                     get_gc_eff_factor(), cur_efficiency,
  1537                     predict_young_gc_eff(),
  1538                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1539       set_gcs_are_young(true);
  1540     }
  1541   }
  1542   _should_revert_to_young_gcs = false;
  1544   if (_last_gc_was_young && !_during_marking) {
  1545     _young_gc_eff_seq->add(cur_efficiency);
  1546   }
  1548   _short_lived_surv_rate_group->start_adding_regions();
  1549   // do that for any other surv rate groups
  1551   if (update_stats) {
  1552     double pause_time_ms = elapsed_ms;
  1554     size_t diff = 0;
  1555     if (_max_pending_cards >= _pending_cards)
  1556       diff = _max_pending_cards - _pending_cards;
  1557     _pending_card_diff_seq->add((double) diff);
  1559     double cost_per_card_ms = 0.0;
  1560     if (_pending_cards > 0) {
  1561       cost_per_card_ms = update_rs_time / (double) _pending_cards;
  1562       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1563     }
  1565     size_t cards_scanned = _g1->cards_scanned();
  1567     double cost_per_entry_ms = 0.0;
  1568     if (cards_scanned > 10) {
  1569       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1570       if (_last_gc_was_young) {
  1571         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1572       } else {
  1573         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1574       }
  1575     }
  1577     if (_max_rs_lengths > 0) {
  1578       double cards_per_entry_ratio =
  1579         (double) cards_scanned / (double) _max_rs_lengths;
  1580       if (_last_gc_was_young) {
  1581         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1582       } else {
  1583         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1584       }
  1585     }
  1587     // This is defensive. For a while _max_rs_lengths could get
  1588     // smaller than _recorded_rs_lengths which was causing
  1589     // rs_length_diff to get very large and mess up the RSet length
  1590     // predictions. The reason was unsafe concurrent updates to the
  1591     // _inc_cset_recorded_rs_lengths field which the code below guards
  1592     // against (see CR 7118202). This bug has now been fixed (see CR
  1593     // 7119027). However, I'm still worried that
  1594     // _inc_cset_recorded_rs_lengths might still end up somewhat
  1595     // inaccurate. The concurrent refinement thread calculates an
  1596     // RSet's length concurrently with other CR threads updating it
  1597     // which might cause it to calculate the length incorrectly (if,
  1598     // say, it's in mid-coarsening). So I'll leave in the defensive
  1599     // conditional below just in case.
  1600     size_t rs_length_diff = 0;
  1601     if (_max_rs_lengths > _recorded_rs_lengths) {
  1602       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
  1603     }
  1604     _rs_length_diff_seq->add((double) rs_length_diff);
  1606     size_t copied_bytes = surviving_bytes;
  1607     double cost_per_byte_ms = 0.0;
  1608     if (copied_bytes > 0) {
  1609       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
  1610       if (_in_marking_window) {
  1611         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1612       } else {
  1613         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1614       }
  1615     }
  1617     double all_other_time_ms = pause_time_ms -
  1618       (update_rs_time + scan_rs_time + obj_copy_time +
  1619        _mark_closure_time_ms + termination_time);
  1621     double young_other_time_ms = 0.0;
  1622     if (young_cset_region_length() > 0) {
  1623       young_other_time_ms =
  1624         _recorded_young_cset_choice_time_ms +
  1625         _recorded_young_free_cset_time_ms;
  1626       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1627                                           (double) young_cset_region_length());
  1628     }
  1629     double non_young_other_time_ms = 0.0;
  1630     if (old_cset_region_length() > 0) {
  1631       non_young_other_time_ms =
  1632         _recorded_non_young_cset_choice_time_ms +
  1633         _recorded_non_young_free_cset_time_ms;
  1635       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1636                                             (double) old_cset_region_length());
  1637     }
  1639     double constant_other_time_ms = all_other_time_ms -
  1640       (young_other_time_ms + non_young_other_time_ms);
  1641     _constant_other_time_ms_seq->add(constant_other_time_ms);
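           // Each sequence updated above feeds the corresponding predict_*()
           // helper used by the pause-time model; e.g., an update_rs_time of
           // 20ms with 4000 pending cards adds 0.005 ms/card to
           // _cost_per_card_ms_seq.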
  1643     double survival_ratio = 0.0;
  1644     if (_bytes_in_collection_set_before_gc > 0) {
  1645       survival_ratio = (double) _bytes_copied_during_gc /
  1646                                    (double) _bytes_in_collection_set_before_gc;
  1647     }
  1649     _pending_cards_seq->add((double) _pending_cards);
  1650     _rs_lengths_seq->add((double) _max_rs_lengths);
  1652     double expensive_region_limit_ms =
  1653       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
  1654     if (expensive_region_limit_ms < 0.0) {
  1655       // this means that the other time was predicted to be longer
  1656       // than the max pause time
  1657       expensive_region_limit_ms = (double) MaxGCPauseMillis;
  1658     }
  1659     _expensive_region_limit_ms = expensive_region_limit_ms;
  1660   }
  1662   _in_marking_window = new_in_marking_window;
  1663   _in_marking_window_im = new_in_marking_window_im;
  1664   _free_regions_at_end_of_collection = _g1->free_regions();
  1665   update_young_list_target_length();
  1667   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1668   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1669   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  1671   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
  1672 }
  1674 #define EXT_SIZE_FORMAT "%d%s"
  1675 #define EXT_SIZE_PARAMS(bytes)                                  \
  1676   byte_size_in_proper_unit((bytes)),                            \
  1677   proper_unit_for_byte_size((bytes))
  1679 void G1CollectorPolicy::print_heap_transition() {
  1680   if (PrintGCDetails) {
  1681     YoungList* young_list = _g1->young_list();
  1682     size_t eden_bytes = young_list->eden_used_bytes();
  1683     size_t survivor_bytes = young_list->survivor_used_bytes();
  1684     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  1685     size_t used = _g1->used();
  1686     size_t capacity = _g1->capacity();
  1687     size_t eden_capacity =
  1688       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
  1690     gclog_or_tty->print_cr(
  1691       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1692       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1693       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1694       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1695       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
  1696       EXT_SIZE_PARAMS(_prev_eden_capacity),
  1697       EXT_SIZE_PARAMS(eden_bytes),
  1698       EXT_SIZE_PARAMS(eden_capacity),
  1699       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
  1700       EXT_SIZE_PARAMS(survivor_bytes),
  1701       EXT_SIZE_PARAMS(used_before_gc),
  1702       EXT_SIZE_PARAMS(_capacity_before_gc),
  1703       EXT_SIZE_PARAMS(used),
  1704       EXT_SIZE_PARAMS(capacity));
  1706     _prev_eden_capacity = eden_capacity;
  1707   } else if (PrintGC) {
  1708     _g1->print_size_transition(gclog_or_tty,
  1709                                _cur_collection_pause_used_at_start_bytes,
  1710                                _g1->used(), _g1->capacity());
  1711   }
  1712 }
  1714 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1715                                                      double update_rs_processed_buffers,
  1716                                                      double goal_ms) {
  1717   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1718   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1720   if (G1UseAdaptiveConcRefinement) {
  1721     const int k_gy = 3, k_gr = 6;
  1722     const double inc_k = 1.1, dec_k = 0.9;
  1724     int g = cg1r->green_zone();
  1725     if (update_rs_time > goal_ms) {
  1726       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean mutator-only processing.
  1727     } else {
  1728       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1729         g = (int)MAX2(g * inc_k, g + 1.0);
  1730       }
  1731     }
  1732     // Change the refinement threads params
  1733     cg1r->set_green_zone(g);
  1734     cg1r->set_yellow_zone(g * k_gy);
  1735     cg1r->set_red_zone(g * k_gr);
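           // For example, a green zone of 100 buffers yields a yellow zone
           // of 300 and a red zone of 600; inc_k/dec_k adjust the green zone
           // by +/-10% per adjustment.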
  1736     cg1r->reinitialize_threads();
  1738     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1739     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1740                                     cg1r->yellow_zone());
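           // e.g., a green zone of 100 with sigma() == 0.5 gives a delta of
           // 50, so the threshold becomes MIN2(150, yellow zone).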
  1741     // Change the barrier params
  1742     dcqs.set_process_completed_threshold(processing_threshold);
  1743     dcqs.set_max_completed_queue(cg1r->red_zone());
  1744   }
  1746   int curr_queue_size = dcqs.completed_buffers_num();
  1747   if (curr_queue_size >= cg1r->yellow_zone()) {
  1748     dcqs.set_completed_queue_padding(curr_queue_size);
  1749   } else {
  1750     dcqs.set_completed_queue_padding(0);
  1751   }
  1752   dcqs.notify_if_necessary();
  1753 }
  1755 double
  1756 G1CollectorPolicy::
  1757 predict_young_collection_elapsed_time_ms(size_t adjustment) {
  1758   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
  1760   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1761   size_t young_num = g1h->young_list()->length();
  1762   if (young_num == 0)
  1763     return 0.0;
  1765   young_num += adjustment;
  1766   size_t pending_cards = predict_pending_cards();
  1767   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  1768                       predict_rs_length_diff();
  1769   size_t card_num;
  1770   if (gcs_are_young()) {
  1771     card_num = predict_young_card_num(rs_lengths);
  1772   } else {
  1773     card_num = predict_non_young_card_num(rs_lengths);
  1774   }
  1775   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  1776   double accum_yg_surv_rate =
  1777     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
  1779   size_t bytes_to_copy =
  1780     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
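         // The prediction is an additive cost model: RS update time for the
         // predicted pending cards, RS scan time for the predicted card
         // count, copy time for the predicted survivor bytes, plus
         // per-region and constant "other" overheads.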
  1782   return
  1783     predict_rs_update_time_ms(pending_cards) +
  1784     predict_rs_scan_time_ms(card_num) +
  1785     predict_object_copy_time_ms(bytes_to_copy) +
  1786     predict_young_other_time_ms(young_num) +
  1787     predict_constant_other_time_ms();
  1788 }
  1790 double
  1791 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1792   size_t rs_length = predict_rs_length_diff();
  1793   size_t card_num;
  1794   if (gcs_are_young()) {
  1795     card_num = predict_young_card_num(rs_length);
  1796   } else {
  1797     card_num = predict_non_young_card_num(rs_length);
  1798   }
  1799   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1800 }
  1802 double
  1803 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1804                                                 size_t scanned_cards) {
  1805   return
  1806     predict_rs_update_time_ms(pending_cards) +
  1807     predict_rs_scan_time_ms(scanned_cards) +
  1808     predict_constant_other_time_ms();
  1809 }
  1811 double
  1812 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1813                                                   bool young) {
  1814   size_t rs_length = hr->rem_set()->occupied();
  1815   size_t card_num;
  1816   if (gcs_are_young()) {
  1817     card_num = predict_young_card_num(rs_length);
  1818   } else {
  1819     card_num = predict_non_young_card_num(rs_length);
  1820   }
  1821   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1823   double region_elapsed_time_ms =
  1824     predict_rs_scan_time_ms(card_num) +
  1825     predict_object_copy_time_ms(bytes_to_copy);
  1827   if (young)
  1828     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1829   else
  1830     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1832   return region_elapsed_time_ms;
  1833 }
  1835 size_t
  1836 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1837   size_t bytes_to_copy;
  1838   if (hr->is_marked())
  1839     bytes_to_copy = hr->max_live_bytes();
  1840   else {
  1841     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
  1842                "invariant" );
  1843     int age = hr->age_in_surv_rate_group();
  1844     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
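           // e.g., a young region with 1M used and a predicted survival
           // rate of 0.3 is predicted to copy roughly 300K.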
  1845     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1846   }
  1848   return bytes_to_copy;
  1849 }
  1851 void
  1852 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
  1853                                           size_t survivor_cset_region_length) {
  1854   _eden_cset_region_length     = eden_cset_region_length;
  1855   _survivor_cset_region_length = survivor_cset_region_length;
  1856   _old_cset_region_length      = 0;
  1857 }
  1859 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1860   _recorded_rs_lengths = rs_lengths;
  1861 }
  1863 void G1CollectorPolicy::check_if_region_is_too_expensive(double
  1864                                                            predicted_time_ms) {
  1865   // I don't think we need to do this when in young GC mode since
  1866   // marking will be initiated next time we hit the soft limit anyway...
  1867   if (predicted_time_ms > _expensive_region_limit_ms) {
  1868     ergo_verbose2(ErgoMixedGCs,
  1869               "request mixed GCs end",
  1870               ergo_format_reason("predicted region time higher than threshold")
  1871               ergo_format_ms("predicted region time")
  1872               ergo_format_ms("threshold"),
  1873               predicted_time_ms, _expensive_region_limit_ms);
  1874     // no point in doing another mixed GC
  1875     _should_revert_to_young_gcs = true;
  1876   }
  1877 }
  1879 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1880                                                double elapsed_ms) {
  1881   _recent_gc_times_ms->add(elapsed_ms);
  1882   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1883   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1884 }
  1886 size_t G1CollectorPolicy::expansion_amount() {
  1887   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  1888   double threshold = _gc_overhead_perc;
  1889   if (recent_gc_overhead > threshold) {
  1890     // We will double the existing space, or take
  1891     // G1ExpandByPercentOfAvailable % of the available expansion
  1892     // space, whichever is smaller, bounded below by a minimum
  1893     // expansion (unless that's all that's left.)
  1894     const size_t min_expand_bytes = 1*M;
  1895     size_t reserved_bytes = _g1->max_capacity();
  1896     size_t committed_bytes = _g1->capacity();
  1897     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  1898     size_t expand_bytes;
  1899     size_t expand_bytes_via_pct =
  1900       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  1901     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  1902     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  1903     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
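           // Worked example (assuming the default G1ExpandByPercentOfAvailable
           // of 20): with 2048M reserved and 128M committed, 20% of the 1920M
           // uncommitted space is 384M, which the first MIN2 caps at 128M,
           // i.e., doubling the committed space.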
  1905     ergo_verbose5(ErgoHeapSizing,
  1906                   "attempt heap expansion",
  1907                   ergo_format_reason("recent GC overhead higher than "
  1908                                      "threshold after GC")
  1909                   ergo_format_perc("recent GC overhead")
  1910                   ergo_format_perc("threshold")
  1911                   ergo_format_byte("uncommitted")
  1912                   ergo_format_byte_perc("calculated expansion amount"),
  1913                   recent_gc_overhead, threshold,
  1914                   uncommitted_bytes,
  1915                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  1917     return expand_bytes;
  1918   } else {
  1919     return 0;
  1920   }
  1921 }
  1923 class CountCSClosure: public HeapRegionClosure {
  1924   G1CollectorPolicy* _g1_policy;
  1925 public:
  1926   CountCSClosure(G1CollectorPolicy* g1_policy) :
  1927     _g1_policy(g1_policy) {}
  1928   bool doHeapRegion(HeapRegion* r) {
  1929     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  1930     return false;
  1931   }
  1932 };
  1934 void G1CollectorPolicy::count_CS_bytes_used() {
  1935   CountCSClosure cs_closure(this);
  1936   _g1->collection_set_iterate(&cs_closure);
  1937 }
  1939 void G1CollectorPolicy::print_summary(int level,
  1940                                       const char* str,
  1941                                       NumberSeq* seq) const {
  1942   double sum = seq->sum();
  1943   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  1944                 str, sum / 1000.0, seq->avg());
  1945 }
  1947 void G1CollectorPolicy::print_summary_sd(int level,
  1948                                          const char* str,
  1949                                          NumberSeq* seq) const {
  1950   print_summary(level, str, seq);
  1951   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  1952                 seq->num(), seq->sd(), seq->maximum());
  1953 }
  1955 void G1CollectorPolicy::check_other_times(int level,
  1956                                         NumberSeq* other_times_ms,
  1957                                         NumberSeq* calc_other_times_ms) const {
  1958   bool should_print = false;
  1959   LineBuffer buf(level + 2);
  1961   double max_sum = MAX2(fabs(other_times_ms->sum()),
  1962                         fabs(calc_other_times_ms->sum()));
  1963   double min_sum = MIN2(fabs(other_times_ms->sum()),
  1964                         fabs(calc_other_times_ms->sum()));
  1965   double sum_ratio = max_sum / min_sum;
  1966   if (sum_ratio > 1.1) {
  1967     should_print = true;
  1968     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  1969   }
  1971   double max_avg = MAX2(fabs(other_times_ms->avg()),
  1972                         fabs(calc_other_times_ms->avg()));
  1973   double min_avg = MIN2(fabs(other_times_ms->avg()),
  1974                         fabs(calc_other_times_ms->avg()));
  1975   double avg_ratio = max_avg / min_avg;
  1976   if (avg_ratio > 1.1) {
  1977     should_print = true;
  1978     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  1979   }
  1981   if (other_times_ms->sum() < -0.01) {
  1982     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  1983   }
  1985   if (other_times_ms->avg() < -0.01) {
  1986     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  1987   }
  1989   if (calc_other_times_ms->sum() < -0.01) {
  1990     should_print = true;
  1991     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  1992   }
  1994   if (calc_other_times_ms->avg() < -0.01) {
  1995     should_print = true;
  1996     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  1997   }
  1999   if (should_print)
  2000     print_summary(level, "Other(Calc)", calc_other_times_ms);
  2001 }
  2003 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  2004   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  2005   MainBodySummary*    body_summary = summary->main_body_summary();
  2006   if (summary->get_total_seq()->num() > 0) {
  2007     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  2008     if (body_summary != NULL) {
  2009       print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
  2010       if (parallel) {
  2011         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  2012         print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  2013         print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
  2014         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  2015         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  2016         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  2017         print_summary(2, "Termination", body_summary->get_termination_seq());
  2018         print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
  2019         {
  2020           NumberSeq* other_parts[] = {
  2021             body_summary->get_ext_root_scan_seq(),
  2022             body_summary->get_satb_filtering_seq(),
  2023             body_summary->get_update_rs_seq(),
  2024             body_summary->get_scan_rs_seq(),
  2025             body_summary->get_obj_copy_seq(),
  2026             body_summary->get_termination_seq()
  2027           };
  2028           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  2029                                         6, other_parts);
  2030           check_other_times(2, body_summary->get_parallel_other_seq(),
  2031                             &calc_other_times_ms);
  2032         }
  2033       } else {
  2034         print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  2035         print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
  2036         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  2037         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  2038         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  2039       }
  2040     }
  2041     print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
  2042     print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  2043     print_summary(1, "Other", summary->get_other_seq());
  2044     {
  2045       if (body_summary != NULL) {
  2046         NumberSeq calc_other_times_ms;
  2047         if (parallel) {
  2048           // parallel
  2049           NumberSeq* other_parts[] = {
  2050             body_summary->get_satb_drain_seq(),
  2051             body_summary->get_root_region_scan_wait_seq(),
  2052             body_summary->get_parallel_seq(),
  2053             body_summary->get_clear_ct_seq()
  2054           };
  2055           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2056                                           4, other_parts);
  2057         } else {
  2058           // serial
  2059           NumberSeq* other_parts[] = {
  2060             body_summary->get_satb_drain_seq(),
  2061             body_summary->get_root_region_scan_wait_seq(),
  2062             body_summary->get_update_rs_seq(),
  2063             body_summary->get_ext_root_scan_seq(),
  2064             body_summary->get_satb_filtering_seq(),
  2065             body_summary->get_scan_rs_seq(),
  2066             body_summary->get_obj_copy_seq()
  2067           };
  2068           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2069                                           7, other_parts);
  2071         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
  2072       }
  2073     }
  2074   } else {
  2075     LineBuffer(1).append_and_print_cr("none");
  2076   }
  2077   LineBuffer(0).append_and_print_cr("");
  2078 }
  2080 void G1CollectorPolicy::print_tracing_info() const {
  2081   if (TraceGen0Time) {
  2082     gclog_or_tty->print_cr("ALL PAUSES");
  2083     print_summary_sd(0, "Total", _all_pause_times_ms);
  2084     gclog_or_tty->print_cr("");
  2085     gclog_or_tty->print_cr("");
  2086     gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  2087     gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  2088     gclog_or_tty->print_cr("");
  2090     gclog_or_tty->print_cr("EVACUATION PAUSES");
  2091     print_summary(_summary);
  2093     gclog_or_tty->print_cr("MISC");
  2094     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  2095     print_summary_sd(0, "Yields", _all_yield_times_ms);
  2096     for (int i = 0; i < _aux_num; ++i) {
  2097       if (_all_aux_times_ms[i].num() > 0) {
  2098         char buffer[96];
  2099         sprintf(buffer, "Aux%d", i);
  2100         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  2101       }
  2102     }
  2103   }
  2104   if (TraceGen1Time) {
  2105     if (_all_full_gc_times_ms->num() > 0) {
  2106       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2107                  _all_full_gc_times_ms->num(),
  2108                  _all_full_gc_times_ms->sum() / 1000.0);
  2109       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  2110       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2111                     _all_full_gc_times_ms->sd(),
  2112                     _all_full_gc_times_ms->maximum());
  2113     }
  2114   }
  2115 }
  2117 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2118 #ifndef PRODUCT
  2119   _short_lived_surv_rate_group->print_surv_rate_summary();
  2120   // add this call for any other surv rate groups
  2121 #endif // PRODUCT
  2122 }
  2124 #ifndef PRODUCT
  2125 // for debugging, bit of a hack...
  2126 static char*
  2127 region_num_to_mbs(int length) {
  2128   static char buffer[64];
  2129   double bytes = (double) (length * HeapRegion::GrainBytes);
  2130   double mbs = bytes / (double) (1024 * 1024);
  2131   sprintf(buffer, "%7.2lfMB", mbs);
  2132   return buffer;
  2133 }
  2134 #endif // PRODUCT
  2136 size_t G1CollectorPolicy::max_regions(int purpose) {
  2137   switch (purpose) {
  2138     case GCAllocForSurvived:
  2139       return _max_survivor_regions;
  2140     case GCAllocForTenured:
  2141       return REGIONS_UNLIMITED;
  2142     default:
  2143       ShouldNotReachHere();
  2144       return REGIONS_UNLIMITED;
  2145   };
  2146 }
  2148 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  2149   size_t expansion_region_num = 0;
  2150   if (GCLockerEdenExpansionPercent > 0) {
  2151     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  2152     double expansion_region_num_d = perc * (double) _young_list_target_length;
  2153     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  2154     // less than 1.0) we'll get 1.
  2155     expansion_region_num = (size_t) ceil(expansion_region_num_d);
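           // e.g., GCLockerEdenExpansionPercent of 5 with a target length of
           // 50 regions gives ceil(2.5) = 3 extra regions.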
  2156   } else {
  2157     assert(expansion_region_num == 0, "sanity");
  2158   }
  2159   _young_list_max_length = _young_list_target_length + expansion_region_num;
  2160   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  2161 }
  2163 // Calculates survivor space parameters.
  2164 void G1CollectorPolicy::update_survivors_policy() {
  2165   double max_survivor_regions_d =
  2166                  (double) _young_list_target_length / (double) SurvivorRatio;
  2167   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  2168   // smaller than 1.0) we'll get 1.
  2169   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
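         // e.g., a young list target length of 20 with the default
         // SurvivorRatio of 8 gives ceil(2.5) = 3 survivor regions.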
  2171   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2172         HeapRegion::GrainWords * _max_survivor_regions);
  2173 }
  2175 #ifndef PRODUCT
  2176 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2177   CollectionSetChooser* _chooser;
  2178 public:
  2179   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2180     _chooser(chooser) {}
  2182   bool doHeapRegion(HeapRegion* r) {
  2183     if (!r->continuesHumongous()) {
  2184       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2185     }
  2186     return false;
  2187   }
  2188 };
  2190 bool G1CollectorPolicy::assertMarkedBytesDataOK() {
  2191   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2192   _g1->heap_region_iterate(&cl);
  2193   return true;
  2194 }
  2195 #endif
  2197 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  2198                                                      GCCause::Cause gc_cause) {
  2199   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2200   if (!during_cycle) {
  2201     ergo_verbose1(ErgoConcCycles,
  2202                   "request concurrent cycle initiation",
  2203                   ergo_format_reason("requested by GC cause")
  2204                   ergo_format_str("GC cause"),
  2205                   GCCause::to_string(gc_cause));
  2206     set_initiate_conc_mark_if_possible();
  2207     return true;
  2208   } else {
  2209     ergo_verbose1(ErgoConcCycles,
  2210                   "do not request concurrent cycle initiation",
  2211                   ergo_format_reason("concurrent cycle already in progress")
  2212                   ergo_format_str("GC cause"),
  2213                   GCCause::to_string(gc_cause));
  2214     return false;
  2215   }
  2216 }
  2218 void
  2219 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2220   // We are about to decide on whether this pause will be an
  2221   // initial-mark pause.
  2223   // First, during_initial_mark_pause() should not be already set. We
  2224   // will set it here if we have to. However, it should be cleared by
  2225   // the end of the pause (it's only set for the duration of an
  2226   // initial-mark pause).
  2227   assert(!during_initial_mark_pause(), "pre-condition");
  2229   if (initiate_conc_mark_if_possible()) {
  2230     // We had noticed on a previous pause that the heap occupancy has
  2231     // gone over the initiating threshold and we should start a
  2232     // concurrent marking cycle. So we might initiate one.
  2234     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2235     if (!during_cycle) {
  2236       // The concurrent marking thread is not "during a cycle", i.e.,
  2237       // it has completed the last one. So we can go ahead and
  2238       // initiate a new cycle.
  2240       set_during_initial_mark_pause();
  2241       // We do not allow mixed GCs during marking.
  2242       if (!gcs_are_young()) {
  2243         set_gcs_are_young(true);
  2244         ergo_verbose0(ErgoMixedGCs,
  2245                       "end mixed GCs",
  2246                       ergo_format_reason("concurrent cycle is about to start"));
  2247       }
  2249       // And we can now clear initiate_conc_mark_if_possible() as
  2250       // we've already acted on it.
  2251       clear_initiate_conc_mark_if_possible();
  2253       ergo_verbose0(ErgoConcCycles,
  2254                   "initiate concurrent cycle",
  2255                   ergo_format_reason("concurrent cycle initiation requested"));
  2256     } else {
  2257       // The concurrent marking thread is still finishing up the
  2258       // previous cycle. If we start one right now the two cycles
  2259       // overlap. In particular, the concurrent marking thread might
  2260       // be in the process of clearing the next marking bitmap (which
  2261       // we will use for the next cycle if we start one). Starting a
  2262       // cycle now will be bad given that parts of the marking
  2263       // information might get cleared by the marking thread. And we
  2264       // cannot wait for the marking thread to finish the cycle as it
  2265       // periodically yields while clearing the next marking bitmap
  2266       // and, if it's in a yield point, it's waiting for us to
  2267       // finish. So, at this point we will not start a cycle and we'll
  2268       // let the concurrent marking thread complete the last one.
  2269       ergo_verbose0(ErgoConcCycles,
  2270                     "do not initiate concurrent cycle",
  2271                     ergo_format_reason("concurrent cycle already in progress"));
  2272     }
  2273   }
  2274 }
  2276 class KnownGarbageClosure: public HeapRegionClosure {
  2277   CollectionSetChooser* _hrSorted;
  2279 public:
  2280   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2281     _hrSorted(hrSorted)
  2282   {}
  2284   bool doHeapRegion(HeapRegion* r) {
  2285     // We only include humongous regions in collection
  2286     // sets when concurrent mark shows that their contained object is
  2287     // unreachable.
  2289     // Do we have any marking information for this region?
  2290     if (r->is_marked()) {
  2291       // We don't include humongous regions in collection
  2292       // sets because we collect them immediately at the end of a marking
  2293       // cycle.  We also don't include young regions because we *must*
  2294       // include them in the next collection pause.
  2295       if (!r->isHumongous() && !r->is_young()) {
  2296         _hrSorted->addMarkedHeapRegion(r);
  2297       }
  2298     }
  2299     return false;
  2300   }
  2301 };
  2303 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2304   CollectionSetChooser* _hrSorted;
  2305   jint _marked_regions_added;
  2306   jint _chunk_size;
  2307   jint _cur_chunk_idx;
  2308   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2309   int _worker;
  2310   int _invokes;
  2312   void get_new_chunk() {
  2313     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2314     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2315   }
  2316   void add_region(HeapRegion* r) {
  2317     if (_cur_chunk_idx == _cur_chunk_end) {
  2318       get_new_chunk();
  2320     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2321     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2322     _marked_regions_added++;
  2323     _cur_chunk_idx++;
  2324   }
  2326 public:
  2327   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2328                            jint chunk_size,
  2329                            int worker) :
  2330     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2331     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
  2332     _invokes(0)
  2333   {}
  2335   bool doHeapRegion(HeapRegion* r) {
  2336     // We only include humongous regions in collection
  2337     // sets when concurrent mark shows that their contained object is
  2338     // unreachable.
  2339     _invokes++;
  2341     // Do we have any marking information for this region?
  2342     if (r->is_marked()) {
  2343       // We don't include humongous regions in collection
  2344       // sets because we collect them immediately at the end of a marking
  2345       // cycle.
  2346       // We also do not include young regions in collection sets
  2347       if (!r->isHumongous() && !r->is_young()) {
  2348         add_region(r);
  2349       }
  2350     }
  2351     return false;
  2352   }
  2353   jint marked_regions_added() { return _marked_regions_added; }
  2354   int invokes() { return _invokes; }
  2355 };
  2357 class ParKnownGarbageTask: public AbstractGangTask {
  2358   CollectionSetChooser* _hrSorted;
  2359   jint _chunk_size;
  2360   G1CollectedHeap* _g1;
  2361 public:
  2362   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
  2363     AbstractGangTask("ParKnownGarbageTask"),
  2364     _hrSorted(hrSorted), _chunk_size(chunk_size),
  2365     _g1(G1CollectedHeap::heap())
  2366   {}
  2368   void work(uint worker_id) {
  2369     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
  2370                                                _chunk_size,
  2371                                                worker_id);
  2372     // Back to zero for the claim value.
  2373     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
  2374                                          _g1->workers()->active_workers(),
  2375                                          HeapRegion::InitialClaimValue);
  2376     jint regions_added = parKnownGarbageCl.marked_regions_added();
  2377     _hrSorted->incNumMarkedHeapRegions(regions_added);
  2378     if (G1PrintParCleanupStats) {
  2379       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
  2380                  worker_id, parKnownGarbageCl.invokes(), regions_added);
  2381     }
  2382   }
  2383 };
  2385 void
  2386 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  2387   double start_sec;
  2388   if (G1PrintParCleanupStats) {
  2389     start_sec = os::elapsedTime();
  2390   }
  2392   _collectionSetChooser->clearMarkedHeapRegions();
  2393   double clear_marked_end_sec;
  2394   if (G1PrintParCleanupStats) {
  2395     clear_marked_end_sec = os::elapsedTime();
  2396     gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
  2397                            (clear_marked_end_sec - start_sec) * 1000.0);
  2398   }
  2400   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2401     const size_t OverpartitionFactor = 4;
  2402     size_t WorkUnit;
  2403     // The use of MinChunkSize = 8 in the original code
  2404     // causes some assertion failures when the total number of
  2405     // regions is less than 8.  The code here tries to fix that.
  2406     // Should the original code also be fixed?
  2407     if (no_of_gc_threads > 0) {
  2408       const size_t MinWorkUnit =
  2409         MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
  2410       WorkUnit =
  2411         MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
  2412              MinWorkUnit);
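             // e.g., 1000 regions and 4 GC threads give a MinWorkUnit of 250
             // and a WorkUnit of MAX2(1000 / 16, 250) = 250.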
  2413     } else {
  2414       assert(no_of_gc_threads > 0,
  2415         "The active gc workers should be greater than 0");
  2416       // In a product build do something reasonable to avoid a crash.
  2417       const size_t MinWorkUnit =
  2418         MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
  2419       WorkUnit =
  2420         MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
  2421              MinWorkUnit);
  2422     }
  2423     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
  2424                                                              WorkUnit);
  2425     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  2426                                             (int) WorkUnit);
  2427     _g1->workers()->run_task(&parKnownGarbageTask);
  2429     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2430            "sanity check");
  2431   } else {
  2432     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  2433     _g1->heap_region_iterate(&knownGarbagecl);
  2434   }
  2435   double known_garbage_end_sec;
  2436   if (G1PrintParCleanupStats) {
  2437     known_garbage_end_sec = os::elapsedTime();
  2438     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
  2439                       (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  2440   }
  2442   _collectionSetChooser->sortMarkedHeapRegions();
  2443   double end_sec = os::elapsedTime();
  2444   if (G1PrintParCleanupStats) {
  2445     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
  2446                            (end_sec - known_garbage_end_sec) * 1000.0);
  2447   }
  2449   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  2450   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  2451   _cur_mark_stop_world_time_ms += elapsed_time_ms;
  2452   _prev_collection_pause_end_ms += elapsed_time_ms;
  2453   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
  2454 }
  2456 // Add the heap region at the head of the non-incremental collection set
  2457 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  2458   assert(_inc_cset_build_state == Active, "Precondition");
  2459   assert(!hr->is_young(), "non-incremental add of young region");
  2461   assert(!hr->in_collection_set(), "should not already be in the CSet");
  2462   hr->set_in_collection_set(true);
  2463   hr->set_next_in_collection_set(_collection_set);
  2464   _collection_set = hr;
  2465   _collection_set_bytes_used_before += hr->used();
  2466   _g1->register_region_with_in_cset_fast_test(hr);
  2467   size_t rs_length = hr->rem_set()->occupied();
  2468   _recorded_rs_lengths += rs_length;
  2469   _old_cset_region_length += 1;
  2470 }
  2472 // Initialize the per-collection-set information
  2473 void G1CollectorPolicy::start_incremental_cset_building() {
  2474   assert(_inc_cset_build_state == Inactive, "Precondition");
  2476   _inc_cset_head = NULL;
  2477   _inc_cset_tail = NULL;
  2478   _inc_cset_bytes_used_before = 0;
  2480   _inc_cset_max_finger = 0;
  2481   _inc_cset_recorded_rs_lengths = 0;
  2482   _inc_cset_recorded_rs_lengths_diffs = 0;
  2483   _inc_cset_predicted_elapsed_time_ms = 0.0;
  2484   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  2485   _inc_cset_build_state = Active;
  2486 }
  2488 void G1CollectorPolicy::finalize_incremental_cset_building() {
  2489   assert(_inc_cset_build_state == Active, "Precondition");
  2490   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  2492   // The two "main" fields, _inc_cset_recorded_rs_lengths and
  2493   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  2494   // that adds a new region to the CSet. Further updates by the
  2495   // concurrent refinement thread that samples the young RSet lengths
  2496   // are accumulated in the *_diffs fields. Here we add the diffs to
  2497   // the "main" fields.
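         // For example, if the regions added to the CSet were recorded with
         // 1000 RSet entries in total and concurrent sampling has since seen
         // them grow by 120, the "main" field becomes 1120 here.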
  2499   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
  2500     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  2501   } else {
  2502     // This is defensive. The diff should in theory always be positive
  2503     // as RSets can only grow between GCs. However, given that we
  2504     // sample their size concurrently with other threads updating them
  2505     // it's possible that we might get the wrong size back, which
  2506     // could make the calculations somewhat inaccurate.
  2507     size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
  2508     if (_inc_cset_recorded_rs_lengths >= diffs) {
  2509       _inc_cset_recorded_rs_lengths -= diffs;
  2510     } else {
  2511       _inc_cset_recorded_rs_lengths = 0;
  2512     }
  2513   }
  2514   _inc_cset_predicted_elapsed_time_ms +=
  2515                                      _inc_cset_predicted_elapsed_time_ms_diffs;
  2517   _inc_cset_recorded_rs_lengths_diffs = 0;
  2518   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  2519 }
  2521 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  2522   // This routine is used when:
  2523   // * adding survivor regions to the incremental cset at the end of an
  2524   //   evacuation pause,
  2525   // * adding the current allocation region to the incremental cset
  2526   //   when it is retired, and
  2527   // * updating existing policy information for a region in the
  2528   //   incremental cset via young list RSet sampling.
  2529   // Therefore this routine may be called at a safepoint by the
  2530   // VM thread, or in-between safepoints by mutator threads (when
  2531   // retiring the current allocation region) or a concurrent
  2532   // refine thread (RSet sampling).
  2534   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  2535   size_t used_bytes = hr->used();
  2536   _inc_cset_recorded_rs_lengths += rs_length;
  2537   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  2538   _inc_cset_bytes_used_before += used_bytes;
  2540   // Cache the values we have added to the aggregated information
  2541   // in the heap region in case we have to remove this region from
  2542   // the incremental collection set, or it is updated by the
  2543   // RSet sampling code.
  2544   hr->set_recorded_rs_length(rs_length);
  2545   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  2546 }
  2548 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
  2549                                                      size_t new_rs_length) {
  2550   // Update the CSet information that is dependent on the new RS length
  2551   assert(hr->is_young(), "Precondition");
  2552   assert(!SafepointSynchronize::is_at_safepoint(),
  2553                                                "should not be at a safepoint");
  2555   // We could have updated _inc_cset_recorded_rs_lengths and
  2556   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  2557   // that atomically, as this code is executed by a concurrent
  2558   // refinement thread, potentially concurrently with a mutator thread
  2559   // allocating a new region and also updating the same fields. To
  2560   // avoid the atomic operations we accumulate these updates on two
  2561   // separate fields (*_diffs) and we'll just add them to the "main"
  2562   // fields at the start of a GC.
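         // e.g., a sampled region whose RSet grew from 100 to 140 entries
         // contributes +40 to _inc_cset_recorded_rs_lengths_diffs, which is
         // folded into the "main" field at the start of the next GC.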

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}

void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert(hr->next_in_collection_set() == NULL, "invariant");

  _g1->register_region_with_in_cset_fast_test(hr);
}

// Add the region at the RHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  // We should only ever be appending survivors at the end of a pause
  assert(hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Now add the region at the right hand side
  if (_inc_cset_tail == NULL) {
    assert(_inc_cset_head == NULL, "invariant");
    _inc_cset_head = hr;
  } else {
    _inc_cset_tail->set_next_in_collection_set(hr);
  }
  _inc_cset_tail = hr;
}

// Add the region to the LHS of the incremental cset
void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  // Survivors should be added to the RHS at the end of a pause
  assert(!hr->is_survivor(), "Logic");

  // Do the 'common' stuff
  add_region_to_incremental_cset_common(hr);

  // Add the region at the left hand side
  hr->set_next_in_collection_set(_inc_cset_head);
  if (_inc_cset_head == NULL) {
    assert(_inc_cset_tail == NULL, "Invariant");
    _inc_cset_tail = hr;
  }
  _inc_cset_head = hr;
}
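
// To illustrate the two paths above (with hypothetical regions):
// starting from an empty incremental cset, LHS-adding retired eden
// regions E1 then E2 yields [E2 -> E1]; RHS-appending survivors S1
// then S2 at the end of the next pause yields [E2 -> E1 -> S1 -> S2].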

#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                 "age: %4d, y: %d, surv: %d",
                 csr->bottom(), csr->end(),
                 csr->top(),
                 csr->prev_top_at_mark_start(),
                 csr->next_top_at_mark_start(),
                 csr->top_at_conc_mark_count(),
                 csr->age_in_surv_rate_group_cond(),
                 csr->is_young(),
                 csr->is_survivor());
    csr = next;
  }
}
#endif // !PRODUCT

void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;

  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);

  // the 10% and 50% values are arbitrary...
  double threshold = 0.10 * target_pause_time_ms;
  if (time_remaining_ms < threshold) {
    double prev_time_remaining_ms = time_remaining_ms;
    time_remaining_ms = 0.50 * target_pause_time_ms;
    ergo_verbose3(ErgoCSetConstruction,
                  "adjust remaining time",
                  ergo_format_reason("remaining time lower than threshold")
                  ergo_format_ms("remaining time")
                  ergo_format_ms("threshold")
                  ergo_format_ms("adjusted remaining time"),
                  prev_time_remaining_ms, threshold, time_remaining_ms);
  }
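
  // As a worked illustration (numbers are hypothetical): with a
  // 200 ms target and a 185 ms predicted base time, the 15 ms of
  // remaining time falls below the 20 ms (10%) threshold, so it is
  // raised to 100 ms (50% of the target), leaving room to add at
  // least some young regions to the CSet.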

  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;

  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _last_gc_was_young = gcs_are_young();

  if (_last_gc_was_young) {
    ++_young_pause_num;
  } else {
    ++_mixed_pause_num;
  }

  // The young list is laid out so that the survivor regions from the
  // previous pause are appended to its RHS, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].
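  // For instance (hypothetical numbers), a young list of 12 regions
  // of which 4 are survivors gives an eden_region_length of
  // 12 - 4 = 8 below.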

  size_t survivor_region_length = young_list->survivor_length();
  size_t eden_region_length = young_list->length() - survivor_region_length;
  init_cset_region_lengths(eden_region_length, survivor_region_length);
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;

  if (!gcs_are_young()) {
    bool should_continue = true;
    NumberSeq seq;
    double avg_prediction = 100000000000000000.0; // something very large

    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
    do {
      // Note that add_old_region_to_cset() increments the
      // _old_cset_region_length field and cset_region_length() returns the
      // sum of _eden_cset_region_length, _survivor_cset_region_length, and
      // _old_cset_region_length. So, as old regions are added to the
      // CSet, _old_cset_region_length will be incremented and
      // cset_region_length(), which is used below, will always reflect
      // the total number of regions added up to this point to the CSet.
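      // For instance (hypothetical numbers), with 8 eden, 4 survivor,
      // and 3 old regions added so far, cset_region_length() returns
      // 8 + 4 + 3 = 15.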

      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                      avg_prediction);
      if (hr != NULL) {
        _g1->old_set_remove(hr);
        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
        time_remaining_ms -= predicted_time_ms;
        predicted_pause_time_ms += predicted_time_ms;
        add_old_region_to_cset(hr);
        seq.add(predicted_time_ms);
        avg_prediction = seq.avg() + seq.sd();
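        // Feeding seq.avg() + seq.sd() back as the next avg_prediction
        // makes the chooser's notion of a "typical" old region cost
        // adapt as regions are added; the huge initial value above
        // presumably ensures the very first candidate is not filtered
        // out against it.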
      }

      should_continue = true;
      if (hr == NULL) {
        // No need for an ergo verbose message here,
        // getNextMarkedRegion() does this when it returns NULL.
        should_continue = false;
      } else {
        if (adaptive_young_list_length()) {
          if (time_remaining_ms < 0.0) {
            ergo_verbose1(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("remaining time is lower than 0")
                          ergo_format_ms("remaining time"),
                          time_remaining_ms);
            should_continue = false;
          }
        } else {
          if (cset_region_length() >= _young_list_fixed_length) {
            ergo_verbose2(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("CSet length reached target")
                          ergo_format_region("CSet")
                          ergo_format_region("young target"),
                          cset_region_length(), _young_list_fixed_length);
            should_continue = false;
          }
        }
      }
    } while (should_continue);

    if (!adaptive_young_list_length() &&
        cset_region_length() < _young_list_fixed_length) {
      ergo_verbose2(ErgoCSetConstruction,
                    "request mixed GCs end",
                    ergo_format_reason("CSet length lower than target")
                    ergo_format_region("CSet")
                    ergo_format_region("young target"),
                    cset_region_length(), _young_list_fixed_length);
      _should_revert_to_young_gcs = true;
    }

    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
                  "add old regions to CSet",
                  ergo_format_region("old")
                  ergo_format_ms("predicted old region time"),
                  old_cset_region_length(),
                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
  }

  stop_incremental_cset_building();

  count_CS_bytes_used();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                old_cset_region_length(),
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}
