src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      tonyp
date:        Wed, 15 Feb 2012 13:06:53 -0500
changeset:   3539:a9647476d1a4
parent:      3464:eff609af17d7
child:       3667:21595f05bc93
permissions: -rw-r--r--
7132029: G1: mixed GC phase lasts for longer than it should
Summary: Revamp of the mechanism that chooses old regions for inclusion in the CSet. It simplifies the code and introduces min and max bounds on the number of old regions added to the CSet at each mixed GC to avoid pathological cases. It also ensures that when we do a mixed GC we'll always find old regions to add to the CSet (i.e., it eliminates the case where a mixed GC will collect no old regions, which can happen today).
Reviewed-by: johnc, brutisso

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"
// Different defaults for different numbers of GC threads.
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
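
// Example usage (illustrative, not part of the original file): build one
// log line incrementally and emit it with a single print so that output
// from concurrent GC threads cannot interleave mid-line.
//
//   LineBuffer buf(1);                    // indent one level (3 spaces)
//   buf.append("[%s (ms):", "GC Worker");
//   buf.append(" %3.1lf", 1.5);
//   buf.append_and_print_cr("]");         // prints "   [GC Worker (ms): 1.5]"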
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),
  _mark_closure_time_ms(0.0),
  _root_region_scan_wait_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),
  _young_pause_num(0),
  _mixed_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _all_full_gc_times_ms(new NumberSeq()),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verbosity level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }
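
  // Worked example (illustrative): with neither flag set on the command
  // line, MaxGCPauseMillis defaults to 200 above and GCPauseIntervalMillis
  // to 200 + 1 = 201, the tightest interval that still satisfies the
  // invariant MaxGCPauseMillis < GCPauseIntervalMillis.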

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
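  // For example, GCTimeRatio == 9 gives 100 * (1 / (1 + 9)) = 10.0, i.e.
  // a target of spending at most 10% of total time in GC.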

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
  assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
  assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
      _sizer_kind = SizerMaxAndNewSize;
      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}
size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
  size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
  return MAX2((size_t)1, default_value);
}

size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
  size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
  return MAX2((size_t)1, default_value);
}
void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. The values were set on the command line; don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}
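
// Worked example (illustrative): under SizerNewRatio with NewRatio == 2 and
// a heap of 300 regions, min == max == 300 / (2 + 1) == 100 regions, so the
// young gen is pinned at a third of the heap and is not resized adaptively.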
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  start_incremental_cset_building();
}
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
bool G1CollectorPolicy::predict_will_fit(size_t young_length,
                                         double base_time_ms,
                                         size_t base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}
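
// Illustrative numbers: with base_time_ms == 100.0, a 200ms target, and a
// proposed young length whose predicted copy + young-other cost comes to
// 80.0 + 30.0 ms, pause_time_ms == 210.0 exceeds the target, so end
// condition 2 rejects the length even if free space (conditions 1 and 3)
// is plentiful.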
void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (size_t) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}
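
// For example, with _reserve_factor == 0.10 a heap of 25 regions yields
// reserve_regions_d == 2.5, and ceil() makes _reserve_regions == 3.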
size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     size_t base_min_length) {
  size_t desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}
void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}
size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                   size_t base_min_length,
                                                   size_t desired_min_length,
                                                   size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}
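
// Illustrative search trace: say min_young_length == 8 fits and
// max_young_length == 40 does not. The loop probes 8 + 16 == 24 (fits, so
// min becomes 24), then 24 + 8 == 32 (does not fit, so max becomes 32),
// narrowing the range until diff == 0; min_young_length then holds the
// largest length still predicted to fit within the pause target.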
double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion* r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}
void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}
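
// For example, if the sampled remembered set lengths come to 2000 entries
// against a previous prediction of 1800, the target is recomputed with a
// padded prediction of 2000 * 1100 / 1000 == 2200, so small further growth
// does not force a recalculation on every sample.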
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT
void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clearMarkedHeapRegions();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there's no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_satb_filtering_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
    _par_last_gc_worker_other_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  // This is initialized to zero here and is set during
  // the evacuation pause if marking is in progress.
  _cur_satb_drain_time_ms = 0.0;
  // This is initialized to zero here and is set during the evacuation
  // pause if we actually waited for the root region scanning to finish.
  _root_region_scan_wait_time_ms = 0.0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}
void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}
void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < no_of_gc_threads(); ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append("  %3.1lf", val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) no_of_gc_threads();
  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
    avg, min, max, max - min);
}
void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < no_of_gc_threads(); ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) no_of_gc_threads();
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
    (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
}
void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}
double G1CollectorPolicy::avg_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < no_of_gc_threads(); ++i) {
      ret += data[i];
    }
    return ret / (double) no_of_gc_threads();
  } else {
    return data[0];
  }
}
double G1CollectorPolicy::max_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < no_of_gc_threads(); ++i) {
      if (data[i] > ret) {
        ret = data[i];
      }
    }
    return ret;
  } else {
    return data[0];
  }
}
double G1CollectorPolicy::sum_of_values(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < no_of_gc_threads(); i++) {
      sum += data[i];
    }
    return sum;
  } else {
    return data[0];
  }
}
double G1CollectorPolicy::max_sum(double* data1, double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < no_of_gc_threads(); ++i) {
      double data = data1[i] + data2[i];
      if (data > ret) {
        ret = data;
      }
    }
  }
  return ret;
}
bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}
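
// Illustrative threshold: with a 1024MB heap and the default
// InitiatingHeapOccupancyPercent of 45, a concurrent cycle is requested
// once non-young occupancy plus the pending allocation exceeds ~460MB
// (provided no cycle is already in progress and GCs are still young).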
// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001
  1170 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
  1171   double end_time_sec = os::elapsedTime();
  1172   double elapsed_ms = _last_pause_time_ms;
  1173   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1174   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
  1175          "otherwise, the subtraction below does not make sense");
  1176   size_t rs_size =
  1177             _cur_collection_pause_used_regions_at_start - cset_region_length();
  1178   size_t cur_used_bytes = _g1->used();
  1179   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1180   bool last_pause_included_initial_mark = false;
  1181   bool update_stats = !_g1->evacuation_failed();
  1182   set_no_of_gc_threads(no_of_gc_threads);
  1184 #ifndef PRODUCT
  1185   if (G1YoungSurvRateVerbose) {
  1186     gclog_or_tty->print_cr("");
  1187     _short_lived_surv_rate_group->print();
  1188     // do that for any other surv rate groups too
  1190 #endif // PRODUCT
  1192   last_pause_included_initial_mark = during_initial_mark_pause();
  1193   if (last_pause_included_initial_mark) {
  1194     record_concurrent_mark_init_end(0.0);
  1195   } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
  1196     // Note: this might have already been set, if during the last
  1197     // pause we decided to start a cycle but at the beginning of
  1198     // this pause we decided to postpone it. That's OK.
  1199     set_initiate_conc_mark_if_possible();
  1202   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
  1203                           end_time_sec, false);
  1205   // This assert is exempted when we're doing parallel collection pauses,
  1206   // because the fragmentation caused by the parallel GC allocation buffers
  1207   // can lead to more memory being used during collection than was used
  1208   // before. Best leave this out until the fragmentation problem is fixed.
  1209   // Pauses in which evacuation failed can also lead to negative
  1210   // collections, since no space is reclaimed from a region containing an
  1211   // object whose evacuation failed.
  1212   // Further, we're now always doing parallel collection.  But I'm still
  1213   // leaving this here as a placeholder for a more precise assertion later.
  1214   // (DLD, 10/05.)
  1215   assert((true || parallel) // Always using GC LABs now.
  1216          || _g1->evacuation_failed()
  1217          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
  1218          "Negative collection");
  1220   size_t freed_bytes =
  1221     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  1222   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
  1224   double survival_fraction =
  1225     (double)surviving_bytes/
  1226     (double)_collection_set_bytes_used_before;
  1228   // These values are used to update the summary information that is
  1229   // displayed when TraceGen0Time is enabled, and are output as part
  1230   // of the PrintGCDetails output, in the non-parallel case.
  1232   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  1233   double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
  1234   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  1235   double update_rs_processed_buffers =
  1236     sum_of_values(_par_last_update_rs_processed_buffers);
  1237   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  1238   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  1239   double termination_time = avg_value(_par_last_termination_times_ms);
  1241   double known_time = ext_root_scan_time +
  1242                       satb_filtering_time +
  1243                       update_rs_time +
  1244                       scan_rs_time +
  1245                       obj_copy_time;
  1247   double other_time_ms = elapsed_ms;
  1249   // Subtract the SATB drain time. It's initialized to zero at the
  1250   // start of the pause and is updated during the pause if marking
  1251   // is in progress.
  1252   other_time_ms -= _cur_satb_drain_time_ms;
  1254   // Subtract the root region scanning wait time. It's initialized to
  1255   // zero at the start of the pause.
  1256   other_time_ms -= _root_region_scan_wait_time_ms;
  1258   if (parallel) {
  1259     other_time_ms -= _cur_collection_par_time_ms;
  1260   } else {
  1261     other_time_ms -= known_time;
  1264   // Subtract the time taken to clean the card table from the
  1265   // current value of "other time"
  1266   other_time_ms -= _cur_clear_ct_time_ms;
  1268   // Subtract the time spent completing marking in the collection
  1269   // set. Note if marking is not in progress during the pause
  1270   // the value of _mark_closure_time_ms will be zero.
  1271   other_time_ms -= _mark_closure_time_ms;
  1273   // TraceGen0Time and TraceGen1Time summary info updating.
  1274   _all_pause_times_ms->add(elapsed_ms);
  1276   if (update_stats) {
  1277     _summary->record_total_time_ms(elapsed_ms);
  1278     _summary->record_other_time_ms(other_time_ms);
  1280     MainBodySummary* body_summary = _summary->main_body_summary();
  1281     assert(body_summary != NULL, "should not be null!");
  1283     // This will be non-zero iff marking is currently in progress (i.e.
  1284     // _g1->mark_in_progress() == true) and the currrent pause was not
  1285     // an initial mark pause. Since the body_summary items are NumberSeqs,
  1286     // however, they have to be consistent and updated in lock-step with
  1287     // each other. Therefore we unconditionally record the SATB drain
  1288     // time - even if it's zero.
  1289     body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
  1290     body_summary->record_root_region_scan_wait_time_ms(
  1291                                                _root_region_scan_wait_time_ms);
  1293     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
  1294     body_summary->record_satb_filtering_time_ms(satb_filtering_time);
  1295     body_summary->record_update_rs_time_ms(update_rs_time);
  1296     body_summary->record_scan_rs_time_ms(scan_rs_time);
  1297     body_summary->record_obj_copy_time_ms(obj_copy_time);
  1299     if (parallel) {
  1300       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
  1301       body_summary->record_termination_time_ms(termination_time);
  1303       double parallel_known_time = known_time + termination_time;
  1304       double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
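             // A worked example with assumed, illustrative numbers: if
             // _cur_collection_par_time_ms is 40.0 ms and the known phases
             // sum to 37.0 ms, the remaining 3.0 ms is recorded as the
             // per-worker "other" time.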
  1305       body_summary->record_parallel_other_time_ms(parallel_other_time);
  1306     }
  1308     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
  1309     body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
  1311     // We exempt parallel collection from this check because Alloc Buffer
  1312     // fragmentation can produce negative collections. The same applies to
  1313     // evacuation failure.
  1314     // Further, we're now always doing parallel collection. But I'm still
  1315     // leaving this here as a placeholder for a more precise assertion later.
  1316     // (DLD, 10/05)
  1317     assert((true || parallel)
  1318            || _g1->evacuation_failed()
  1319            || surviving_bytes <= _collection_set_bytes_used_before,
  1320            "Or else negative collection!");
  1322     // this is where we update the allocation rate of the application
  1323     double app_time_ms =
  1324       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1325     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1326       // This usually happens due to the timer not having the required
  1327       // granularity. Some Linuxes are the usual culprits.
  1328       // We'll just set it to something (arbitrarily) small.
  1329       app_time_ms = 1.0;
  1330     }
  1331     // We maintain the invariant that all objects allocated by mutator
  1332     // threads will be allocated out of eden regions. So, we can use
  1333     // the eden region number allocated since the previous GC to
  1334     // calculate the application's allocate rate. The only exception
  1335     // to that is humongous objects that are allocated separately. But
  1336     // given that humongous object allocations do not really affect
  1337     // either the pause's duration nor when the next pause will take
  1338     // place we can safely ignore them here.
  1339     size_t regions_allocated = eden_cset_region_length();
  1340     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1341     _alloc_rate_ms_seq->add(alloc_rate_ms);
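           // A worked example with assumed, illustrative numbers: 20 eden
           // regions allocated over an app_time_ms of 500.0 gives an
           // allocation rate of 0.04 regions/ms, which feeds the young
           // list sizing predictions.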
  1343     double interval_ms =
  1344       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1345     update_recent_gc_times(end_time_sec, elapsed_ms);
  1346     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
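           // A worked example with assumed, illustrative numbers: if the
           // recent GC times sum to 240.0 ms and the oldest recorded GC end
           // time is 4.8 s before end_time_sec, then interval_ms is 4800.0
           // and the ratio is 0.05, i.e., roughly 5% of recent wall-clock
           // time was spent in GC pauses.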
  1347     if (recent_avg_pause_time_ratio() < 0.0 ||
  1348         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1349 #ifndef PRODUCT
  1350       // Dump info to allow post-facto debugging
  1351       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1352       gclog_or_tty->print_cr("-------------------------------------------");
  1353       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1354       _recent_gc_times_ms->dump();
  1355       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1356       _recent_prev_end_times_for_all_gcs_sec->dump();
  1357       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1358                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1359       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1360       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1361 #endif  // !PRODUCT
  1362       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1363       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1364       if (_recent_avg_pause_time_ratio < 0.0) {
  1365         _recent_avg_pause_time_ratio = 0.0;
  1366       } else {
  1367         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1368         _recent_avg_pause_time_ratio = 1.0;
  1369       }
  1370     }
  1371   }
  1373   for (int i = 0; i < _aux_num; ++i) {
  1374     if (_cur_aux_times_set[i]) {
  1375       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1376     }
  1377   }
  1379   // PrintGCDetails output
  1380   if (PrintGCDetails) {
  1381     bool print_marking_info =
  1382       _g1->mark_in_progress() && !last_pause_included_initial_mark;
  1384     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1385                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1386                            elapsed_ms / 1000.0);
  1388     if (_root_region_scan_wait_time_ms > 0.0) {
  1389       print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  1390     }
  1391     if (parallel) {
  1392       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1393       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
  1394       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
  1395       if (print_marking_info) {
  1396         print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
  1397       }
  1398       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1399       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  1400       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1401       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1402       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1403       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  1404       print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
  1406       for (int i = 0; i < _parallel_gc_threads; i++) {
  1407         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
  1409         double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
  1410                                    _par_last_satb_filtering_times_ms[i] +
  1411                                    _par_last_update_rs_times_ms[i] +
  1412                                    _par_last_scan_rs_times_ms[i] +
  1413                                    _par_last_obj_copy_times_ms[i] +
  1414                                    _par_last_termination_times_ms[i];
  1416         _par_last_gc_worker_other_times_ms[i] = _cur_collection_par_time_ms - worker_known_time;
  1417       }
  1418       print_par_stats(2, "GC Worker", _par_last_gc_worker_times_ms);
  1419       print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
  1420     } else {
  1421       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1422       if (print_marking_info) {
  1423         print_stats(1, "SATB Filtering", satb_filtering_time);
  1424       }
  1425       print_stats(1, "Update RS", update_rs_time);
  1426       print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
  1427       print_stats(1, "Scan RS", scan_rs_time);
  1428       print_stats(1, "Object Copying", obj_copy_time);
  1429     }
  1430     if (print_marking_info) {
  1431       print_stats(1, "Complete CSet Marking", _mark_closure_time_ms);
  1432     }
  1433     print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1434 #ifndef PRODUCT
  1435     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1436     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1437     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1438     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1439     if (_num_cc_clears > 0) {
  1440       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1441     }
  1442 #endif
  1443     print_stats(1, "Other", other_time_ms);
  1444     print_stats(2, "Choose CSet",
  1445                    (_recorded_young_cset_choice_time_ms +
  1446                     _recorded_non_young_cset_choice_time_ms));
  1447     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  1448     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  1449     print_stats(2, "Free CSet",
  1450                    (_recorded_young_free_cset_time_ms +
  1451                     _recorded_non_young_free_cset_time_ms));
  1453     for (int i = 0; i < _aux_num; ++i) {
  1454       if (_cur_aux_times_set[i]) {
  1455         char buffer[96];
  1456         sprintf(buffer, "Aux%d", i);
  1457         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1458       }
  1459     }
  1460   }
  1462   // Update the efficiency-since-mark vars.
  1463   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1464   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1465     // This usually happens due to the timer not having the required
  1466     // granularity. Some Linuxes are the usual culprits.
  1467     // We'll just set it to something (arbitrarily) small.
  1468     proc_ms = 1.0;
  1469   }
  1470   double cur_efficiency = (double) freed_bytes / proc_ms;
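         // A worked example with assumed, illustrative numbers: a 50.0 ms
         // pause with 4 GC threads gives proc_ms = 200.0; freeing 40 MB
         // then yields an efficiency of roughly 200 KB per GC-thread-ms.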
  1472   bool new_in_marking_window = _in_marking_window;
  1473   bool new_in_marking_window_im = false;
  1474   if (during_initial_mark_pause()) {
  1475     new_in_marking_window = true;
  1476     new_in_marking_window_im = true;
  1477   }
  1479   if (_last_young_gc) {
  1480     // This is supposed to be the "last young GC" before we start
  1481     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
  1483     if (!last_pause_included_initial_mark) {
  1484       if (next_gc_should_be_mixed("start mixed GCs",
  1485                                   "do not start mixed GCs")) {
  1486         set_gcs_are_young(false);
  1487       }
  1488     } else {
  1489       ergo_verbose0(ErgoMixedGCs,
  1490                     "do not start mixed GCs",
  1491                     ergo_format_reason("concurrent cycle is about to start"));
  1492     }
  1493     _last_young_gc = false;
  1494   }
  1496   if (!_last_gc_was_young) {
  1497     // This is a mixed GC. Here we decide whether to continue doing
  1498     // mixed GCs or not.
  1500     if (!next_gc_should_be_mixed("continue mixed GCs",
  1501                                  "do not continue mixed GCs")) {
  1502       set_gcs_are_young(true);
  1503     }
  1504   }
  1506   if (_last_gc_was_young && !_during_marking) {
  1507     _young_gc_eff_seq->add(cur_efficiency);
  1508   }
  1510   _short_lived_surv_rate_group->start_adding_regions();
  1511   // do that for any other surv rate groups
  1513   if (update_stats) {
  1514     double pause_time_ms = elapsed_ms;
  1516     size_t diff = 0;
  1517     if (_max_pending_cards >= _pending_cards)
  1518       diff = _max_pending_cards - _pending_cards;
  1519     _pending_card_diff_seq->add((double) diff);
  1521     double cost_per_card_ms = 0.0;
  1522     if (_pending_cards > 0) {
  1523       cost_per_card_ms = update_rs_time / (double) _pending_cards;
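             // With assumed, illustrative numbers: an update_rs_time of
             // 12.0 ms for 6000 pending cards gives 0.002 ms per card.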
  1524       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1525     }
  1527     size_t cards_scanned = _g1->cards_scanned();
  1529     double cost_per_entry_ms = 0.0;
  1530     if (cards_scanned > 10) {
  1531       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1532       if (_last_gc_was_young) {
  1533         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1534       } else {
  1535         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1536       }
  1537     }
  1539     if (_max_rs_lengths > 0) {
  1540       double cards_per_entry_ratio =
  1541         (double) cards_scanned / (double) _max_rs_lengths;
  1542       if (_last_gc_was_young) {
  1543         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1544       } else {
  1545         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1546       }
  1547     }
  1549     // This is defensive. For a while _max_rs_lengths could get
  1550     // smaller than _recorded_rs_lengths which was causing
  1551     // rs_length_diff to get very large and mess up the RSet length
  1552     // predictions. The reason was unsafe concurrent updates to the
  1553     // _inc_cset_recorded_rs_lengths field which the code below guards
  1554     // against (see CR 7118202). This bug has now been fixed (see CR
  1555     // 7119027). However, I'm still worried that
  1556     // _inc_cset_recorded_rs_lengths might still end up somewhat
  1557     // inaccurate. The concurrent refinement thread calculates an
  1558     // RSet's length concurrently with other CR threads updating it
  1559     // which might cause it to calculate the length incorrectly (if,
  1560     // say, it's in mid-coarsening). So I'll leave in the defensive
  1561     // conditional below just in case.
  1562     size_t rs_length_diff = 0;
  1563     if (_max_rs_lengths > _recorded_rs_lengths) {
  1564       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
  1565     }
  1566     _rs_length_diff_seq->add((double) rs_length_diff);
  1568     size_t copied_bytes = surviving_bytes;
  1569     double cost_per_byte_ms = 0.0;
  1570     if (copied_bytes > 0) {
  1571       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
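             // With assumed, illustrative numbers: an obj_copy_time of
             // 30.0 ms for 3 MB of surviving bytes gives roughly
             // 0.00001 ms per byte.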
  1572       if (_in_marking_window) {
  1573         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1574       } else {
  1575         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1576       }
  1577     }
  1579     double all_other_time_ms = pause_time_ms -
  1580       (update_rs_time + scan_rs_time + obj_copy_time +
  1581        _mark_closure_time_ms + termination_time);
  1583     double young_other_time_ms = 0.0;
  1584     if (young_cset_region_length() > 0) {
  1585       young_other_time_ms =
  1586         _recorded_young_cset_choice_time_ms +
  1587         _recorded_young_free_cset_time_ms;
  1588       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1589                                           (double) young_cset_region_length());
  1590     }
  1591     double non_young_other_time_ms = 0.0;
  1592     if (old_cset_region_length() > 0) {
  1593       non_young_other_time_ms =
  1594         _recorded_non_young_cset_choice_time_ms +
  1595         _recorded_non_young_free_cset_time_ms;
  1597       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1598                                             (double) old_cset_region_length());
  1599     }
  1601     double constant_other_time_ms = all_other_time_ms -
  1602       (young_other_time_ms + non_young_other_time_ms);
  1603     _constant_other_time_ms_seq->add(constant_other_time_ms);
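           // With assumed, illustrative numbers: if all_other_time_ms is
           // 6.0 with young_other_time_ms = 2.5 and non_young_other_time_ms
           // = 1.5, then 2.0 ms is treated as constant per-pause overhead.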
  1605     double survival_ratio = 0.0;
  1606     if (_bytes_in_collection_set_before_gc > 0) {
  1607       survival_ratio = (double) _bytes_copied_during_gc /
  1608                                    (double) _bytes_in_collection_set_before_gc;
  1609     }
  1611     _pending_cards_seq->add((double) _pending_cards);
  1612     _rs_lengths_seq->add((double) _max_rs_lengths);
  1613   }
  1615   _in_marking_window = new_in_marking_window;
  1616   _in_marking_window_im = new_in_marking_window_im;
  1617   _free_regions_at_end_of_collection = _g1->free_regions();
  1618   update_young_list_target_length();
  1620   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1621   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
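         // With assumed, illustrative numbers: a 200 ms MMU pause budget
         // (max_gc_time() == 0.2) and G1RSetUpdatingPauseTimePercent = 10
         // give an update RS goal of 200 * 10 / 100 = 20.0 ms.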
  1622   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  1624   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
  1625 }
  1627 #define EXT_SIZE_FORMAT "%d%s"
  1628 #define EXT_SIZE_PARAMS(bytes)                                  \
  1629   byte_size_in_proper_unit((bytes)),                            \
  1630   proper_unit_for_byte_size((bytes))
  1632 void G1CollectorPolicy::print_heap_transition() {
  1633   if (PrintGCDetails) {
  1634     YoungList* young_list = _g1->young_list();
  1635     size_t eden_bytes = young_list->eden_used_bytes();
  1636     size_t survivor_bytes = young_list->survivor_used_bytes();
  1637     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  1638     size_t used = _g1->used();
  1639     size_t capacity = _g1->capacity();
  1640     size_t eden_capacity =
  1641       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
  1643     gclog_or_tty->print_cr(
  1644       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1645       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1646       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1647       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1648       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
  1649       EXT_SIZE_PARAMS(_prev_eden_capacity),
  1650       EXT_SIZE_PARAMS(eden_bytes),
  1651       EXT_SIZE_PARAMS(eden_capacity),
  1652       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
  1653       EXT_SIZE_PARAMS(survivor_bytes),
  1654       EXT_SIZE_PARAMS(used_before_gc),
  1655       EXT_SIZE_PARAMS(_capacity_before_gc),
  1656       EXT_SIZE_PARAMS(used),
  1657       EXT_SIZE_PARAMS(capacity));
  1659     _prev_eden_capacity = eden_capacity;
  1660   } else if (PrintGC) {
  1661     _g1->print_size_transition(gclog_or_tty,
  1662                                _cur_collection_pause_used_at_start_bytes,
  1663                                _g1->used(), _g1->capacity());
  1664   }
  1665 }
  1667 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1668                                                      double update_rs_processed_buffers,
  1669                                                      double goal_ms) {
  1670   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1671   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1673   if (G1UseAdaptiveConcRefinement) {
  1674     const int k_gy = 3, k_gr = 6;
  1675     const double inc_k = 1.1, dec_k = 0.9;
  1677     int g = cg1r->green_zone();
  1678     if (update_rs_time > goal_ms) {
  1679       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
  1680     } else {
  1681       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1682         g = (int)MAX2(g * inc_k, g + 1.0);
  1683       }
  1684     }
  1685     // Change the refinement threads params
  1686     cg1r->set_green_zone(g);
  1687     cg1r->set_yellow_zone(g * k_gy);
  1688     cg1r->set_red_zone(g * k_gr);
  1689     cg1r->reinitialize_threads();
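           // With assumed, illustrative numbers: a green zone of 100
           // buffers gives a yellow zone of 300 and a red zone of 600, so
           // refinement activity ramps up as the completed buffer queue
           // grows.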
  1691     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1692     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1693                                     cg1r->yellow_zone());
  1694     // Change the barrier params
  1695     dcqs.set_process_completed_threshold(processing_threshold);
  1696     dcqs.set_max_completed_queue(cg1r->red_zone());
  1697   }
  1699   int curr_queue_size = dcqs.completed_buffers_num();
  1700   if (curr_queue_size >= cg1r->yellow_zone()) {
  1701     dcqs.set_completed_queue_padding(curr_queue_size);
  1702   } else {
  1703     dcqs.set_completed_queue_padding(0);
  1704   }
  1705   dcqs.notify_if_necessary();
  1706 }
  1708 double
  1709 G1CollectorPolicy::
  1710 predict_young_collection_elapsed_time_ms(size_t adjustment) {
  1711   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
  1713   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1714   size_t young_num = g1h->young_list()->length();
  1715   if (young_num == 0)
  1716     return 0.0;
  1718   young_num += adjustment;
  1719   size_t pending_cards = predict_pending_cards();
  1720   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  1721                       predict_rs_length_diff();
  1722   size_t card_num;
  1723   if (gcs_are_young()) {
  1724     card_num = predict_young_card_num(rs_lengths);
  1725   } else {
  1726     card_num = predict_non_young_card_num(rs_lengths);
  1727   }
  1728   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  1729   double accum_yg_surv_rate =
  1730     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
  1732   size_t bytes_to_copy =
  1733     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
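         // accum_yg_surv_rate sums the per-age survival rates over the
         // young regions, so multiplying by the region size approximates
         // the total bytes to copy. With assumed, illustrative numbers: an
         // accumulated rate of 2.5 and 1 MB regions predict ~2.5 MB copied.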
  1735   return
  1736     predict_rs_update_time_ms(pending_cards) +
  1737     predict_rs_scan_time_ms(card_num) +
  1738     predict_object_copy_time_ms(bytes_to_copy) +
  1739     predict_young_other_time_ms(young_num) +
  1740     predict_constant_other_time_ms();
  1741 }
  1743 double
  1744 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1745   size_t rs_length = predict_rs_length_diff();
  1746   size_t card_num;
  1747   if (gcs_are_young()) {
  1748     card_num = predict_young_card_num(rs_length);
  1749   } else {
  1750     card_num = predict_non_young_card_num(rs_length);
  1751   }
  1752   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1753 }
  1755 double
  1756 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1757                                                 size_t scanned_cards) {
  1758   return
  1759     predict_rs_update_time_ms(pending_cards) +
  1760     predict_rs_scan_time_ms(scanned_cards) +
  1761     predict_constant_other_time_ms();
  1762 }
  1764 double
  1765 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1766                                                   bool young) {
  1767   size_t rs_length = hr->rem_set()->occupied();
  1768   size_t card_num;
  1769   if (gcs_are_young()) {
  1770     card_num = predict_young_card_num(rs_length);
  1771   } else {
  1772     card_num = predict_non_young_card_num(rs_length);
  1773   }
  1774   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1776   double region_elapsed_time_ms =
  1777     predict_rs_scan_time_ms(card_num) +
  1778     predict_object_copy_time_ms(bytes_to_copy);
  1780   if (young)
  1781     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1782   else
  1783     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1785   return region_elapsed_time_ms;
  1786 }
  1788 size_t
  1789 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1790   size_t bytes_to_copy;
  1791   if (hr->is_marked())
  1792     bytes_to_copy = hr->max_live_bytes();
  1793   else {
  1794     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
  1795     int age = hr->age_in_surv_rate_group();
  1796     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
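           // With assumed, illustrative numbers: a young region with
           // 800 KB used and a predicted survival rate of 0.3 contributes
           // roughly 240 KB to the expected copying cost.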
  1797     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1798   }
  1799   return bytes_to_copy;
  1800 }
  1802 void
  1803 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
  1804                                           size_t survivor_cset_region_length) {
  1805   _eden_cset_region_length     = eden_cset_region_length;
  1806   _survivor_cset_region_length = survivor_cset_region_length;
  1807   _old_cset_region_length      = 0;
  1808 }
  1810 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1811   _recorded_rs_lengths = rs_lengths;
  1812 }
  1814 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1815                                                double elapsed_ms) {
  1816   _recent_gc_times_ms->add(elapsed_ms);
  1817   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1818   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1819 }
  1821 size_t G1CollectorPolicy::expansion_amount() {
  1822   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  1823   double threshold = _gc_overhead_perc;
  1824   if (recent_gc_overhead > threshold) {
  1825     // We will double the existing space, or take
  1826     // G1ExpandByPercentOfAvailable % of the available expansion
  1827     // space, whichever is smaller, bounded below by a minimum
  1828     // expansion (unless that's all that's left).
  1829     const size_t min_expand_bytes = 1*M;
  1830     size_t reserved_bytes = _g1->max_capacity();
  1831     size_t committed_bytes = _g1->capacity();
  1832     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  1833     size_t expand_bytes;
  1834     size_t expand_bytes_via_pct =
  1835       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  1836     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  1837     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  1838     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
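           // A worked example with assumed, illustrative numbers: 1024 MB
           // reserved and 512 MB committed leave 512 MB uncommitted; with
           // G1ExpandByPercentOfAvailable = 20 the percentage-based amount
           // is ~102 MB, which survives all three clamps, so we would
           // attempt to expand by ~102 MB.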
  1840     ergo_verbose5(ErgoHeapSizing,
  1841                   "attempt heap expansion",
  1842                   ergo_format_reason("recent GC overhead higher than "
  1843                                      "threshold after GC")
  1844                   ergo_format_perc("recent GC overhead")
  1845                   ergo_format_perc("threshold")
  1846                   ergo_format_byte("uncommitted")
  1847                   ergo_format_byte_perc("calculated expansion amount"),
  1848                   recent_gc_overhead, threshold,
  1849                   uncommitted_bytes,
  1850                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  1852     return expand_bytes;
  1853   } else {
  1854     return 0;
  1855   }
  1856 }
  1858 class CountCSClosure: public HeapRegionClosure {
  1859   G1CollectorPolicy* _g1_policy;
  1860 public:
  1861   CountCSClosure(G1CollectorPolicy* g1_policy) :
  1862     _g1_policy(g1_policy) {}
  1863   bool doHeapRegion(HeapRegion* r) {
  1864     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  1865     return false;
  1866   }
  1867 };
  1869 void G1CollectorPolicy::count_CS_bytes_used() {
  1870   CountCSClosure cs_closure(this);
  1871   _g1->collection_set_iterate(&cs_closure);
  1872 }
  1874 void G1CollectorPolicy::print_summary(int level,
  1875                                       const char* str,
  1876                                       NumberSeq* seq) const {
  1877   double sum = seq->sum();
  1878   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  1879                 str, sum / 1000.0, seq->avg());
  1880 }
  1882 void G1CollectorPolicy::print_summary_sd(int level,
  1883                                          const char* str,
  1884                                          NumberSeq* seq) const {
  1885   print_summary(level, str, seq);
  1886   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  1887                 seq->num(), seq->sd(), seq->maximum());
  1888 }
  1890 void G1CollectorPolicy::check_other_times(int level,
  1891                                         NumberSeq* other_times_ms,
  1892                                         NumberSeq* calc_other_times_ms) const {
  1893   bool should_print = false;
  1894   LineBuffer buf(level + 2);
  1896   double max_sum = MAX2(fabs(other_times_ms->sum()),
  1897                         fabs(calc_other_times_ms->sum()));
  1898   double min_sum = MIN2(fabs(other_times_ms->sum()),
  1899                         fabs(calc_other_times_ms->sum()));
  1900   double sum_ratio = max_sum / min_sum;
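         // With assumed, illustrative numbers: recorded and calculated
         // sums of 10.0 ms and 8.5 ms give a sum_ratio of ~1.18, which
         // exceeds the 1.1 tolerance and triggers the warning below.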
  1901   if (sum_ratio > 1.1) {
  1902     should_print = true;
  1903     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  1904   }
  1906   double max_avg = MAX2(fabs(other_times_ms->avg()),
  1907                         fabs(calc_other_times_ms->avg()));
  1908   double min_avg = MIN2(fabs(other_times_ms->avg()),
  1909                         fabs(calc_other_times_ms->avg()));
  1910   double avg_ratio = max_avg / min_avg;
  1911   if (avg_ratio > 1.1) {
  1912     should_print = true;
  1913     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  1914   }
  1916   if (other_times_ms->sum() < -0.01) {
  1917     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  1918   }
  1920   if (other_times_ms->avg() < -0.01) {
  1921     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  1922   }
  1924   if (calc_other_times_ms->sum() < -0.01) {
  1925     should_print = true;
  1926     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  1927   }
  1929   if (calc_other_times_ms->avg() < -0.01) {
  1930     should_print = true;
  1931     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  1932   }
  1934   if (should_print)
  1935     print_summary(level, "Other(Calc)", calc_other_times_ms);
  1936 }
  1938 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  1939   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1940   MainBodySummary*    body_summary = summary->main_body_summary();
  1941   if (summary->get_total_seq()->num() > 0) {
  1942     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  1943     if (body_summary != NULL) {
  1944       print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
  1945       if (parallel) {
  1946         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  1947         print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  1948         print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
  1949         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  1950         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  1951         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  1952         print_summary(2, "Termination", body_summary->get_termination_seq());
  1953         print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
  1954         {
  1955           NumberSeq* other_parts[] = {
  1956             body_summary->get_ext_root_scan_seq(),
  1957             body_summary->get_satb_filtering_seq(),
  1958             body_summary->get_update_rs_seq(),
  1959             body_summary->get_scan_rs_seq(),
  1960             body_summary->get_obj_copy_seq(),
  1961             body_summary->get_termination_seq()
  1962           };
  1963           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  1964                                         6, other_parts);
  1965           check_other_times(2, body_summary->get_parallel_other_seq(),
  1966                             &calc_other_times_ms);
  1967         }
  1968       } else {
  1969         print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  1970         print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
  1971         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  1972         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  1973         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  1974       }
  1975     }
  1976     print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
  1977     print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  1978     print_summary(1, "Other", summary->get_other_seq());
  1979     {
  1980       if (body_summary != NULL) {
  1981         NumberSeq calc_other_times_ms;
  1982         if (parallel) {
  1983           // parallel
  1984           NumberSeq* other_parts[] = {
  1985             body_summary->get_satb_drain_seq(),
  1986             body_summary->get_root_region_scan_wait_seq(),
  1987             body_summary->get_parallel_seq(),
  1988             body_summary->get_clear_ct_seq()
  1989           };
  1990           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  1991                                           4, other_parts);
  1992         } else {
  1993           // serial
  1994           NumberSeq* other_parts[] = {
  1995             body_summary->get_satb_drain_seq(),
  1996             body_summary->get_root_region_scan_wait_seq(),
  1997             body_summary->get_update_rs_seq(),
  1998             body_summary->get_ext_root_scan_seq(),
  1999             body_summary->get_satb_filtering_seq(),
  2000             body_summary->get_scan_rs_seq(),
  2001             body_summary->get_obj_copy_seq()
  2002           };
  2003           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2004                                           7, other_parts);
  2005         }
  2006         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
  2007       }
  2008     }
  2009   } else {
  2010     LineBuffer(1).append_and_print_cr("none");
  2011   }
  2012   LineBuffer(0).append_and_print_cr("");
  2013 }
  2015 void G1CollectorPolicy::print_tracing_info() const {
  2016   if (TraceGen0Time) {
  2017     gclog_or_tty->print_cr("ALL PAUSES");
  2018     print_summary_sd(0, "Total", _all_pause_times_ms);
  2019     gclog_or_tty->print_cr("");
  2020     gclog_or_tty->print_cr("");
  2021     gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  2022     gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  2023     gclog_or_tty->print_cr("");
  2025     gclog_or_tty->print_cr("EVACUATION PAUSES");
  2026     print_summary(_summary);
  2028     gclog_or_tty->print_cr("MISC");
  2029     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  2030     print_summary_sd(0, "Yields", _all_yield_times_ms);
  2031     for (int i = 0; i < _aux_num; ++i) {
  2032       if (_all_aux_times_ms[i].num() > 0) {
  2033         char buffer[96];
  2034         sprintf(buffer, "Aux%d", i);
  2035         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  2036       }
  2037     }
  2038   }
  2039   if (TraceGen1Time) {
  2040     if (_all_full_gc_times_ms->num() > 0) {
  2041       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2042                  _all_full_gc_times_ms->num(),
  2043                  _all_full_gc_times_ms->sum() / 1000.0);
  2044       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  2045       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2046                     _all_full_gc_times_ms->sd(),
  2047                     _all_full_gc_times_ms->maximum());
  2048     }
  2049   }
  2050 }
  2052 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2053 #ifndef PRODUCT
  2054   _short_lived_surv_rate_group->print_surv_rate_summary();
  2055   // add this call for any other surv rate groups
  2056 #endif // PRODUCT
  2057 }
  2059 #ifndef PRODUCT
  2060 // for debugging, bit of a hack...
  2061 static char*
  2062 region_num_to_mbs(int length) {
  2063   static char buffer[64];
  2064   double bytes = (double) (length * HeapRegion::GrainBytes);
  2065   double mbs = bytes / (double) (1024 * 1024);
  2066   sprintf(buffer, "%7.2lfMB", mbs);
  2067   return buffer;
  2068 }
  2069 #endif // PRODUCT
  2071 size_t G1CollectorPolicy::max_regions(int purpose) {
  2072   switch (purpose) {
  2073     case GCAllocForSurvived:
  2074       return _max_survivor_regions;
  2075     case GCAllocForTenured:
  2076       return REGIONS_UNLIMITED;
  2077     default:
  2078       ShouldNotReachHere();
  2079       return REGIONS_UNLIMITED;
  2080   };
  2081 }
  2083 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  2084   size_t expansion_region_num = 0;
  2085   if (GCLockerEdenExpansionPercent > 0) {
  2086     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  2087     double expansion_region_num_d = perc * (double) _young_list_target_length;
  2088     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  2089     // less than 1.0) we'll get 1.
  2090     expansion_region_num = (size_t) ceil(expansion_region_num_d);
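           // With assumed, illustrative numbers: a target length of 50
           // regions and GCLockerEdenExpansionPercent = 5 give
           // ceil(2.5) = 3 extra regions.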
  2091   } else {
  2092     assert(expansion_region_num == 0, "sanity");
  2093   }
  2094   _young_list_max_length = _young_list_target_length + expansion_region_num;
  2095   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  2096 }
  2098 // Calculates survivor space parameters.
  2099 void G1CollectorPolicy::update_survivors_policy() {
  2100   double max_survivor_regions_d =
  2101                  (double) _young_list_target_length / (double) SurvivorRatio;
  2102   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  2103   // smaller than 1.0) we'll get 1.
  2104   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
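         // With assumed, illustrative numbers: a young list target of 60
         // regions and SurvivorRatio = 8 give ceil(7.5) = 8 survivor
         // regions.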
  2106   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2107         HeapRegion::GrainWords * _max_survivor_regions);
  2108 }
  2110 #ifndef PRODUCT
  2111 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2112   CollectionSetChooser* _chooser;
  2113 public:
  2114   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2115     _chooser(chooser) {}
  2117   bool doHeapRegion(HeapRegion* r) {
  2118     if (!r->continuesHumongous()) {
  2119       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2120     }
  2121     return false;
  2122   }
  2123 };
  2125 bool G1CollectorPolicy::assertMarkedBytesDataOK() {
  2126   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2127   _g1->heap_region_iterate(&cl);
  2128   return true;
  2129 }
  2130 #endif
  2132 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  2133                                                      GCCause::Cause gc_cause) {
  2134   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2135   if (!during_cycle) {
  2136     ergo_verbose1(ErgoConcCycles,
  2137                   "request concurrent cycle initiation",
  2138                   ergo_format_reason("requested by GC cause")
  2139                   ergo_format_str("GC cause"),
  2140                   GCCause::to_string(gc_cause));
  2141     set_initiate_conc_mark_if_possible();
  2142     return true;
  2143   } else {
  2144     ergo_verbose1(ErgoConcCycles,
  2145                   "do not request concurrent cycle initiation",
  2146                   ergo_format_reason("concurrent cycle already in progress")
  2147                   ergo_format_str("GC cause"),
  2148                   GCCause::to_string(gc_cause));
  2149     return false;
  2150   }
  2151 }
  2153 void
  2154 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2155   // We are about to decide on whether this pause will be an
  2156   // initial-mark pause.
  2158   // First, during_initial_mark_pause() should not be already set. We
  2159   // will set it here if we have to. However, it should be cleared by
  2160   // the end of the pause (it's only set for the duration of an
  2161   // initial-mark pause).
  2162   assert(!during_initial_mark_pause(), "pre-condition");
  2164   if (initiate_conc_mark_if_possible()) {
  2165     // We had noticed on a previous pause that the heap occupancy has
  2166     // gone over the initiating threshold and we should start a
  2167     // concurrent marking cycle. So we might initiate one.
  2169     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2170     if (!during_cycle) {
  2171       // The concurrent marking thread is not "during a cycle", i.e.,
  2172       // it has completed the last one. So we can go ahead and
  2173       // initiate a new cycle.
  2175       set_during_initial_mark_pause();
  2176       // We do not allow mixed GCs during marking.
  2177       if (!gcs_are_young()) {
  2178         set_gcs_are_young(true);
  2179         ergo_verbose0(ErgoMixedGCs,
  2180                       "end mixed GCs",
  2181                       ergo_format_reason("concurrent cycle is about to start"));
  2182       }
  2184       // And we can now clear initiate_conc_mark_if_possible() as
  2185       // we've already acted on it.
  2186       clear_initiate_conc_mark_if_possible();
  2188       ergo_verbose0(ErgoConcCycles,
  2189                   "initiate concurrent cycle",
  2190                   ergo_format_reason("concurrent cycle initiation requested"));
  2191     } else {
  2192       // The concurrent marking thread is still finishing up the
  2193       // previous cycle. If we start one right now the two cycles will
  2194       // overlap. In particular, the concurrent marking thread might
  2195       // be in the process of clearing the next marking bitmap (which
  2196       // we will use for the next cycle if we start one). Starting a
  2197       // cycle now will be bad given that parts of the marking
  2198       // information might get cleared by the marking thread. And we
  2199       // cannot wait for the marking thread to finish the cycle as it
  2200       // periodically yields while clearing the next marking bitmap
  2201       // and, if it's in a yield point, it's waiting for us to
  2202       // finish. So, at this point we will not start a cycle and we'll
  2203       // let the concurrent marking thread complete the last one.
  2204       ergo_verbose0(ErgoConcCycles,
  2205                     "do not initiate concurrent cycle",
  2206                     ergo_format_reason("concurrent cycle already in progress"));
  2207     }
  2208   }
  2209 }
  2211 class KnownGarbageClosure: public HeapRegionClosure {
  2212   G1CollectedHeap* _g1h;
  2213   CollectionSetChooser* _hrSorted;
  2215 public:
  2216   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2217     _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
  2219   bool doHeapRegion(HeapRegion* r) {
  2220     // We only include humongous regions in collection
  2221     // sets when concurrent mark shows that their contained object is
  2222     // unreachable.
  2224     // Do we have any marking information for this region?
  2225     if (r->is_marked()) {
  2226       // We will skip any region that's currently used as an old GC
  2227       // alloc region (we should not consider those for collection
  2228       // before we fill them up).
  2229       if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
  2230         _hrSorted->addMarkedHeapRegion(r);
  2231       }
  2232     }
  2233     return false;
  2234   }
  2235 };
  2237 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2238   G1CollectedHeap* _g1h;
  2239   CollectionSetChooser* _hrSorted;
  2240   jint _marked_regions_added;
  2241   size_t _reclaimable_bytes_added;
  2242   jint _chunk_size;
  2243   jint _cur_chunk_idx;
  2244   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2245   int _worker;
  2246   int _invokes;
  2248   void get_new_chunk() {
  2249     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2250     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2251   }
  2252   void add_region(HeapRegion* r) {
  2253     if (_cur_chunk_idx == _cur_chunk_end) {
  2254       get_new_chunk();
  2256     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2257     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2258     _marked_regions_added++;
  2259     _reclaimable_bytes_added += r->reclaimable_bytes();
  2260     _cur_chunk_idx++;
  2261   }
  2263 public:
  2264   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2265                            jint chunk_size,
  2266                            int worker) :
  2267       _g1h(G1CollectedHeap::heap()),
  2268       _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2269       _marked_regions_added(0), _reclaimable_bytes_added(0),
  2270       _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
  2272   bool doHeapRegion(HeapRegion* r) {
  2273     // We only include humongous regions in collection
  2274     // sets when concurrent mark shows that their contained object is
  2275     // unreachable.
  2276     _invokes++;
  2278     // Do we have any marking information for this region?
  2279     if (r->is_marked()) {
  2280       // We will skip any region that's currently used as an old GC
  2281       // alloc region (we should not consider those for collection
  2282       // before we fill them up).
  2283       if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
  2284         add_region(r);
  2285       }
  2286     }
  2287     return false;
  2288   }
  2289   jint marked_regions_added() { return _marked_regions_added; }
  2290   size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
  2291   int invokes() { return _invokes; }
  2292 };
  2294 class ParKnownGarbageTask: public AbstractGangTask {
  2295   CollectionSetChooser* _hrSorted;
  2296   jint _chunk_size;
  2297   G1CollectedHeap* _g1;
  2298 public:
  2299   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
  2300     AbstractGangTask("ParKnownGarbageTask"),
  2301     _hrSorted(hrSorted), _chunk_size(chunk_size),
  2302     _g1(G1CollectedHeap::heap()) { }
  2304   void work(uint worker_id) {
  2305     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
  2306                                                _chunk_size,
  2307                                                worker_id);
  2308     // Back to zero for the claim value.
  2309     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
  2310                                          _g1->workers()->active_workers(),
  2311                                          HeapRegion::InitialClaimValue);
  2312     jint regions_added = parKnownGarbageCl.marked_regions_added();
  2313     size_t reclaimable_bytes_added =
  2314                                    parKnownGarbageCl.reclaimable_bytes_added();
  2315     _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
  2316     if (G1PrintParCleanupStats) {
  2317       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
  2318                  worker_id, parKnownGarbageCl.invokes(), regions_added);
  2319     }
  2320   }
  2321 };
  2323 void
  2324 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  2325   double start_sec;
  2326   if (G1PrintParCleanupStats) {
  2327     start_sec = os::elapsedTime();
  2328   }
  2330   _collectionSetChooser->clearMarkedHeapRegions();
  2331   double clear_marked_end_sec;
  2332   if (G1PrintParCleanupStats) {
  2333     clear_marked_end_sec = os::elapsedTime();
  2334     gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
  2335                            (clear_marked_end_sec - start_sec) * 1000.0);
  2336   }
  2338   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2339     const size_t OverpartitionFactor = 4;
  2340     size_t WorkUnit;
  2341     // The use of MinChunkSize = 8 in the original code
  2342     // causes some assertion failures when the total number of
  2343     // regions is less than 8. The code here tries to fix that.
  2344     // Should the original code also be fixed?
  2345     if (no_of_gc_threads > 0) {
  2346       const size_t MinWorkUnit =
  2347         MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
  2348       WorkUnit =
  2349         MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
  2350              MinWorkUnit);
  2351     } else {
  2352       assert(no_of_gc_threads > 0,
  2353         "The active gc workers should be greater than 0");
  2354       // In a product build do something reasonable to avoid a crash.
  2355       const size_t MinWorkUnit =
  2356         MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
  2357       WorkUnit =
  2358         MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
  2359              MinWorkUnit);
  2360     }
  2361     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
  2362                                                              WorkUnit);
  2363     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  2364                                             (int) WorkUnit);
  2365     _g1->workers()->run_task(&parKnownGarbageTask);
  2367     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2368            "sanity check");
  2369   } else {
  2370     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  2371     _g1->heap_region_iterate(&knownGarbagecl);
  2372   }
  2373   double known_garbage_end_sec;
  2374   if (G1PrintParCleanupStats) {
  2375     known_garbage_end_sec = os::elapsedTime();
  2376     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
  2377                       (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  2378   }
  2380   _collectionSetChooser->sortMarkedHeapRegions();
  2381   double end_sec = os::elapsedTime();
  2382   if (G1PrintParCleanupStats) {
  2383     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
  2384                            (end_sec - known_garbage_end_sec) * 1000.0);
  2385   }
  2387   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  2388   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  2389   _cur_mark_stop_world_time_ms += elapsed_time_ms;
  2390   _prev_collection_pause_end_ms += elapsed_time_ms;
  2391   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
  2392 }
  2394 // Add the heap region at the head of the non-incremental collection set
  2395 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  2396   assert(_inc_cset_build_state == Active, "Precondition");
  2397   assert(!hr->is_young(), "non-incremental add of young region");
  2399   assert(!hr->in_collection_set(), "should not already be in the CSet");
  2400   hr->set_in_collection_set(true);
  2401   hr->set_next_in_collection_set(_collection_set);
  2402   _collection_set = hr;
  2403   _collection_set_bytes_used_before += hr->used();
  2404   _g1->register_region_with_in_cset_fast_test(hr);
  2405   size_t rs_length = hr->rem_set()->occupied();
  2406   _recorded_rs_lengths += rs_length;
  2407   _old_cset_region_length += 1;
  2408 }
  2410 // Initialize the per-collection-set information
  2411 void G1CollectorPolicy::start_incremental_cset_building() {
  2412   assert(_inc_cset_build_state == Inactive, "Precondition");
  2414   _inc_cset_head = NULL;
  2415   _inc_cset_tail = NULL;
  2416   _inc_cset_bytes_used_before = 0;
  2418   _inc_cset_max_finger = 0;
  2419   _inc_cset_recorded_rs_lengths = 0;
  2420   _inc_cset_recorded_rs_lengths_diffs = 0;
  2421   _inc_cset_predicted_elapsed_time_ms = 0.0;
  2422   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  2423   _inc_cset_build_state = Active;
  2424 }
  2426 void G1CollectorPolicy::finalize_incremental_cset_building() {
  2427   assert(_inc_cset_build_state == Active, "Precondition");
  2428   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  2430   // The two "main" fields, _inc_cset_recorded_rs_lengths and
  2431   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  2432   // that adds a new region to the CSet. Further updates by the
  2433   // concurrent refinement thread that samples the young RSet lengths
  2434   // are accumulated in the *_diffs fields. Here we add the diffs to
  2435   // the "main" fields.
  2437   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
  2438     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  2439   } else {
  2440     // This is defensive. The diff should in theory be always positive
  2441     // as RSets can only grow between GCs. However, given that we
  2442     // sample their size concurrently with other threads updating them
  2443     // it's possible that we might get the wrong size back, which
  2444     // could make the calculations somewhat inaccurate.
  2445     size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
  2446     if (_inc_cset_recorded_rs_lengths >= diffs) {
  2447       _inc_cset_recorded_rs_lengths -= diffs;
  2448     } else {
  2449       _inc_cset_recorded_rs_lengths = 0;
  2450     }
  2451   }
  2452   _inc_cset_predicted_elapsed_time_ms +=
  2453                                      _inc_cset_predicted_elapsed_time_ms_diffs;
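         // With assumed, illustrative numbers: if concurrent sampling has
         // accumulated +120 entries in the RSet length diff and +0.8 ms in
         // the time diff since the last GC, both are folded into the main
         // fields above and the diffs are reset below.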
  2455   _inc_cset_recorded_rs_lengths_diffs = 0;
  2456   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  2457 }
  2459 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  2460   // This routine is used when:
  2461   // * adding survivor regions to the incremental cset at the end of an
  2462   //   evacuation pause,
  2463   // * adding the current allocation region to the incremental cset
  2464   //   when it is retired, and
  2465   // * updating existing policy information for a region in the
  2466   //   incremental cset via young list RSet sampling.
  2467   // Therefore this routine may be called at a safepoint by the
  2468   // VM thread, or in-between safepoints by mutator threads (when
  2469   // retiring the current allocation region) or a concurrent
  2470   // refine thread (RSet sampling).
  2472   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  2473   size_t used_bytes = hr->used();
  2474   _inc_cset_recorded_rs_lengths += rs_length;
  2475   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  2476   _inc_cset_bytes_used_before += used_bytes;
  2478   // Cache the values we have added to the aggregated information
  2479   // in the heap region in case we have to remove this region from
  2480   // the incremental collection set, or it is updated by the
  2481   // RSet sampling code.
  2482   hr->set_recorded_rs_length(rs_length);
  2483   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  2484 }
  2486 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
  2487                                                      size_t new_rs_length) {
  2488   // Update the CSet information that is dependent on the new RS length
  2489   assert(hr->is_young(), "Precondition");
  2490   assert(!SafepointSynchronize::is_at_safepoint(),
  2491                                                "should not be at a safepoint");
  2493   // We could have updated _inc_cset_recorded_rs_lengths and
  2494   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  2495   // that atomically, as this code is executed by a concurrent
  2496   // refinement thread, potentially concurrently with a mutator thread
  2497   // allocating a new region and also updating the same fields. To
  2498   // avoid the atomic operations we accumulate these updates on two
  2499   // separate fields (*_diffs) and we'll just add them to the "main"
  2500   // fields at the start of a GC.
  2502   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  2503   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  2504   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
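         // With assumed, illustrative numbers: if the region's recorded
         // RSet length was 200 and the new sample is 260, +60 is
         // accumulated here and applied to the main field at the start of
         // the next GC.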
  2506   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  2507   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  2508   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  2509   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
  2511   hr->set_recorded_rs_length(new_rs_length);
  2512   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
  2513 }
  2515 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  2516   assert(hr->is_young(), "invariant");
  2517   assert(hr->young_index_in_cset() > -1, "should have already been set");
  2518   assert(_inc_cset_build_state == Active, "Precondition");
  2520   // We need to clear and set the cached recorded/predicted collection set
  2521   // information in the heap region here (before the region gets added
  2522   // to the collection set). An individual heap region's cached values
  2523   // are calculated, aggregated with the policy collection set info,
  2524   // and cached in the heap region here (initially) and (subsequently)
  2525   // by the Young List sampling code.
  2527   size_t rs_length = hr->rem_set()->occupied();
  2528   add_to_incremental_cset_info(hr, rs_length);
  2530   HeapWord* hr_end = hr->end();
  2531   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
  2533   assert(!hr->in_collection_set(), "invariant");
  2534   hr->set_in_collection_set(true);
  2535   assert( hr->next_in_collection_set() == NULL, "invariant");
  2537   _g1->register_region_with_in_cset_fast_test(hr);
  2538 }
  2540 // Add the region at the RHS of the incremental cset
  2541 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  2542   // We should only ever be appending survivors at the end of a pause
  2543   assert( hr->is_survivor(), "Logic");
  2545   // Do the 'common' stuff
  2546   add_region_to_incremental_cset_common(hr);
  2548   // Now add the region at the right hand side
  2549   if (_inc_cset_tail == NULL) {
  2550     assert(_inc_cset_head == NULL, "invariant");
  2551     _inc_cset_head = hr;
  2552   } else {
  2553     _inc_cset_tail->set_next_in_collection_set(hr);
  2554   }
  2555   _inc_cset_tail = hr;
  2556 }
  2558 // Add the region to the LHS of the incremental cset
  2559 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  2560   // Survivors should be added to the RHS at the end of a pause
  2561   assert(!hr->is_survivor(), "Logic");
  2563   // Do the 'common' stuff
  2564   add_region_to_incremental_cset_common(hr);
  2566   // Add the region at the left hand side
  2567   hr->set_next_in_collection_set(_inc_cset_head);
  2568   if (_inc_cset_head == NULL) {
  2569     assert(_inc_cset_tail == NULL, "Invariant");
  2570     _inc_cset_tail = hr;
  2571   }
  2572   _inc_cset_head = hr;
  2573 }
#ifndef PRODUCT
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                 "age: %4d, y: %d, surv: %d",
                 csr->bottom(), csr->end(),
                 csr->top(),
                 csr->prev_top_at_mark_start(),
                 csr->next_top_at_mark_start(),
                 csr->top_at_conc_mark_count(),
                 csr->age_in_surv_rate_group_cond(),
                 csr->is_young(),
                 csr->is_survivor());
    csr = next;
  }
}
#endif // !PRODUCT
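// Returns true iff a mixed GC is worthwhile: the CSet chooser must
// have candidate old regions available and the space reclaimable from
// them must be at least G1OldReclaimableThresholdPercent of the heap
// capacity. One of the two action strings is echoed via ergo_verbose.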
bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) {
  CollectionSetChooser* cset_chooser = _collectionSetChooser;
  if (cset_chooser->isEmpty()) {
    ergo_verbose0(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("candidate old regions not available"));
    return false;
  }

  size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
  size_t capacity_bytes = _g1->capacity();
  double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  double threshold = (double) G1OldReclaimableThresholdPercent;
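  // For example, 200 MB of reclaimable space on a 1 GB heap gives
  // perc = 20.0, which is then compared against the threshold
  // percentage below.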
  if (perc < threshold) {
    ergo_verbose4(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("reclaimable percentage lower than threshold")
                  ergo_format_region("candidate old regions")
                  ergo_format_byte_perc("reclaimable")
                  ergo_format_perc("threshold"),
                  cset_chooser->remainingRegions(),
                  reclaimable_bytes, perc, threshold);
    return false;
  }

  ergo_verbose4(ErgoMixedGCs,
                true_action_str,
                ergo_format_reason("candidate old regions available")
                ergo_format_region("candidate old regions")
                ergo_format_byte_perc("reclaimable")
                ergo_format_perc("threshold"),
                cset_chooser->remainingRegions(),
                reclaimable_bytes, perc, threshold);
  return true;
}
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;
  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);
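  // From here on, time_remaining_ms is the pause time budget left
  // after subtracting the predicted base time; every region added to
  // the CSet consumes its predicted elapsed time from this budget.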
  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _last_gc_was_young = gcs_are_young();

  if (_last_gc_was_young) {
    ++_young_pause_num;
  } else {
    ++_mixed_pause_num;
  }

  // The young list is laid out so that the survivor regions from the
  // previous pause are appended to its RHS, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  size_t survivor_region_length = young_list->survivor_length();
  size_t eden_region_length = young_list->length() - survivor_region_length;
  init_cset_region_lengths(eden_region_length, survivor_region_length);
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms);
  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;

  if (!gcs_are_young()) {
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    assert(cset_chooser->verify(), "CSet Chooser verification - pre");
    const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
    const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
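    // The chooser bounds the number of old regions added per mixed
    // GC: the min guarantees forward progress through the candidate
    // list even when regions look expensive, while the max caps the
    // old-region contribution to any single pause.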
    size_t expensive_region_num = 0;
    bool check_time_remaining = adaptive_young_list_length();
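    // Only enforce the pause time budget when the young list length
    // is adaptive; with a fixed-length young list we simply stop once
    // the minimum old region count has been reached.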
    HeapRegion* hr = cset_chooser->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        ergo_verbose2(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("old CSet region num reached max")
                      ergo_format_region("old")
                      ergo_format_region("max"),
                      old_cset_region_length(), max_old_cset_length);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            ergo_verbose4(ErgoCSetConstruction,
                          "finish adding old regions to CSet",
                          ergo_format_reason("predicted time is too high")
                          ergo_format_ms("predicted time")
                          ergo_format_ms("remaining time")
                          ergo_format_region("old")
                          ergo_format_region("min"),
                          predicted_time_ms, time_remaining_ms,
                          old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          ergo_verbose2(ErgoCSetConstruction,
                        "finish adding old regions to CSet",
                        ergo_format_reason("old CSet region num reached min")
                        ergo_format_region("old")
                        ergo_format_region("min"),
                        old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms -= predicted_time_ms;
      predicted_pause_time_ms += predicted_time_ms;
      cset_chooser->remove_and_move_to_next(hr);
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser->peek();
    }
    if (hr == NULL) {
      ergo_verbose0(ErgoCSetConstruction,
                    "finish adding old regions to CSet",
                    ergo_format_reason("candidate old regions not available"));
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      ergo_verbose4(ErgoCSetConstruction,
                    "added expensive regions to CSet",
                    ergo_format_reason("old CSet region num not reached min")
                    ergo_format_region("old")
                    ergo_format_region("expensive")
                    ergo_format_region("min")
                    ergo_format_ms("remaining time"),
                    old_cset_region_length(),
                    expensive_region_num,
                    min_old_cset_length,
                    time_remaining_ms);
    }

    assert(cset_chooser->verify(), "CSet Chooser verification - post");
  }
  stop_incremental_cset_building();

  count_CS_bytes_used();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                old_cset_region_length(),
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}
