src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      brutisso
date:        Fri, 13 Apr 2012 01:59:38 +0200
changeset:   3710 5c86f8211d1e
parent:      3691 2a0172480595
child:       3713 720b6a76dd9d
permissions: -rw-r--r--

7160728: Introduce an extra logging level for G1 logging
Summary: Added log levels "fine", "finer" and "finest". Let PrintGC map to "fine" and PrintGCDetails map to "finer". Separated out the per worker information in the G1 logging to the "finest" level.
Reviewed-by: stefank, jwilhelm, tonyp, johnc

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// Help class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN -1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
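
// A minimal usage sketch of LineBuffer (hypothetical helper, for
// exposition only): a line is built up piecewise with append() and then
// emitted atomically with append_and_print_cr(), so log lines produced by
// concurrent GC threads cannot be interleaved mid-line.
static void line_buffer_usage_example() {
  LineBuffer buf(1);                    // indent by one level (3 spaces)
  buf.append("[%s (ms):", "Update RS"); // header fragment
  buf.append(" %.1lf", 2.3);            // e.g. one value per GC worker
  buf.append_and_print_cr("]");         // print the whole line at once
}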

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),
  _root_region_scan_wait_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),
  _young_pause_num(0),
  _mixed_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _all_full_gc_times_ms(new NumberSeq()),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }
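
  // Worked example of the defaulting rules above (illustrative values,
  // in ms):
  //   neither flag set:                      MaxGCPauseMillis = 200,
  //                                          GCPauseIntervalMillis = 201
  //   only MaxGCPauseMillis = 50:            GCPauseIntervalMillis = 51
  //   only GCPauseIntervalMillis = 100:      VM exits during initialization
  //   MaxGCPauseMillis = 100, interval = 50: VM exits during initialization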

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
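  // For example, GCTimeRatio = 9 gives 100 * (1 / (1 + 9)) = 10, i.e. a
  // target of at most ~10% of total time spent in GC.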

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
  assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
  assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
      _sizer_kind = SizerMaxAndNewSize;
      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
  size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
  return MAX2((size_t)1, default_value);
}

size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
  size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
  return MAX2((size_t)1, default_value);
}

void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}
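
// Worked example for the SizerNewRatio case above (illustrative numbers):
// with -XX:NewRatio=3 and a heap of 1000 regions, the young gen is pinned
// at 1000 / (3 + 1) = 250 regions; min == max here, and the constructor set
// _adaptive_size to false for this sizer kind, so the young list length is
// not adapted at runtime.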

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(size_t young_length,
                                         double base_time_ms,
                                         size_t base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}
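
// Numeric sketch of the checks above (hypothetical values): with
// base_time_ms = 60, copy_time_ms = 80 and young_other_time_ms = 10
// against a 200 ms target, 60 + 80 + 10 = 150 <= 200, so end condition 2
// passes. With the default G1ConfidencePercent of 50, sigma() is 0.5, so
// end condition 3 additionally requires
// free_bytes >= 2 * 0.5 * bytes_to_copy, i.e. at least as much free space
// outside the chosen young gen as we predict we will copy.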

void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (size_t) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     size_t base_min_length) {
  size_t desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                   size_t base_min_length,
                                                   size_t desired_min_length,
                                                   size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}
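
// A minimal standalone sketch (hypothetical, for exposition only) of the
// binary search used above: keep one length known to fit and one known
// not to fit, and bisect until they are adjacent.
static size_t bisect_largest_fitting(size_t fits, size_t does_not_fit,
                                     bool (*will_fit)(size_t)) {
  // Invariants: will_fit(fits), !will_fit(does_not_fit), fits < does_not_fit.
  size_t diff = (does_not_fit - fits) / 2;
  while (diff > 0) {
    size_t mid = fits + diff;
    if (will_fit(mid)) {
      fits = mid;         // mid fits: raise the lower bound
    } else {
      does_not_fit = mid; // mid does not fit: lower the upper bound
    }
    diff = (does_not_fit - fits) / 2;
  }
  return fits; // the largest length known to fit
}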

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clearMarkedHeapRegions();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (G1Log::finer()) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_satb_filtering_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
    _par_last_gc_worker_other_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  // This is initialized to zero here and is set during the evacuation
  // pause if we actually waited for the root region scanning to finish.
  _root_region_scan_wait_time_ms = 0.0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}

template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}
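
// Usage sketch for sum_of (hypothetical, for exposition only): summing n
// entries of an N-slot circular buffer starting at index 'start' wraps
// around modulo N.
static double sum_of_usage_example() {
  double arr[4] = { 1.0, 2.0, 3.0, 4.0 };
  return sum_of(arr, 2, 3, 4); // arr[2] + arr[3] + arr[0] == 8.0
}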

void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < no_of_gc_threads(); ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    if (G1Log::finest()) {
      buf.append("  %.1lf", val);
    }
  }

  if (G1Log::finest()) {
    buf.append_and_print_cr("");
  }
  double avg = total / (double) no_of_gc_threads();
  buf.append_and_print_cr(" Avg: %.1lf Min: %.1lf Max: %.1lf Diff: %.1lf]",
    avg, min, max, max - min);
}
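
// Example output (illustrative, 4 GC workers): at the "finest" level the
// per-worker values are printed, followed by the summary on its own line:
//   [GC Worker Times (ms):  12.3  11.8  12.6  12.1
//    Avg: 12.2 Min: 11.8 Max: 12.6 Diff: 0.8]
// below "finest", the summary is appended directly to the header and the
// whole thing is printed as a single line:
//   [GC Worker Times (ms): Avg: 12.2 Min: 11.8 Max: 12.6 Diff: 0.8]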

void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < no_of_gc_threads(); ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) no_of_gc_threads();
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
    (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}

double G1CollectorPolicy::avg_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < no_of_gc_threads(); ++i) {
      ret += data[i];
    }
    return ret / (double) no_of_gc_threads();
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < no_of_gc_threads(); ++i) {
      if (data[i] > ret) {
        ret = data[i];
      }
    }
    return ret;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::sum_of_values(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < no_of_gc_threads(); i++) {
      sum += data[i];
    }
    return sum;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_sum(double* data1, double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < no_of_gc_threads(); ++i) {
      double data = data1[i] + data2[i];
      if (data > ret) {
        ret = data;
      }
    }
  }
  return ret;
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}
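
// Worked example for the threshold above (hypothetical numbers): with a
// 1024 MB heap and the default InitiatingHeapOccupancyPercent of 45, the
// threshold is (1024 MB / 100) * 45 ~= 460 MB; an allocation that would
// push non-young occupancy past that requests a concurrent cycle, unless
// one is already running or we are still doing mixed collections.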
  1169 // Anything below that is considered to be zero
  1170 #define MIN_TIMER_GRANULARITY 0.0000001
  1172 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
  1173   double end_time_sec = os::elapsedTime();
  1174   double elapsed_ms = _last_pause_time_ms;
  1175   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1176   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
  1177          "otherwise, the subtraction below does not make sense");
  1178   size_t rs_size =
  1179             _cur_collection_pause_used_regions_at_start - cset_region_length();
  1180   size_t cur_used_bytes = _g1->used();
  1181   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1182   bool last_pause_included_initial_mark = false;
  1183   bool update_stats = !_g1->evacuation_failed();
  1184   set_no_of_gc_threads(no_of_gc_threads);
  1186 #ifndef PRODUCT
  1187   if (G1YoungSurvRateVerbose) {
  1188     gclog_or_tty->print_cr("");
  1189     _short_lived_surv_rate_group->print();
  1190     // do that for any other surv rate groups too
  1192 #endif // PRODUCT
  1194   last_pause_included_initial_mark = during_initial_mark_pause();
  1195   if (last_pause_included_initial_mark) {
  1196     record_concurrent_mark_init_end(0.0);
  1197   } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
  1198     // Note: this might have already been set, if during the last
  1199     // pause we decided to start a cycle but at the beginning of
  1200     // this pause we decided to postpone it. That's OK.
  1201     set_initiate_conc_mark_if_possible();
  1204   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
  1205                           end_time_sec, false);
  1207   // This assert is exempted when we're doing parallel collection pauses,
  1208   // because the fragmentation caused by the parallel GC allocation buffers
  1209   // can lead to more memory being used during collection than was used
  1210   // before. Best leave this out until the fragmentation problem is fixed.
  1211   // Pauses in which evacuation failed can also lead to negative
  1212   // collections, since no space is reclaimed from a region containing an
  1213   // object whose evacuation failed.
  1214   // Further, we're now always doing parallel collection.  But I'm still
  1215   // leaving this here as a placeholder for a more precise assertion later.
  1216   // (DLD, 10/05.)
  1217   assert((true || parallel) // Always using GC LABs now.
  1218          || _g1->evacuation_failed()
  1219          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
  1220          "Negative collection");
  1222   size_t freed_bytes =
  1223     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  1224   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
  1226   double survival_fraction =
  1227     (double)surviving_bytes/
  1228     (double)_collection_set_bytes_used_before;
  1230   // These values are used to update the summary information that is
  1231   // displayed when TraceGen0Time is enabled, and are output as part
  1232   // of the "finer" output, in the non-parallel case.
  1234   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  1235   double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
  1236   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  1237   double update_rs_processed_buffers =
  1238     sum_of_values(_par_last_update_rs_processed_buffers);
  1239   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  1240   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  1241   double termination_time = avg_value(_par_last_termination_times_ms);
  1243   double known_time = ext_root_scan_time +
  1244                       satb_filtering_time +
  1245                       update_rs_time +
  1246                       scan_rs_time +
  1247                       obj_copy_time;
  1249   double other_time_ms = elapsed_ms;
  1251   // Subtract the root region scanning wait time. It's initialized to
  1252   // zero at the start of the pause.
  1253   other_time_ms -= _root_region_scan_wait_time_ms;
  1255   if (parallel) {
  1256     other_time_ms -= _cur_collection_par_time_ms;
  1257   } else {
  1258     other_time_ms -= known_time;
  1261   // Now subtract the time taken to fix up roots in generated code
  1262   other_time_ms -= _cur_collection_code_root_fixup_time_ms;
  1264   // Subtract the time taken to clean the card table from the
  1265   // current value of "other time"
  1266   other_time_ms -= _cur_clear_ct_time_ms;
  1268   // TraceGen0Time and TraceGen1Time summary info updating.
  1269   _all_pause_times_ms->add(elapsed_ms);
  1271   if (update_stats) {
  1272     _summary->record_total_time_ms(elapsed_ms);
  1273     _summary->record_other_time_ms(other_time_ms);
  1275     MainBodySummary* body_summary = _summary->main_body_summary();
  1276     assert(body_summary != NULL, "should not be null!");
  1278     body_summary->record_root_region_scan_wait_time_ms(
  1279                                                _root_region_scan_wait_time_ms);
  1280     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
  1281     body_summary->record_satb_filtering_time_ms(satb_filtering_time);
  1282     body_summary->record_update_rs_time_ms(update_rs_time);
  1283     body_summary->record_scan_rs_time_ms(scan_rs_time);
  1284     body_summary->record_obj_copy_time_ms(obj_copy_time);
  1286     if (parallel) {
  1287       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
  1288       body_summary->record_termination_time_ms(termination_time);
  1290       double parallel_known_time = known_time + termination_time;
  1291       double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
  1292       body_summary->record_parallel_other_time_ms(parallel_other_time);
  1295     body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
  1297     // We exempt parallel collection from this check because Alloc Buffer
  1298     // fragmentation can produce negative collections.  Same with evac
  1299     // failure.
  1300     // Further, we're now always doing parallel collection.  But I'm still
  1301     // leaving this here as a placeholder for a more precise assertion later.
  1302     // (DLD, 10/05.
  1303     assert((true || parallel)
  1304            || _g1->evacuation_failed()
  1305            || surviving_bytes <= _collection_set_bytes_used_before,
  1306            "Or else negative collection!");
  1308     // this is where we update the allocation rate of the application
  1309     double app_time_ms =
  1310       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1311     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1312       // This usually happens due to the timer not having the required
  1313       // granularity. Some Linuxes are the usual culprits.
  1314       // We'll just set it to something (arbitrarily) small.
  1315       app_time_ms = 1.0;
  1316     }
  1317     // We maintain the invariant that all objects allocated by mutator
  1318     // threads will be allocated out of eden regions. So, we can use
  1319     // the eden region number allocated since the previous GC to
  1320     // calculate the application's allocate rate. The only exception
  1321     // to that is humongous objects that are allocated separately. But
  1322     // given that humongous object allocations do not really affect
  1323     // either the pause's duration nor when the next pause will take
  1324     // place we can safely ignore them here.
  1325     size_t regions_allocated = eden_cset_region_length();
  1326     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1327     _alloc_rate_ms_seq->add(alloc_rate_ms);
  1329     double interval_ms =
  1330       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1331     update_recent_gc_times(end_time_sec, elapsed_ms);
  1332     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
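         // Illustrative example (hypothetical numbers): if the recent GC
         // times sum to 120.0 ms and the truncated sequence spans an
         // interval of 2000.0 ms, the ratio is 120.0 / 2000.0 = 0.06,
         // i.e. roughly 6% of recent wall clock time was spent in pauses.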
  1333     if (recent_avg_pause_time_ratio() < 0.0 ||
  1334         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1335 #ifndef PRODUCT
  1336       // Dump info to allow post-facto debugging
  1337       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1338       gclog_or_tty->print_cr("-------------------------------------------");
  1339       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1340       _recent_gc_times_ms->dump();
  1341       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1342       _recent_prev_end_times_for_all_gcs_sec->dump();
  1343       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1344                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1345       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1346       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1347 #endif  // !PRODUCT
  1348       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1349       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1350       if (_recent_avg_pause_time_ratio < 0.0) {
  1351         _recent_avg_pause_time_ratio = 0.0;
  1352       } else {
  1353         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1354         _recent_avg_pause_time_ratio = 1.0;
  1355       }
  1356     }
  1357   }
  1359   for (int i = 0; i < _aux_num; ++i) {
  1360     if (_cur_aux_times_set[i]) {
  1361       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1362     }
  1363   }
  1365   if (G1Log::finer()) {
  1366     bool print_marking_info =
  1367       _g1->mark_in_progress() && !last_pause_included_initial_mark;
  1369     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1370                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1371                            elapsed_ms / 1000.0);
  1373     if (_root_region_scan_wait_time_ms > 0.0) {
  1374       print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  1375     }
  1376     if (parallel) {
  1377       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1378       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
  1379       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
  1380       if (print_marking_info) {
  1381         print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
  1382       }
  1383       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1384       if (G1Log::finest()) {
  1385         print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  1386       }
  1387       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1388       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1389       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1390       if (G1Log::finest()) {
  1391         print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  1392       }
  1394       for (int i = 0; i < _parallel_gc_threads; i++) {
  1395         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
  1396                                           _par_last_gc_worker_start_times_ms[i];
  1398         double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
  1399                                    _par_last_satb_filtering_times_ms[i] +
  1400                                    _par_last_update_rs_times_ms[i] +
  1401                                    _par_last_scan_rs_times_ms[i] +
  1402                                    _par_last_obj_copy_times_ms[i] +
  1403                                    _par_last_termination_times_ms[i];
  1405         _par_last_gc_worker_other_times_ms[i] = _par_last_gc_worker_times_ms[i] -
  1406                                                 worker_known_time;
  1407       }
  1409       print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
  1410       print_par_stats(2, "GC Worker Total", _par_last_gc_worker_times_ms);
  1411       print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
  1412     } else {
  1413       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1414       if (print_marking_info) {
  1415         print_stats(1, "SATB Filtering", satb_filtering_time);
  1416       }
  1417       print_stats(1, "Update RS", update_rs_time);
  1418       if (G1Log::finest()) {
  1419         print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
  1420       }
  1421       print_stats(1, "Scan RS", scan_rs_time);
  1422       print_stats(1, "Object Copying", obj_copy_time);
  1423     }
  1424     print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
  1425     print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1426 #ifndef PRODUCT
  1427     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1428     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1429     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1430     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1431     if (_num_cc_clears > 0) {
  1432       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1433     }
  1434 #endif
  1435     print_stats(1, "Other", other_time_ms);
  1436     print_stats(2, "Choose CSet",
  1437                    (_recorded_young_cset_choice_time_ms +
  1438                     _recorded_non_young_cset_choice_time_ms));
  1439     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  1440     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  1441     print_stats(2, "Free CSet",
  1442                    (_recorded_young_free_cset_time_ms +
  1443                     _recorded_non_young_free_cset_time_ms));
  1445     for (int i = 0; i < _aux_num; ++i) {
  1446       if (_cur_aux_times_set[i]) {
  1447         char buffer[96];
  1448         sprintf(buffer, "Aux%d", i);
  1449         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1450       }
  1451     }
  1452   }
  1454   // Update the efficiency-since-mark vars.
  1455   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1456   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1457     // This usually happens due to the timer not having the required
  1458     // granularity. Some Linuxes are the usual culprits.
  1459     // We'll just set it to something (arbitrarily) small.
  1460     proc_ms = 1.0;
  1461   }
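       // A sketch of the efficiency calculation below (hypothetical
       // numbers): freeing 64 MB in a 50 ms pause with 4 parallel GC
       // threads gives proc_ms = 50.0 * 4 = 200.0 and an efficiency of
       // 67108864 / 200.0 = 335544.32 bytes per worker-millisecond.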
  1462   double cur_efficiency = (double) freed_bytes / proc_ms;
  1464   bool new_in_marking_window = _in_marking_window;
  1465   bool new_in_marking_window_im = false;
  1466   if (during_initial_mark_pause()) {
  1467     new_in_marking_window = true;
  1468     new_in_marking_window_im = true;
  1469   }
  1471   if (_last_young_gc) {
  1472   // This is supposed to be the "last young GC" before we start
  1473     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
  1475     if (!last_pause_included_initial_mark) {
  1476       if (next_gc_should_be_mixed("start mixed GCs",
  1477                                   "do not start mixed GCs")) {
  1478         set_gcs_are_young(false);
  1479       }
  1480     } else {
  1481       ergo_verbose0(ErgoMixedGCs,
  1482                     "do not start mixed GCs",
  1483                     ergo_format_reason("concurrent cycle is about to start"));
  1484     }
  1485     _last_young_gc = false;
  1486   }
  1488   if (!_last_gc_was_young) {
  1489     // This is a mixed GC. Here we decide whether to continue doing
  1490     // mixed GCs or not.
  1492     if (!next_gc_should_be_mixed("continue mixed GCs",
  1493                                  "do not continue mixed GCs")) {
  1494       set_gcs_are_young(true);
  1495     }
  1496   }
  1498   if (_last_gc_was_young && !_during_marking) {
  1499     _young_gc_eff_seq->add(cur_efficiency);
  1500   }
  1502   _short_lived_surv_rate_group->start_adding_regions();
  1503   // do that for any other surv rate groups
  1505   if (update_stats) {
  1506     double pause_time_ms = elapsed_ms;
  1508     size_t diff = 0;
  1509     if (_max_pending_cards >= _pending_cards)
  1510       diff = _max_pending_cards - _pending_cards;
  1511     _pending_card_diff_seq->add((double) diff);
  1513     double cost_per_card_ms = 0.0;
  1514     if (_pending_cards > 0) {
  1515       cost_per_card_ms = update_rs_time / (double) _pending_cards;
  1516       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1517     }
  1519     size_t cards_scanned = _g1->cards_scanned();
  1521     double cost_per_entry_ms = 0.0;
  1522     if (cards_scanned > 10) {
  1523       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1524       if (_last_gc_was_young) {
  1525         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1526       } else {
  1527         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1528       }
  1529     }
  1531     if (_max_rs_lengths > 0) {
  1532       double cards_per_entry_ratio =
  1533         (double) cards_scanned / (double) _max_rs_lengths;
  1534       if (_last_gc_was_young) {
  1535         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1536       } else {
  1537         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1538       }
  1539     }
  1541     // This is defensive. For a while _max_rs_lengths could get
  1542     // smaller than _recorded_rs_lengths which was causing
  1543     // rs_length_diff to get very large and mess up the RSet length
  1544     // predictions. The reason was unsafe concurrent updates to the
  1545     // _inc_cset_recorded_rs_lengths field which the code below guards
  1546     // against (see CR 7118202). This bug has now been fixed (see CR
  1547     // 7119027). However, I'm still worried that
  1548     // _inc_cset_recorded_rs_lengths might still end up somewhat
  1549     // inaccurate. The concurrent refinement thread calculates an
  1550     // RSet's length concurrently with other CR threads updating it
  1551     // which might cause it to calculate the length incorrectly (if,
  1552     // say, it's in mid-coarsening). So I'll leave in the defensive
  1553     // conditional below just in case.
  1554     size_t rs_length_diff = 0;
  1555     if (_max_rs_lengths > _recorded_rs_lengths) {
  1556       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
  1557     }
  1558     _rs_length_diff_seq->add((double) rs_length_diff);
  1560     size_t copied_bytes = surviving_bytes;
  1561     double cost_per_byte_ms = 0.0;
  1562     if (copied_bytes > 0) {
  1563       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
  1564       if (_in_marking_window) {
  1565         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1566       } else {
  1567         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1568       }
  1569     }
  1571     double all_other_time_ms = pause_time_ms -
  1572       (update_rs_time + scan_rs_time + obj_copy_time + termination_time);
  1574     double young_other_time_ms = 0.0;
  1575     if (young_cset_region_length() > 0) {
  1576       young_other_time_ms =
  1577         _recorded_young_cset_choice_time_ms +
  1578         _recorded_young_free_cset_time_ms;
  1579       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1580                                           (double) young_cset_region_length());
  1581     }
  1582     double non_young_other_time_ms = 0.0;
  1583     if (old_cset_region_length() > 0) {
  1584       non_young_other_time_ms =
  1585         _recorded_non_young_cset_choice_time_ms +
  1586         _recorded_non_young_free_cset_time_ms;
  1588       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1589                                             (double) old_cset_region_length());
  1590     }
  1592     double constant_other_time_ms = all_other_time_ms -
  1593       (young_other_time_ms + non_young_other_time_ms);
  1594     _constant_other_time_ms_seq->add(constant_other_time_ms);
  1596     double survival_ratio = 0.0;
  1597     if (_bytes_in_collection_set_before_gc > 0) {
  1598       survival_ratio = (double) _bytes_copied_during_gc /
  1599                                    (double) _bytes_in_collection_set_before_gc;
  1600     }
  1602     _pending_cards_seq->add((double) _pending_cards);
  1603     _rs_lengths_seq->add((double) _max_rs_lengths);
  1604   }
  1606   _in_marking_window = new_in_marking_window;
  1607   _in_marking_window_im = new_in_marking_window_im;
  1608   _free_regions_at_end_of_collection = _g1->free_regions();
  1609   update_young_list_target_length();
  1611   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1612   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1613   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  1615   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
  1616 }
  1618 #define EXT_SIZE_FORMAT "%d%s"
  1619 #define EXT_SIZE_PARAMS(bytes)                                  \
  1620   byte_size_in_proper_unit((bytes)),                            \
  1621   proper_unit_for_byte_size((bytes))
  1623 void G1CollectorPolicy::print_heap_transition() {
  1624   if (G1Log::finer()) {
  1625     YoungList* young_list = _g1->young_list();
  1626     size_t eden_bytes = young_list->eden_used_bytes();
  1627     size_t survivor_bytes = young_list->survivor_used_bytes();
  1628     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  1629     size_t used = _g1->used();
  1630     size_t capacity = _g1->capacity();
  1631     size_t eden_capacity =
  1632       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
  1634     gclog_or_tty->print_cr(
  1635       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1636       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1637       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1638       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1639       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
  1640       EXT_SIZE_PARAMS(_prev_eden_capacity),
  1641       EXT_SIZE_PARAMS(eden_bytes),
  1642       EXT_SIZE_PARAMS(eden_capacity),
  1643       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
  1644       EXT_SIZE_PARAMS(survivor_bytes),
  1645       EXT_SIZE_PARAMS(used_before_gc),
  1646       EXT_SIZE_PARAMS(_capacity_before_gc),
  1647       EXT_SIZE_PARAMS(used),
  1648       EXT_SIZE_PARAMS(capacity));
  1650     _prev_eden_capacity = eden_capacity;
  1651   } else if (G1Log::fine()) {
  1652     _g1->print_size_transition(gclog_or_tty,
  1653                                _cur_collection_pause_used_at_start_bytes,
  1654                                _g1->used(), _g1->capacity());
  1655   }
  1656 }
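       // Illustrative walk-through of the adaptive refinement sizing below
       // (hypothetical numbers): with green_zone = 10 and update_rs_time
       // over goal_ms, g becomes (int)(10 * 0.9) = 9; if instead the time
       // is under goal and more than 10 buffers were processed, g becomes
       // (int)MAX2(10 * 1.1, 11.0) = 11, with the yellow and red zones
       // then set to g * 3 and g * 6 respectively.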
  1658 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1659                                                      double update_rs_processed_buffers,
  1660                                                      double goal_ms) {
  1661   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1662   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1664   if (G1UseAdaptiveConcRefinement) {
  1665     const int k_gy = 3, k_gr = 6;
  1666     const double inc_k = 1.1, dec_k = 0.9;
  1668     int g = cg1r->green_zone();
  1669     if (update_rs_time > goal_ms) {
  1670       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean mutator-only processing.
  1671     } else {
  1672       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1673         g = (int)MAX2(g * inc_k, g + 1.0);
  1674       }
  1675     }
  1676     // Change the refinement threads params
  1677     cg1r->set_green_zone(g);
  1678     cg1r->set_yellow_zone(g * k_gy);
  1679     cg1r->set_red_zone(g * k_gr);
  1680     cg1r->reinitialize_threads();
  1682     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1683     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1684                                     cg1r->yellow_zone());
  1685     // Change the barrier params
  1686     dcqs.set_process_completed_threshold(processing_threshold);
  1687     dcqs.set_max_completed_queue(cg1r->red_zone());
  1688   }
  1690   int curr_queue_size = dcqs.completed_buffers_num();
  1691   if (curr_queue_size >= cg1r->yellow_zone()) {
  1692     dcqs.set_completed_queue_padding(curr_queue_size);
  1693   } else {
  1694     dcqs.set_completed_queue_padding(0);
  1695   }
  1696   dcqs.notify_if_necessary();
  1697 }
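       // A sketch of how the two predict_base_elapsed_time_ms() overloads
       // below compose (hypothetical per-unit costs): at 0.005 ms per
       // pending card for 1000 cards, 0.01 ms per scanned card for 2000
       // cards and 5.0 ms of constant other time, the base estimate is
       //   1000 * 0.005 + 2000 * 0.01 + 5.0 = 30.0 ms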
  1699 double
  1700 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1701   size_t rs_length = predict_rs_length_diff();
  1702   size_t card_num;
  1703   if (gcs_are_young()) {
  1704     card_num = predict_young_card_num(rs_length);
  1705   } else {
  1706     card_num = predict_non_young_card_num(rs_length);
  1707   }
  1708   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1709 }
  1711 double
  1712 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1713                                                 size_t scanned_cards) {
  1714   return
  1715     predict_rs_update_time_ms(pending_cards) +
  1716     predict_rs_scan_time_ms(scanned_cards) +
  1717     predict_constant_other_time_ms();
  1718 }
  1720 double
  1721 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1722                                                   bool young) {
  1723   size_t rs_length = hr->rem_set()->occupied();
  1724   size_t card_num;
  1725   if (gcs_are_young()) {
  1726     card_num = predict_young_card_num(rs_length);
  1727   } else {
  1728     card_num = predict_non_young_card_num(rs_length);
  1729   }
  1730   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1732   double region_elapsed_time_ms =
  1733     predict_rs_scan_time_ms(card_num) +
  1734     predict_object_copy_time_ms(bytes_to_copy);
  1736   if (young)
  1737     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1738   else
  1739     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1741   return region_elapsed_time_ms;
  1742 }
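       // Illustrative example for predict_bytes_to_copy() below
       // (hypothetical numbers): an unmarked young region with 1 MB used
       // and a predicted survival rate of 0.3 for its age yields
       // 1048576 * 0.3 = 314572 bytes (truncated) expected to be copied.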
  1744 size_t
  1745 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1746   size_t bytes_to_copy;
  1747   if (hr->is_marked())
  1748     bytes_to_copy = hr->max_live_bytes();
  1749   else {
  1750     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
  1751     int age = hr->age_in_surv_rate_group();
  1752     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1753     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1754   }
  1755   return bytes_to_copy;
  1756 }
  1758 void
  1759 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
  1760                                           size_t survivor_cset_region_length) {
  1761   _eden_cset_region_length     = eden_cset_region_length;
  1762   _survivor_cset_region_length = survivor_cset_region_length;
  1763   _old_cset_region_length      = 0;
  1764 }
  1766 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1767   _recorded_rs_lengths = rs_lengths;
  1768 }
  1770 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1771                                                double elapsed_ms) {
  1772   _recent_gc_times_ms->add(elapsed_ms);
  1773   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1774   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1775 }
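       // Worked example for expansion_amount() below (hypothetical sizes,
       // assuming G1ExpandByPercentOfAvailable = 20): with 1024 MB
       // reserved and 512 MB committed, uncommitted = 512 MB and the
       // percentage-based amount is 512 MB * 20 / 100 = 102.4 MB; this is
       // already within [1 MB, uncommitted], so roughly 102 MB would be
       // returned when the recent GC overhead exceeds the threshold.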
  1777 size_t G1CollectorPolicy::expansion_amount() {
  1778   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  1779   double threshold = _gc_overhead_perc;
  1780   if (recent_gc_overhead > threshold) {
  1781     // We will double the existing space, or take
  1782     // G1ExpandByPercentOfAvailable % of the available expansion
  1783     // space, whichever is smaller, bounded below by a minimum
  1784     // expansion (unless that's all that's left.)
  1785     const size_t min_expand_bytes = 1*M;
  1786     size_t reserved_bytes = _g1->max_capacity();
  1787     size_t committed_bytes = _g1->capacity();
  1788     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  1789     size_t expand_bytes;
  1790     size_t expand_bytes_via_pct =
  1791       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  1792     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  1793     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  1794     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
  1796     ergo_verbose5(ErgoHeapSizing,
  1797                   "attempt heap expansion",
  1798                   ergo_format_reason("recent GC overhead higher than "
  1799                                      "threshold after GC")
  1800                   ergo_format_perc("recent GC overhead")
  1801                   ergo_format_perc("threshold")
  1802                   ergo_format_byte("uncommitted")
  1803                   ergo_format_byte_perc("calculated expansion amount"),
  1804                   recent_gc_overhead, threshold,
  1805                   uncommitted_bytes,
  1806                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  1808     return expand_bytes;
  1809   } else {
  1810     return 0;
  1811   }
  1812 }
  1814 class CountCSClosure: public HeapRegionClosure {
  1815   G1CollectorPolicy* _g1_policy;
  1816 public:
  1817   CountCSClosure(G1CollectorPolicy* g1_policy) :
  1818     _g1_policy(g1_policy) {}
  1819   bool doHeapRegion(HeapRegion* r) {
  1820     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  1821     return false;
  1822   }
  1823 };
  1825 void G1CollectorPolicy::count_CS_bytes_used() {
  1826   CountCSClosure cs_closure(this);
  1827   _g1->collection_set_iterate(&cs_closure);
  1828 }
  1830 void G1CollectorPolicy::print_summary(int level,
  1831                                       const char* str,
  1832                                       NumberSeq* seq) const {
  1833   double sum = seq->sum();
  1834   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  1835                 str, sum / 1000.0, seq->avg());
  1836 }
  1838 void G1CollectorPolicy::print_summary_sd(int level,
  1839                                          const char* str,
  1840                                          NumberSeq* seq) const {
  1841   print_summary(level, str, seq);
  1842   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  1843                 seq->num(), seq->sd(), seq->maximum());
  1844 }
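       // Illustrative trigger for the consistency check below
       // (hypothetical sums): recorded "other" totalling 10.0 ms against a
       // calculated 12.0 ms gives sum_ratio = 12.0 / 10.0 = 1.2 > 1.1, so
       // the mismatch warning is printed.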
  1846 void G1CollectorPolicy::check_other_times(int level,
  1847                                         NumberSeq* other_times_ms,
  1848                                         NumberSeq* calc_other_times_ms) const {
  1849   bool should_print = false;
  1850   LineBuffer buf(level + 2);
  1852   double max_sum = MAX2(fabs(other_times_ms->sum()),
  1853                         fabs(calc_other_times_ms->sum()));
  1854   double min_sum = MIN2(fabs(other_times_ms->sum()),
  1855                         fabs(calc_other_times_ms->sum()));
  1856   double sum_ratio = max_sum / min_sum;
  1857   if (sum_ratio > 1.1) {
  1858     should_print = true;
  1859     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  1860   }
  1862   double max_avg = MAX2(fabs(other_times_ms->avg()),
  1863                         fabs(calc_other_times_ms->avg()));
  1864   double min_avg = MIN2(fabs(other_times_ms->avg()),
  1865                         fabs(calc_other_times_ms->avg()));
  1866   double avg_ratio = max_avg / min_avg;
  1867   if (avg_ratio > 1.1) {
  1868     should_print = true;
  1869     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  1870   }
  1872   if (other_times_ms->sum() < -0.01) {
  1873     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  1874   }
  1876   if (other_times_ms->avg() < -0.01) {
  1877     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  1878   }
  1880   if (calc_other_times_ms->sum() < -0.01) {
  1881     should_print = true;
  1882     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  1883   }
  1885   if (calc_other_times_ms->avg() < -0.01) {
  1886     should_print = true;
  1887     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  1888   }
  1890   if (should_print)
  1891     print_summary(level, "Other(Calc)", calc_other_times_ms);
  1892 }
  1894 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  1895   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1896   MainBodySummary*    body_summary = summary->main_body_summary();
  1897   if (summary->get_total_seq()->num() > 0) {
  1898     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  1899     if (body_summary != NULL) {
  1900       print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
  1901       if (parallel) {
  1902         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  1903         print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  1904         print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
  1905         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  1906         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  1907         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  1908         print_summary(2, "Termination", body_summary->get_termination_seq());
  1909         print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
  1910         {
  1911           NumberSeq* other_parts[] = {
  1912             body_summary->get_ext_root_scan_seq(),
  1913             body_summary->get_satb_filtering_seq(),
  1914             body_summary->get_update_rs_seq(),
  1915             body_summary->get_scan_rs_seq(),
  1916             body_summary->get_obj_copy_seq(),
  1917             body_summary->get_termination_seq()
  1918           };
  1919           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  1920                                         6, other_parts);
  1921           check_other_times(2, body_summary->get_parallel_other_seq(),
  1922                             &calc_other_times_ms);
  1923         }
  1924       } else {
  1925         print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  1926         print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
  1927         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  1928         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  1929         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  1930       }
  1931     }
  1932     print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  1933     print_summary(1, "Other", summary->get_other_seq());
  1934     {
  1935       if (body_summary != NULL) {
  1936         NumberSeq calc_other_times_ms;
  1937         if (parallel) {
  1938           // parallel
  1939           NumberSeq* other_parts[] = {
  1940             body_summary->get_root_region_scan_wait_seq(),
  1941             body_summary->get_parallel_seq(),
  1942             body_summary->get_clear_ct_seq()
  1943           };
  1944           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  1945                                           3, other_parts);
  1946         } else {
  1947           // serial
  1948           NumberSeq* other_parts[] = {
  1949             body_summary->get_root_region_scan_wait_seq(),
  1950             body_summary->get_update_rs_seq(),
  1951             body_summary->get_ext_root_scan_seq(),
  1952             body_summary->get_satb_filtering_seq(),
  1953             body_summary->get_scan_rs_seq(),
  1954             body_summary->get_obj_copy_seq()
  1955           };
  1956           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  1957                                           6, other_parts);
  1958         }
  1959         check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
  1960       }
  1961     }
  1962   } else {
  1963     LineBuffer(1).append_and_print_cr("none");
  1964   }
  1965   LineBuffer(0).append_and_print_cr("");
  1966 }
  1968 void G1CollectorPolicy::print_tracing_info() const {
  1969   if (TraceGen0Time) {
  1970     gclog_or_tty->print_cr("ALL PAUSES");
  1971     print_summary_sd(0, "Total", _all_pause_times_ms);
  1972     gclog_or_tty->print_cr("");
  1973     gclog_or_tty->print_cr("");
  1974     gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  1975     gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  1976     gclog_or_tty->print_cr("");
  1978     gclog_or_tty->print_cr("EVACUATION PAUSES");
  1979     print_summary(_summary);
  1981     gclog_or_tty->print_cr("MISC");
  1982     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  1983     print_summary_sd(0, "Yields", _all_yield_times_ms);
  1984     for (int i = 0; i < _aux_num; ++i) {
  1985       if (_all_aux_times_ms[i].num() > 0) {
  1986         char buffer[96];
  1987         sprintf(buffer, "Aux%d", i);
  1988         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  1989       }
  1990     }
  1991   }
  1992   if (TraceGen1Time) {
  1993     if (_all_full_gc_times_ms->num() > 0) {
  1994       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  1995                  _all_full_gc_times_ms->num(),
  1996                  _all_full_gc_times_ms->sum() / 1000.0);
  1997       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  1998       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  1999                     _all_full_gc_times_ms->sd(),
  2000                     _all_full_gc_times_ms->maximum());
  2001     }
  2002   }
  2003 }
  2005 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2006 #ifndef PRODUCT
  2007   _short_lived_surv_rate_group->print_surv_rate_summary();
  2008   // add this call for any other surv rate groups
  2009 #endif // PRODUCT
  2010 }
  2012 #ifndef PRODUCT
  2013 // for debugging, bit of a hack...
  2014 static char*
  2015 region_num_to_mbs(int length) {
  2016   static char buffer[64];
  2017   double bytes = (double) (length * HeapRegion::GrainBytes);
  2018   double mbs = bytes / (double) (1024 * 1024);
  2019   sprintf(buffer, "%7.2lfMB", mbs);
  2020   return buffer;
  2021 }
  2022 #endif // PRODUCT
  2024 size_t G1CollectorPolicy::max_regions(int purpose) {
  2025   switch (purpose) {
  2026     case GCAllocForSurvived:
  2027       return _max_survivor_regions;
  2028     case GCAllocForTenured:
  2029       return REGIONS_UNLIMITED;
  2030     default:
  2031       ShouldNotReachHere();
  2032       return REGIONS_UNLIMITED;
  2033   };
  2034 }
  2036 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  2037   size_t expansion_region_num = 0;
  2038   if (GCLockerEdenExpansionPercent > 0) {
  2039     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  2040     double expansion_region_num_d = perc * (double) _young_list_target_length;
  2041     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  2042     // less than 1.0) we'll get 1.
  2043     expansion_region_num = (size_t) ceil(expansion_region_num_d);
  2044   } else {
  2045     assert(expansion_region_num == 0, "sanity");
  2046   }
  2047   _young_list_max_length = _young_list_target_length + expansion_region_num;
  2048   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  2049 }
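       // Illustrative example for the survivor sizing below (hypothetical
       // values): with a young list target length of 60 regions and
       // SurvivorRatio = 8, ceil(60 / 8.0) = ceil(7.5) = 8 survivor
       // regions are allowed.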
  2051 // Calculates survivor space parameters.
  2052 void G1CollectorPolicy::update_survivors_policy() {
  2053   double max_survivor_regions_d =
  2054                  (double) _young_list_target_length / (double) SurvivorRatio;
  2055   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  2056   // smaller than 1.0) we'll get 1.
  2057   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
  2059   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2060         HeapRegion::GrainWords * _max_survivor_regions);
  2061 }
  2063 #ifndef PRODUCT
  2064 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2065   CollectionSetChooser* _chooser;
  2066 public:
  2067   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2068     _chooser(chooser) {}
  2070   bool doHeapRegion(HeapRegion* r) {
  2071     if (!r->continuesHumongous()) {
  2072       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2073     }
  2074     return false;
  2075   }
  2076 };
  2078 bool G1CollectorPolicy::assertMarkedBytesDataOK() {
  2079   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2080   _g1->heap_region_iterate(&cl);
  2081   return true;
  2082 }
  2083 #endif
  2085 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  2086                                                      GCCause::Cause gc_cause) {
  2087   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2088   if (!during_cycle) {
  2089     ergo_verbose1(ErgoConcCycles,
  2090                   "request concurrent cycle initiation",
  2091                   ergo_format_reason("requested by GC cause")
  2092                   ergo_format_str("GC cause"),
  2093                   GCCause::to_string(gc_cause));
  2094     set_initiate_conc_mark_if_possible();
  2095     return true;
  2096   } else {
  2097     ergo_verbose1(ErgoConcCycles,
  2098                   "do not request concurrent cycle initiation",
  2099                   ergo_format_reason("concurrent cycle already in progress")
  2100                   ergo_format_str("GC cause"),
  2101                   GCCause::to_string(gc_cause));
  2102     return false;
  2103   }
  2104 }
  2106 void
  2107 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2108   // We are about to decide on whether this pause will be an
  2109   // initial-mark pause.
  2111   // First, during_initial_mark_pause() should not be already set. We
  2112   // will set it here if we have to. However, it should be cleared by
  2113   // the end of the pause (it's only set for the duration of an
  2114   // initial-mark pause).
  2115   assert(!during_initial_mark_pause(), "pre-condition");
  2117   if (initiate_conc_mark_if_possible()) {
  2118     // We had noticed on a previous pause that the heap occupancy has
  2119     // gone over the initiating threshold and we should start a
  2120     // concurrent marking cycle. So we might initiate one.
  2122     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2123     if (!during_cycle) {
  2124       // The concurrent marking thread is not "during a cycle", i.e.,
  2125       // it has completed the last one. So we can go ahead and
  2126       // initiate a new cycle.
  2128       set_during_initial_mark_pause();
  2129       // We do not allow mixed GCs during marking.
  2130       if (!gcs_are_young()) {
  2131         set_gcs_are_young(true);
  2132         ergo_verbose0(ErgoMixedGCs,
  2133                       "end mixed GCs",
  2134                       ergo_format_reason("concurrent cycle is about to start"));
  2135       }
  2137       // And we can now clear initiate_conc_mark_if_possible() as
  2138       // we've already acted on it.
  2139       clear_initiate_conc_mark_if_possible();
  2141       ergo_verbose0(ErgoConcCycles,
  2142                   "initiate concurrent cycle",
  2143                   ergo_format_reason("concurrent cycle initiation requested"));
  2144     } else {
  2145       // The concurrent marking thread is still finishing up the
  2146       // previous cycle. If we start one right now the two cycles
  2147       // overlap. In particular, the concurrent marking thread might
  2148       // be in the process of clearing the next marking bitmap (which
  2149       // we will use for the next cycle if we start one). Starting a
  2150       // cycle now will be bad given that parts of the marking
  2151       // information might get cleared by the marking thread. And we
  2152       // cannot wait for the marking thread to finish the cycle as it
  2153       // periodically yields while clearing the next marking bitmap
  2154       // and, if it's in a yield point, it's waiting for us to
  2155       // finish. So, at this point we will not start a cycle and we'll
  2156       // let the concurrent marking thread complete the last one.
  2157       ergo_verbose0(ErgoConcCycles,
  2158                     "do not initiate concurrent cycle",
  2159                     ergo_format_reason("concurrent cycle already in progress"));
  2160     }
  2161   }
  2162 }
  2164 class KnownGarbageClosure: public HeapRegionClosure {
  2165   G1CollectedHeap* _g1h;
  2166   CollectionSetChooser* _hrSorted;
  2168 public:
  2169   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2170     _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
  2172   bool doHeapRegion(HeapRegion* r) {
  2173     // We only include humongous regions in collection
  2174     // sets when concurrent mark shows that their contained object is
  2175     // unreachable.
  2177     // Do we have any marking information for this region?
  2178     if (r->is_marked()) {
  2179       // We will skip any region that's currently used as an old GC
  2180       // alloc region (we should not consider those for collection
  2181       // before we fill them up).
  2182       if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
  2183         _hrSorted->addMarkedHeapRegion(r);
  2184       }
  2185     }
  2186     return false;
  2187   }
  2188 };
  2190 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2191   G1CollectedHeap* _g1h;
  2192   CollectionSetChooser* _hrSorted;
  2193   jint _marked_regions_added;
  2194   size_t _reclaimable_bytes_added;
  2195   jint _chunk_size;
  2196   jint _cur_chunk_idx;
  2197   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2198   int _worker;
  2199   int _invokes;
  2201   void get_new_chunk() {
  2202     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2203     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2204   }
  2205   void add_region(HeapRegion* r) {
  2206     if (_cur_chunk_idx == _cur_chunk_end) {
  2207       get_new_chunk();
  2209     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2210     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2211     _marked_regions_added++;
  2212     _reclaimable_bytes_added += r->reclaimable_bytes();
  2213     _cur_chunk_idx++;
  2214   }
  2216 public:
  2217   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2218                            jint chunk_size,
  2219                            int worker) :
  2220       _g1h(G1CollectedHeap::heap()),
  2221       _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2222       _marked_regions_added(0), _reclaimable_bytes_added(0),
  2223       _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
  2225   bool doHeapRegion(HeapRegion* r) {
  2226     // We only include humongous regions in collection
  2227     // sets when concurrent mark shows that their contained object is
  2228     // unreachable.
  2229     _invokes++;
  2231     // Do we have any marking information for this region?
  2232     if (r->is_marked()) {
  2233       // We will skip any region that's currently used as an old GC
  2234       // alloc region (we should not consider those for collection
  2235       // before we fill them up).
  2236       if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
  2237         add_region(r);
  2238       }
  2239     }
  2240     return false;
  2241   }
  2242   jint marked_regions_added() { return _marked_regions_added; }
  2243   size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
  2244   int invokes() { return _invokes; }
  2245 };
  2247 class ParKnownGarbageTask: public AbstractGangTask {
  2248   CollectionSetChooser* _hrSorted;
  2249   jint _chunk_size;
  2250   G1CollectedHeap* _g1;
  2251 public:
  2252   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
  2253     AbstractGangTask("ParKnownGarbageTask"),
  2254     _hrSorted(hrSorted), _chunk_size(chunk_size),
  2255     _g1(G1CollectedHeap::heap()) { }
  2257   void work(uint worker_id) {
  2258     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
  2259                                                _chunk_size,
  2260                                                worker_id);
  2261     // Back to zero for the claim value.
  2262     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
  2263                                          _g1->workers()->active_workers(),
  2264                                          HeapRegion::InitialClaimValue);
  2265     jint regions_added = parKnownGarbageCl.marked_regions_added();
  2266     size_t reclaimable_bytes_added =
  2267                                    parKnownGarbageCl.reclaimable_bytes_added();
  2268     _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
  2269     if (G1PrintParCleanupStats) {
  2270       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
  2271                  worker_id, parKnownGarbageCl.invokes(), regions_added);
  2272     }
  2273   }
  2274 };
  2276 void
  2277 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  2278   double start_sec;
  2279   if (G1PrintParCleanupStats) {
  2280     start_sec = os::elapsedTime();
  2281   }
  2283   _collectionSetChooser->clearMarkedHeapRegions();
  2284   double clear_marked_end_sec;
  2285   if (G1PrintParCleanupStats) {
  2286     clear_marked_end_sec = os::elapsedTime();
  2287     gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
  2288                            (clear_marked_end_sec - start_sec) * 1000.0);
  2289   }
  2291   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2292     const size_t OverpartitionFactor = 4;
  2293     size_t WorkUnit;
  2294     // The use of MinChunkSize = 8 in the original code
  2295     // causes some assertion failures when the total number of
  2296   // regions is less than 8.  The code here tries to fix that.
  2297     // Should the original code also be fixed?
  2298     if (no_of_gc_threads > 0) {
  2299       const size_t MinWorkUnit =
  2300         MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
  2301       WorkUnit =
  2302         MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
  2303              MinWorkUnit);
  2304     } else {
  2305       assert(no_of_gc_threads > 0,
  2306         "The active gc workers should be greater than 0");
  2307       // In a product build do something reasonable to avoid a crash.
  2308       const size_t MinWorkUnit =
  2309         MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
  2310       WorkUnit =
  2311         MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
  2312              MinWorkUnit);
  2313     }
  2314     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
  2315                                                              WorkUnit);
  2316     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  2317                                             (int) WorkUnit);
  2318     _g1->workers()->run_task(&parKnownGarbageTask);
  2320     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2321            "sanity check");
  2322   } else {
  2323     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  2324     _g1->heap_region_iterate(&knownGarbagecl);
  2325   }
  2326   double known_garbage_end_sec;
  2327   if (G1PrintParCleanupStats) {
  2328     known_garbage_end_sec = os::elapsedTime();
  2329     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
  2330                       (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  2331   }
  2333   _collectionSetChooser->sortMarkedHeapRegions();
  2334   double end_sec = os::elapsedTime();
  2335   if (G1PrintParCleanupStats) {
  2336     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
  2337                            (end_sec - known_garbage_end_sec) * 1000.0);
  2338   }
  2340   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  2341   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  2342   _cur_mark_stop_world_time_ms += elapsed_time_ms;
  2343   _prev_collection_pause_end_ms += elapsed_time_ms;
  2344   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
  2345 }
  2347 // Add the heap region at the head of the non-incremental collection set
  2348 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  2349   assert(_inc_cset_build_state == Active, "Precondition");
  2350   assert(!hr->is_young(), "non-incremental add of young region");
  2352   assert(!hr->in_collection_set(), "should not already be in the CSet");
  2353   hr->set_in_collection_set(true);
  2354   hr->set_next_in_collection_set(_collection_set);
  2355   _collection_set = hr;
  2356   _collection_set_bytes_used_before += hr->used();
  2357   _g1->register_region_with_in_cset_fast_test(hr);
  2358   size_t rs_length = hr->rem_set()->occupied();
  2359   _recorded_rs_lengths += rs_length;
  2360   _old_cset_region_length += 1;
  2361 }
  2363 // Initialize the per-collection-set information
  2364 void G1CollectorPolicy::start_incremental_cset_building() {
  2365   assert(_inc_cset_build_state == Inactive, "Precondition");
  2367   _inc_cset_head = NULL;
  2368   _inc_cset_tail = NULL;
  2369   _inc_cset_bytes_used_before = 0;
  2371   _inc_cset_max_finger = 0;
  2372   _inc_cset_recorded_rs_lengths = 0;
  2373   _inc_cset_recorded_rs_lengths_diffs = 0;
  2374   _inc_cset_predicted_elapsed_time_ms = 0.0;
  2375   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  2376   _inc_cset_build_state = Active;
  2377 }
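       // Illustrative example of the diff folding done below (hypothetical
       // values): with a recorded length of 500 and accumulated diffs of
       // +20 the result is 520; with diffs of -30 (a sampling artifact)
       // the defensive branch yields 500 - 30 = 470 rather than
       // underflowing.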
  2379 void G1CollectorPolicy::finalize_incremental_cset_building() {
  2380   assert(_inc_cset_build_state == Active, "Precondition");
  2381   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  2383   // The two "main" fields, _inc_cset_recorded_rs_lengths and
  2384   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  2385   // that adds a new region to the CSet. Further updates by the
  2386   // concurrent refinement thread that samples the young RSet lengths
  2387   // are accumulated in the *_diffs fields. Here we add the diffs to
  2388   // the "main" fields.
  2390   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
  2391     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  2392   } else {
  2393     // This is defensive. The diff should in theory be always positive
  2394     // as RSets can only grow between GCs. However, given that we
  2395     // sample their size concurrently with other threads updating them
  2396     // it's possible that we might get the wrong size back, which
  2397     // could make the calculations somewhat inaccurate.
  2398     size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
  2399     if (_inc_cset_recorded_rs_lengths >= diffs) {
  2400       _inc_cset_recorded_rs_lengths -= diffs;
  2401     } else {
  2402       _inc_cset_recorded_rs_lengths = 0;
  2403     }
  2404   }
  2405   _inc_cset_predicted_elapsed_time_ms +=
  2406                                      _inc_cset_predicted_elapsed_time_ms_diffs;
  2408   _inc_cset_recorded_rs_lengths_diffs = 0;
  2409   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  2410 }
  2412 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  2413   // This routine is used when:
  2414   // * adding survivor regions to the incremental cset at the end of an
  2415   //   evacuation pause,
  2416   // * adding the current allocation region to the incremental cset
  2417   //   when it is retired, and
  2418   // * updating existing policy information for a region in the
  2419   //   incremental cset via young list RSet sampling.
  2420   // Therefore this routine may be called at a safepoint by the
  2421   // VM thread, or in-between safepoints by mutator threads (when
  2422   // retiring the current allocation region) or a concurrent
  2423   // refine thread (RSet sampling).
  2425   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  2426   size_t used_bytes = hr->used();
  2427   _inc_cset_recorded_rs_lengths += rs_length;
  2428   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  2429   _inc_cset_bytes_used_before += used_bytes;
  2431   // Cache the values we have added to the aggregated information
  2432   // in the heap region in case we have to remove this region from
  2433   // the incremental collection set, or it is updated by the
  2434   // RSet sampling code.
  2435   hr->set_recorded_rs_length(rs_length);
  2436   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  2437 }
  2439 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
  2440                                                      size_t new_rs_length) {
  2441   // Update the CSet information that is dependent on the new RS length
  2442   assert(hr->is_young(), "Precondition");
  2443   assert(!SafepointSynchronize::is_at_safepoint(),
  2444                                                "should not be at a safepoint");
  2446   // We could have updated _inc_cset_recorded_rs_lengths and
  2447   // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  2448   // that atomically, as this code is executed by a concurrent
  2449   // refinement thread, potentially concurrently with a mutator thread
  2450   // allocating a new region and also updating the same fields. To
  2451   // avoid the atomic operations we accumulate these updates on two
  2452   // separate fields (*_diffs) and we'll just add them to the "main"
  2453   // fields at the start of a GC.
  2455   ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  2456   ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  2457   _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
  2459   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  2460   double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  2461   double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  2462   _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
  2464   hr->set_recorded_rs_length(new_rs_length);
  2465   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
  2466 }
  2468 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  2469   assert(hr->is_young(), "invariant");
  2470   assert(hr->young_index_in_cset() > -1, "should have already been set");
  2471   assert(_inc_cset_build_state == Active, "Precondition");
  2473   // We need to clear and set the cached recorded/cached collection set
  2474   // information in the heap region here (before the region gets added
  2475   // to the collection set). An individual heap region's cached values
  2476   // are calculated, aggregated with the policy collection set info,
  2477   // and cached in the heap region here (initially) and (subsequently)
  2478   // by the Young List sampling code.
  2480   size_t rs_length = hr->rem_set()->occupied();
  2481   add_to_incremental_cset_info(hr, rs_length);
  2483   HeapWord* hr_end = hr->end();
  2484   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
  2486   assert(!hr->in_collection_set(), "invariant");
  2487   hr->set_in_collection_set(true);
  2488   assert( hr->next_in_collection_set() == NULL, "invariant");
  2490   _g1->register_region_with_in_cset_fast_test(hr);
  2491 }
  2493 // Add the region at the RHS of the incremental cset
  2494 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  2495   // We should only ever be appending survivors at the end of a pause
  2496   assert( hr->is_survivor(), "Logic");
  2498   // Do the 'common' stuff
  2499   add_region_to_incremental_cset_common(hr);
  2501   // Now add the region at the right hand side
  2502   if (_inc_cset_tail == NULL) {
  2503     assert(_inc_cset_head == NULL, "invariant");
  2504     _inc_cset_head = hr;
  2505   } else {
  2506     _inc_cset_tail->set_next_in_collection_set(hr);
  2507   }
  2508   _inc_cset_tail = hr;
  2509 }
  2511 // Add the region to the LHS of the incremental cset
  2512 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  2513   // Survivors should be added to the RHS at the end of a pause
  2514   assert(!hr->is_survivor(), "Logic");
  2516   // Do the 'common' stuff
  2517   add_region_to_incremental_cset_common(hr);
  2519   // Add the region at the left hand side
  2520   hr->set_next_in_collection_set(_inc_cset_head);
  2521   if (_inc_cset_head == NULL) {
  2522     assert(_inc_cset_tail == NULL, "Invariant");
  2523     _inc_cset_tail = hr;
  2524   }
  2525   _inc_cset_head = hr;
  2526 }
  2528 #ifndef PRODUCT
  2529 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  2530   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
  2532   st->print_cr("\nCollection_set:");
  2533   HeapRegion* csr = list_head;
  2534   while (csr != NULL) {
  2535     HeapRegion* next = csr->next_in_collection_set();
  2536     assert(csr->in_collection_set(), "bad CS");
  2537     st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
  2538                  "age: %4d, y: %d, surv: %d",
  2539                         csr->bottom(), csr->end(),
  2540                         csr->top(),
  2541                         csr->prev_top_at_mark_start(),
  2542                         csr->next_top_at_mark_start(),
  2543                         csr->top_at_conc_mark_count(),
  2544                         csr->age_in_surv_rate_group_cond(),
  2545                         csr->is_young(),
  2546                         csr->is_survivor());
  2547     csr = next;
  2548   }
  2549 }
  2550 #endif // !PRODUCT
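       // Worked example for the mixed GC decision below (hypothetical
       // numbers, assuming G1HeapWastePercent = 10): 80 MB reclaimable in
       // a 1024 MB heap gives perc = 80 * 100.0 / 1024 = 7.8125%, which is
       // below the threshold, so mixed GCs would not continue.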
  2552 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
  2553                                                 const char* false_action_str) {
  2554   CollectionSetChooser* cset_chooser = _collectionSetChooser;
  2555   if (cset_chooser->isEmpty()) {
  2556     ergo_verbose0(ErgoMixedGCs,
  2557                   false_action_str,
  2558                   ergo_format_reason("candidate old regions not available"));
  2559     return false;
  2560   }
  2561   size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
  2562   size_t capacity_bytes = _g1->capacity();
  2563   double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  2564   double threshold = (double) G1HeapWastePercent;
  2565   if (perc < threshold) {
  2566     ergo_verbose4(ErgoMixedGCs,
  2567               false_action_str,
  2568               ergo_format_reason("reclaimable percentage lower than threshold")
  2569               ergo_format_region("candidate old regions")
  2570               ergo_format_byte_perc("reclaimable")
  2571               ergo_format_perc("threshold"),
  2572               cset_chooser->remainingRegions(),
  2573               reclaimable_bytes, perc, threshold);
  2574     return false;
  2575   }
  2577   ergo_verbose4(ErgoMixedGCs,
  2578                 true_action_str,
  2579                 ergo_format_reason("candidate old regions available")
  2580                 ergo_format_region("candidate old regions")
  2581                 ergo_format_byte_perc("reclaimable")
  2582                 ergo_format_perc("threshold"),
  2583                 cset_chooser->remainingRegions(),
  2584                 reclaimable_bytes, perc, threshold);
  2585   return true;
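
// Worked example (illustrative numbers): with a 1 GB heap capacity and
// 64 MB of reclaimable bytes spread over the candidate old regions,
// perc = 64 * 100.0 / 1024 = 6.25, so mixed GCs start only if the
// G1HeapWastePercent threshold is 6.25 or lower.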

void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;
  double time_remaining_ms = target_pause_time_ms - base_time_ms;
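
  // From here on, CSet selection is a budgeting exercise: the predicted
  // fixed cost (base_time_ms) has been charged up front, and every region
  // added below consumes a further slice of time_remaining_ms.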

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);

  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _last_gc_was_young = gcs_are_young();

  if (_last_gc_was_young) {
    ++_young_pause_num;
  } else {
    ++_mixed_pause_num;
  }

  // The young list is laid out so that the survivor regions from the
  // previous pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  size_t survivor_region_length = young_list->survivor_length();
  size_t eden_region_length = young_list->length() - survivor_region_length;
  init_cset_region_lengths(eden_region_length, survivor_region_length);
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;

  if (!gcs_are_young()) {
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    assert(cset_chooser->verify(), "CSet Chooser verification - pre");
    const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
    const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
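    // These bounds shape the mixed-GC strategy: the minimum guarantees
    // forward progress through the candidate list even when regions look
    // expensive, while the maximum caps the old-region work taken on by
    // any single pause.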

    size_t expensive_region_num = 0;
    bool check_time_remaining = adaptive_young_list_length();
    HeapRegion* hr = cset_chooser->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        ergo_verbose2(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("old CSet region num reached max")
                      ergo_format_region("old")
                      ergo_format_region("max"),
                      old_cset_region_length(), max_old_cset_length);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            ergo_verbose4(ErgoCSetConstruction,
                          "finish adding old regions to CSet",
                          ergo_format_reason("predicted time is too high")
                          ergo_format_ms("predicted time")
                          ergo_format_ms("remaining time")
                          ergo_format_region("old")
                          ergo_format_region("min"),
                          predicted_time_ms, time_remaining_ms,
                          old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          ergo_verbose2(ErgoCSetConstruction,
                        "finish adding old regions to CSet",
                        ergo_format_reason("old CSet region num reached min")
                        ergo_format_region("old")
                        ergo_format_region("min"),
                        old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms -= predicted_time_ms;
      predicted_pause_time_ms += predicted_time_ms;
      cset_chooser->remove_and_move_to_next(hr);
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser->peek();
    }
    if (hr == NULL) {
      ergo_verbose0(ErgoCSetConstruction,
                    "finish adding old regions to CSet",
                    ergo_format_reason("candidate old regions not available"));
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      ergo_verbose4(ErgoCSetConstruction,
                    "added expensive regions to CSet",
                    ergo_format_reason("old CSet region num not reached min")
                    ergo_format_region("old")
                    ergo_format_region("expensive")
                    ergo_format_region("min")
                    ergo_format_ms("remaining time"),
                    old_cset_region_length(),
                    expensive_region_num,
                    min_old_cset_length,
                    time_remaining_ms);
    }

    assert(cset_chooser->verify(), "CSet Chooser verification - post");
  }

  stop_incremental_cset_building();

  count_CS_bytes_used();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                old_cset_region_length(),
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}
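
// Illustrative budget arithmetic for the selection above (example numbers,
// not measurements): with target_pause_time_ms = 200.0 and a predicted
// base time of 50.0 ms, time_remaining_ms starts at 150.0 ms. If the young
// regions are predicted to take 100.0 ms, a mixed pause has 50.0 ms left
// in which to fit old regions, subject to the min/max old CSet bounds.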
