src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      tonyp
date:        Fri, 16 Dec 2011 02:14:27 -0500
changeset:   3337 (41406797186b)
parent:      3326 (d23d2b18183e)
child:       3356 (67fdcb391461)
permissions: -rw-r--r--

7113012: G1: rename not-fully-young GCs as "mixed"
Summary: Renamed partially-young GCs as "mixed" and fully-young GCs as "young". Changed all external output that includes those terms (GC log and GC ergo log) as well as any comments, fields, methods, etc. The changeset also includes some very minor code tidying (added some curly brackets).
Reviewed-by: johnc, brutisso

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1) {
      _cur += res;
    } else {
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN -1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void append_and_print_cr(const char* format, ...) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }
};
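
// A minimal usage sketch (hypothetical values; the real call sites are in
// print_stats() and friends further down). The indent level translates
// into leading spaces, and append_and_print_cr() flushes the buffered
// line to the GC log in one piece:
//
//   LineBuffer buf(1);                 // indented by 1 * INDENT_CHARS
//   buf.append("[%s (ms):", "Update RS");
//   buf.append("  %3.1lf", 1.5);
//   buf.append_and_print_cr("]");      // prints "   [Update RS (ms):  1.5]"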

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),
  _using_new_ratio_calculations(false),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),
  _mark_closure_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),
  _young_pause_num(0),
  _mixed_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _all_full_gc_times_ms(new NumberSeq()),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_young_gcs(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;
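  // For example, ParallelGCThreads == 4 falls into the last branch and
  // picks index 3, i.e. the fourth column of the defaults tables above;
  // anything over 8 threads is clamped to the last column.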

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }
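
  // Worked example (hypothetical flags): with only -XX:MaxGCPauseMillis=50
  // on the command line, GCPauseIntervalMillis defaults to 50 + 1 = 51
  // above, giving the MMU tracker below a goal of at most 50ms of GC time
  // in any 51ms time slice. Setting only -XX:GCPauseIntervalMillis, on
  // the other hand, is rejected during initialization.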

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
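  // e.g., a GCTimeRatio of 9 gives 100 * (1 / (1 + 9)) = 10, i.e. a
  // target of at most 10% of total time spent on GC.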

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
}

// Increment "i", mod "len"
static void inc_mod(int& i, int len) {
  i++; if (i == len) i = 0;
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

// The easiest way to deal with the parsing of the NewSize /
// MaxNewSize / etc. parameters is to re-use the code in the
// TwoGenerationCollectorPolicy class. This is similar to what
// ParallelScavenge does with its GenerationSizer class (see
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
private:
  size_t size_to_region_num(size_t byte_size) {
    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
  }

public:
  G1YoungGenSizer() {
    initialize_flags();
    initialize_size_info();
  }
  size_t min_young_region_num() {
    return size_to_region_num(_min_gen0_size);
  }
  size_t initial_young_region_num() {
    return size_to_region_num(_initial_gen0_size);
  }
  size_t max_young_region_num() {
    return size_to_region_num(_max_gen0_size);
  }
};

void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");
  size_t young_size = number_of_heap_regions / (NewRatio + 1);
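  // Worked example (hypothetical heap): 1536 regions with -XX:NewRatio=2
  // gives 1536 / (2 + 1) = 512 young regions, and setting min == max
  // below pins the young gen at that fixed length.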
  _min_desired_young_length = young_size;
  _max_desired_young_length = young_size;
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  G1YoungGenSizer sizer;
  _min_desired_young_length = sizer.min_young_region_num();
  _max_desired_young_length = sizer.max_young_region_num();

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
      update_young_list_size_using_newratio(_g1->n_regions());
      _using_new_ratio_calculations = true;
    }
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");

  set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    assert(_min_desired_young_length == _max_desired_young_length, "Min and max young size differ");
    _young_list_fixed_length = _min_desired_young_length;
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(size_t young_length,
                                         double base_time_ms,
                                         size_t base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
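  // Hypothetical reading of the numbers: with 1MB regions, an
  // accumulated survival rate prediction of 2.5 (i.e., 2.5 regions'
  // worth of surviving data) gives bytes_to_copy of roughly 2.5MB.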
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (size_t) ceil(reserve_regions_d);
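  // e.g. a _reserve_factor of 0.1 (G1ReservePercent=10) on a 25 region
  // heap gives 2.5, which the ceiling rounds up to 3 reserved regions.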

  if (_using_new_ratio_calculations) {
    // -XX:NewRatio was specified so we need to update the
    // young gen length when the heap size has changed.
    update_young_list_size_using_newratio(new_number_of_regions);
  }
}

size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     size_t base_min_length) {
  size_t desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
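      // Hypothetical numbers: if the MMU tracker says the next GC may
      // have to wait when_ms = 250ms and the predicted allocation rate
      // is 0.04 regions/ms, we want at least ceil(0.04 * 250) = 10
      // extra eden regions to bridge that window.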
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_min_desired_young_length, desired_min_length);
}

size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _max_desired_young_length;
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    if (gcs_are_young()) {
      young_list_target_length = _young_list_fixed_length;
    } else {
      // A bit arbitrary: during mixed GCs we allocate half
      // the young regions to try to add old regions to the CSet.
      young_list_target_length = _young_list_fixed_length / 2;
      // We choose to accept that we might go under the desired min
      // length given that we intentionally ask for a smaller young gen.
      desired_min_length = absolute_min_length;
    }
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                   size_t base_min_length,
                                                   size_t desired_min_length,
                                                   size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.
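      //
      // For instance (made-up bounds): min = 8 and max = 40 gives
      // diff = 16, so we probe 8 + 16 = 24; if 24 fits we probe
      // 24 + 8 = 32; if 32 does not fit we probe 24 + 4 = 28, and so
      // on until diff reaches 0 and min holds the answer.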

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
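    // (all integer arithmetic, so the result rounds down: e.g. a
    // sampled length of 105 yields 105 * 1100 / 1000 = 115, not 115.5)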
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  _should_revert_to_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->updateAfterFullCollection();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there's no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
    _par_last_gc_worker_other_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  // This is initialized to zero here and is set during
  // the evacuation pause if marking is in progress.
  _cur_satb_drain_time_ms = 0.0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _should_revert_to_young_gcs = false;
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _all_yield_times_ms->add(yield_ms);
  }
}

void G1CollectorPolicy::record_concurrent_pause_end() {
}

template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T sum = (T)0;
  for (int i = 0; i < n; i++) {
    int j = (start + i) % N;
    sum += sum_arr[j];
  }
  return sum;
}

void G1CollectorPolicy::print_par_stats(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s (ms):", str);
  for (uint i = 0; i < no_of_gc_threads(); ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append("  %3.1lf", val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) no_of_gc_threads();
  buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
    avg, min, max, max - min);
}

void G1CollectorPolicy::print_par_sizes(int level,
                                        const char* str,
                                        double* data) {
  double min = data[0], max = data[0];
  double total = 0.0;
  LineBuffer buf(level);
  buf.append("[%s :", str);
  for (uint i = 0; i < no_of_gc_threads(); ++i) {
    double val = data[i];
    if (val < min)
      min = val;
    if (val > max)
      max = val;
    total += val;
    buf.append(" %d", (int) val);
  }
  buf.append_and_print_cr("");
  double avg = total / (double) no_of_gc_threads();
  buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
    (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}

void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}

double G1CollectorPolicy::avg_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = 0.0;
    for (uint i = 0; i < no_of_gc_threads(); ++i) {
      ret += data[i];
    }
    return ret / (double) no_of_gc_threads();
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_value(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double ret = data[0];
    for (uint i = 1; i < no_of_gc_threads(); ++i) {
      if (data[i] > ret) {
        ret = data[i];
      }
    }
    return ret;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::sum_of_values(double* data) {
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    double sum = 0.0;
    for (uint i = 0; i < no_of_gc_threads(); i++) {
      sum += data[i];
    }
    return sum;
  } else {
    return data[0];
  }
}

double G1CollectorPolicy::max_sum(double* data1, double* data2) {
  double ret = data1[0] + data2[0];

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    for (uint i = 1; i < no_of_gc_threads(); ++i) {
      double data = data1[i] + data2[i];
      if (data > ret) {
        ret = data;
      }
    }
  }
  return ret;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
  double end_time_sec = os::elapsedTime();
  double elapsed_ms = _last_pause_time_ms;
  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();
  set_no_of_gc_threads(no_of_gc_threads);

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark)
    record_concurrent_mark_init_end(0.0);

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;

  if (!_g1->mark_in_progress() && !_last_young_gc) {
    assert(!last_pause_included_initial_mark, "invariant");
    if (cur_used_bytes > marking_initiating_used_threshold) {
      if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
        assert(!during_initial_mark_pause(), "we should not see this here");

        ergo_verbose3(ErgoConcCycles,
                      "request concurrent cycle initiation",
                      ergo_format_reason("occupancy higher than threshold")
                      ergo_format_byte("occupancy")
                      ergo_format_byte_perc("threshold"),
                      cur_used_bytes,
                      marking_initiating_used_threshold,
                      (double) InitiatingHeapOccupancyPercent);

        // Note: this might have already been set, if during the last
        // pause we decided to start a cycle but at the beginning of
        // this pause we decided to postpone it. That's OK.
        set_initiate_conc_mark_if_possible();
      } else {
        ergo_verbose2(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("occupancy lower than previous occupancy")
                  ergo_format_byte("occupancy")
                  ergo_format_byte("previous occupancy"),
                  cur_used_bytes,
                  _prev_collection_pause_used_at_end_bytes);
      }
    }
  }

  _prev_collection_pause_used_at_end_bytes = cur_used_bytes;

  _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                          end_time_sec, false);

  // This assert is exempted when we're doing parallel collection pauses,
  // because the fragmentation caused by the parallel GC allocation buffers
  // can lead to more memory being used during collection than was used
  // before. Best leave this out until the fragmentation problem is fixed.
  // Pauses in which evacuation failed can also lead to negative
  // collections, since no space is reclaimed from a region containing an
  // object whose evacuation failed.
  // Further, we're now always doing parallel collection.  But I'm still
  // leaving this here as a placeholder for a more precise assertion later.
  // (DLD, 10/05.)
  assert((true || parallel) // Always using GC LABs now.
         || _g1->evacuation_failed()
         || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
         "Negative collection");

  size_t freed_bytes =
    _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;

  double survival_fraction =
    (double)surviving_bytes/
    (double)_collection_set_bytes_used_before;

  // These values are used to update the summary information that is
  // displayed when TraceGen0Time is enabled, and are output as part
  // of the PrintGCDetails output, in the non-parallel case.

  double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
  double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  double update_rs_processed_buffers =
    sum_of_values(_par_last_update_rs_processed_buffers);
  double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  double termination_time = avg_value(_par_last_termination_times_ms);

  double known_time = ext_root_scan_time +
                      mark_stack_scan_time +
                      update_rs_time +
                      scan_rs_time +
                      obj_copy_time;

  double other_time_ms = elapsed_ms;

  // Subtract the SATB drain time. It's initialized to zero at the
  // start of the pause and is updated during the pause if marking
  // is in progress.
  other_time_ms -= _cur_satb_drain_time_ms;

  if (parallel) {
    other_time_ms -= _cur_collection_par_time_ms;
  } else {
    other_time_ms -= known_time;
  }

  // Subtract the time taken to clean the card table from the
  // current value of "other time"
  other_time_ms -= _cur_clear_ct_time_ms;

  // Subtract the time spent completing marking in the collection
  // set. Note if marking is not in progress during the pause
  // the value of _mark_closure_time_ms will be zero.
  other_time_ms -= _mark_closure_time_ms;

  // TraceGen0Time and TraceGen1Time summary info updating.
  _all_pause_times_ms->add(elapsed_ms);

  if (update_stats) {
    _summary->record_total_time_ms(elapsed_ms);
    _summary->record_other_time_ms(other_time_ms);

    MainBodySummary* body_summary = _summary->main_body_summary();
    assert(body_summary != NULL, "should not be null!");

    // This will be non-zero iff marking is currently in progress (i.e.
    // _g1->mark_in_progress() == true) and the current pause was not
    // an initial mark pause. Since the body_summary items are NumberSeqs,
    // however, they have to be consistent and updated in lock-step with
    // each other. Therefore we unconditionally record the SATB drain
    // time - even if it's zero.
    body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);

    body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
    body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
    body_summary->record_update_rs_time_ms(update_rs_time);
    body_summary->record_scan_rs_time_ms(scan_rs_time);
    body_summary->record_obj_copy_time_ms(obj_copy_time);

    if (parallel) {
      body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
      body_summary->record_termination_time_ms(termination_time);

      double parallel_known_time = known_time + termination_time;
      double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
      body_summary->record_parallel_other_time_ms(parallel_other_time);
    }

    body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
    body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);

    // We exempt parallel collection from this check because Alloc Buffer
    // fragmentation can produce negative collections.  Same with evac
    // failure.
    // Further, we're now always doing parallel collection.  But I'm still
    // leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
    assert((true || parallel)
           || _g1->evacuation_failed()
           || surviving_bytes <= _collection_set_bytes_used_before,
           "Or else negative collection!");
  1303     // this is where we update the allocation rate of the application
  1304     double app_time_ms =
  1305       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1306     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1307       // This usually happens due to the timer not having the required
  1308       // granularity. Some Linuxes are the usual culprits.
  1309       // We'll just set it to something (arbitrarily) small.
  1310       app_time_ms = 1.0;
  1311     }
  1312     // We maintain the invariant that all objects allocated by mutator
  1313     // threads will be allocated out of eden regions. So, we can use
  1314     // the eden region number allocated since the previous GC to
  1315     // calculate the application's allocation rate. The only exception
  1316     // to that is humongous objects that are allocated separately. But
  1317     // given that humongous object allocations do not really affect
  1318     // either the pause's duration or when the next pause will take
  1319     // place we can safely ignore them here.
  1320     size_t regions_allocated = eden_cset_region_length();
  1321     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1322     _alloc_rate_ms_seq->add(alloc_rate_ms);
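           // Illustrative example with assumed numbers (editor's sketch):
           // 50 eden regions allocated over 2000.0 ms of mutator time give
           // alloc_rate_ms = 50 / 2000.0 = 0.025 regions/ms.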
  1324     double interval_ms =
  1325       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1326     update_recent_gc_times(end_time_sec, elapsed_ms);
  1327     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
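           // Illustrative example with assumed numbers (editor's sketch):
           // recent GC times summing to 120.0 ms over a 1000.0 ms interval
           // give a ratio of 0.12, i.e. roughly 12% of recent wall-clock
           // time was spent in GC pauses.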
  1328     if (recent_avg_pause_time_ratio() < 0.0 ||
  1329         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1330 #ifndef PRODUCT
  1331       // Dump info to allow post-facto debugging
  1332       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1333       gclog_or_tty->print_cr("-------------------------------------------");
  1334       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1335       _recent_gc_times_ms->dump();
  1336       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1337       _recent_prev_end_times_for_all_gcs_sec->dump();
  1338       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1339                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1340       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1341       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1342 #endif  // !PRODUCT
  1343       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1344       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1345       if (_recent_avg_pause_time_ratio < 0.0) {
  1346         _recent_avg_pause_time_ratio = 0.0;
  1347       } else {
  1348         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1349         _recent_avg_pause_time_ratio = 1.0;
  1350       }
  1351     }
  1352   }
  1354   for (int i = 0; i < _aux_num; ++i) {
  1355     if (_cur_aux_times_set[i]) {
  1356       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1357     }
  1358   }
  1360   // PrintGCDetails output
  1361   if (PrintGCDetails) {
  1362     bool print_marking_info =
  1363       _g1->mark_in_progress() && !last_pause_included_initial_mark;
  1365     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1366                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1367                            elapsed_ms / 1000.0);
  1369     if (print_marking_info) {
  1370       print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
  1371     }
  1373     if (parallel) {
  1374       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1375       print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
  1376       print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
  1377       if (print_marking_info) {
  1378         print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
  1379       }
  1380       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1381       print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
  1382       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1383       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1384       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1385       print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
  1386       print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
  1388       for (int i = 0; i < _parallel_gc_threads; i++) {
  1389         _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
  1391         double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
  1392                                    _par_last_mark_stack_scan_times_ms[i] +
  1393                                    _par_last_update_rs_times_ms[i] +
  1394                                    _par_last_scan_rs_times_ms[i] +
  1395                                    _par_last_obj_copy_times_ms[i] +
  1396                                    _par_last_termination_times_ms[i];
  1398         _par_last_gc_worker_other_times_ms[i] = _cur_collection_par_time_ms - worker_known_time;
  1399       }
  1400       print_par_stats(2, "GC Worker", _par_last_gc_worker_times_ms);
  1401       print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
  1402     } else {
  1403       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1404       if (print_marking_info) {
  1405         print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
  1406       }
  1407       print_stats(1, "Update RS", update_rs_time);
  1408       print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
  1409       print_stats(1, "Scan RS", scan_rs_time);
  1410       print_stats(1, "Object Copying", obj_copy_time);
  1411     }
  1412     if (print_marking_info) {
  1413       print_stats(1, "Complete CSet Marking", _mark_closure_time_ms);
  1414     }
  1415     print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1416 #ifndef PRODUCT
  1417     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1418     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1419     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1420     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1421     if (_num_cc_clears > 0) {
  1422       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1423     }
  1424 #endif
  1425     print_stats(1, "Other", other_time_ms);
  1426     print_stats(2, "Choose CSet",
  1427                    (_recorded_young_cset_choice_time_ms +
  1428                     _recorded_non_young_cset_choice_time_ms));
  1429     print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  1430     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  1431     print_stats(2, "Free CSet",
  1432                    (_recorded_young_free_cset_time_ms +
  1433                     _recorded_non_young_free_cset_time_ms));
  1435     for (int i = 0; i < _aux_num; ++i) {
  1436       if (_cur_aux_times_set[i]) {
  1437         char buffer[96];
  1438         sprintf(buffer, "Aux%d", i);
  1439         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1440       }
  1441     }
  1442   }
  1444   // Update the efficiency-since-mark vars.
  1445   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1446   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1447     // This usually happens due to the timer not having the required
  1448     // granularity. Some Linuxes are the usual culprits.
  1449     // We'll just set it to something (arbitrarily) small.
  1450     proc_ms = 1.0;
  1451   }
  1452   double cur_efficiency = (double) freed_bytes / proc_ms;
  1454   bool new_in_marking_window = _in_marking_window;
  1455   bool new_in_marking_window_im = false;
  1456   if (during_initial_mark_pause()) {
  1457     new_in_marking_window = true;
  1458     new_in_marking_window_im = true;
  1459   }
  1461   if (_last_young_gc) {
  1462     if (!last_pause_included_initial_mark) {
  1463       ergo_verbose2(ErgoMixedGCs,
  1464                     "start mixed GCs",
  1465                     ergo_format_byte_perc("known garbage"),
  1466                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1467       set_gcs_are_young(false);
  1468     } else {
  1469       ergo_verbose0(ErgoMixedGCs,
  1470                     "do not start mixed GCs",
  1471                     ergo_format_reason("concurrent cycle is about to start"));
  1472     }
  1473     _last_young_gc = false;
  1474   }
  1476   if (!_last_gc_was_young) {
  1477     if (_should_revert_to_young_gcs) {
  1478       ergo_verbose2(ErgoMixedGCs,
  1479                     "end mixed GCs",
  1480                     ergo_format_reason("mixed GCs end requested")
  1481                     ergo_format_byte_perc("known garbage"),
  1482                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1483       set_gcs_are_young(true);
  1484     } else if (_known_garbage_ratio < 0.05) {
  1485       ergo_verbose3(ErgoMixedGCs,
  1486                "end mixed GCs",
  1487                ergo_format_reason("known garbage percent lower than threshold")
  1488                ergo_format_byte_perc("known garbage")
  1489                ergo_format_perc("threshold"),
  1490                _known_garbage_bytes, _known_garbage_ratio * 100.0,
  1491                0.05 * 100.0);
  1492       set_gcs_are_young(true);
  1493     } else if (adaptive_young_list_length() &&
  1494               (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
  1495       ergo_verbose5(ErgoMixedGCs,
  1496                     "end mixed GCs",
  1497                     ergo_format_reason("current GC efficiency lower than "
  1498                                        "predicted young GC efficiency")
  1499                     ergo_format_double("GC efficiency factor")
  1500                     ergo_format_double("current GC efficiency")
  1501                     ergo_format_double("predicted young GC efficiency")
  1502                     ergo_format_byte_perc("known garbage"),
  1503                     get_gc_eff_factor(), cur_efficiency,
  1504                     predict_young_gc_eff(),
  1505                     _known_garbage_bytes, _known_garbage_ratio * 100.0);
  1506       set_gcs_are_young(true);
  1507     }
  1508   }
  1509   _should_revert_to_young_gcs = false;
  1511   if (_last_gc_was_young && !_during_marking) {
  1512     _young_gc_eff_seq->add(cur_efficiency);
  1513   }
  1515   _short_lived_surv_rate_group->start_adding_regions();
  1516   // do that for any other surv rate groups
  1518   if (update_stats) {
  1519     double pause_time_ms = elapsed_ms;
  1521     size_t diff = 0;
  1522     if (_max_pending_cards >= _pending_cards)
  1523       diff = _max_pending_cards - _pending_cards;
  1524     _pending_card_diff_seq->add((double) diff);
  1526     double cost_per_card_ms = 0.0;
  1527     if (_pending_cards > 0) {
  1528       cost_per_card_ms = update_rs_time / (double) _pending_cards;
  1529       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1530     }
  1532     size_t cards_scanned = _g1->cards_scanned();
  1534     double cost_per_entry_ms = 0.0;
  1535     if (cards_scanned > 10) {
  1536       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1537       if (_last_gc_was_young) {
  1538         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1539       } else {
  1540         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1541       }
  1542     }
  1544     if (_max_rs_lengths > 0) {
  1545       double cards_per_entry_ratio =
  1546         (double) cards_scanned / (double) _max_rs_lengths;
  1547       if (_last_gc_was_young) {
  1548         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1549       } else {
  1550         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1551       }
  1552     }
  1554     // It turns out that, sometimes, _max_rs_lengths can get smaller
  1555     // than _recorded_rs_lengths which causes rs_length_diff to get
  1556     // very large and mess up the RSet length predictions. We'll be
  1557     // defensive until we work out why this happens.
  1558     size_t rs_length_diff = 0;
  1559     if (_max_rs_lengths > _recorded_rs_lengths) {
  1560       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
  1561     }
  1562     _rs_length_diff_seq->add((double) rs_length_diff);
  1564     size_t copied_bytes = surviving_bytes;
  1565     double cost_per_byte_ms = 0.0;
  1566     if (copied_bytes > 0) {
  1567       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
  1568       if (_in_marking_window) {
  1569         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1570       } else {
  1571         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1572       }
  1573     }
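           // Editor's note: the per-unit costs recorded above (per card,
           // per entry, per byte) drive the predict_*_time_ms() models used
           // below when sizing future collection sets. For example (assumed
           // numbers), copying 1000000 surviving bytes in 10.0 ms of object
           // copy time records a cost of 0.00001 ms per byte.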
  1575     double all_other_time_ms = pause_time_ms -
  1576       (update_rs_time + scan_rs_time + obj_copy_time +
  1577        _mark_closure_time_ms + termination_time);
  1579     double young_other_time_ms = 0.0;
  1580     if (young_cset_region_length() > 0) {
  1581       young_other_time_ms =
  1582         _recorded_young_cset_choice_time_ms +
  1583         _recorded_young_free_cset_time_ms;
  1584       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1585                                           (double) young_cset_region_length());
  1586     }
  1587     double non_young_other_time_ms = 0.0;
  1588     if (old_cset_region_length() > 0) {
  1589       non_young_other_time_ms =
  1590         _recorded_non_young_cset_choice_time_ms +
  1591         _recorded_non_young_free_cset_time_ms;
  1593       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1594                                             (double) old_cset_region_length());
  1595     }
  1597     double constant_other_time_ms = all_other_time_ms -
  1598       (young_other_time_ms + non_young_other_time_ms);
  1599     _constant_other_time_ms_seq->add(constant_other_time_ms);
  1601     double survival_ratio = 0.0;
  1602     if (_bytes_in_collection_set_before_gc > 0) {
  1603       survival_ratio = (double) _bytes_copied_during_gc /
  1604                                    (double) _bytes_in_collection_set_before_gc;
  1605     }
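           // Illustrative example with assumed numbers (editor's sketch):
           // 2 MB copied out of 16 MB that sat in the collection set before
           // the GC gives survival_ratio = 2.0 / 16.0 = 0.125.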
  1607     _pending_cards_seq->add((double) _pending_cards);
  1608     _rs_lengths_seq->add((double) _max_rs_lengths);
  1610     double expensive_region_limit_ms =
  1611       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
  1612     if (expensive_region_limit_ms < 0.0) {
  1613       // this means that the other time was predicted to be longer
  1614       // than the max pause time
  1615       expensive_region_limit_ms = (double) MaxGCPauseMillis;
  1616     }
  1617     _expensive_region_limit_ms = expensive_region_limit_ms;
  1618   }
  1620   _in_marking_window = new_in_marking_window;
  1621   _in_marking_window_im = new_in_marking_window_im;
  1622   _free_regions_at_end_of_collection = _g1->free_regions();
  1623   update_young_list_target_length();
  1625   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1626   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1627   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  1629   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
  1630 }
  1632 #define EXT_SIZE_FORMAT "%d%s"
  1633 #define EXT_SIZE_PARAMS(bytes)                                  \
  1634   byte_size_in_proper_unit((bytes)),                            \
  1635   proper_unit_for_byte_size((bytes))
  1637 void G1CollectorPolicy::print_heap_transition() {
  1638   if (PrintGCDetails) {
  1639     YoungList* young_list = _g1->young_list();
  1640     size_t eden_bytes = young_list->eden_used_bytes();
  1641     size_t survivor_bytes = young_list->survivor_used_bytes();
  1642     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  1643     size_t used = _g1->used();
  1644     size_t capacity = _g1->capacity();
  1645     size_t eden_capacity =
  1646       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
  1648     gclog_or_tty->print_cr(
  1649       "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1650       "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1651       "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1652       EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1653       EXT_SIZE_PARAMS(_eden_bytes_before_gc),
  1654       EXT_SIZE_PARAMS(_prev_eden_capacity),
  1655       EXT_SIZE_PARAMS(eden_bytes),
  1656       EXT_SIZE_PARAMS(eden_capacity),
  1657       EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
  1658       EXT_SIZE_PARAMS(survivor_bytes),
  1659       EXT_SIZE_PARAMS(used_before_gc),
  1660       EXT_SIZE_PARAMS(_capacity_before_gc),
  1661       EXT_SIZE_PARAMS(used),
  1662       EXT_SIZE_PARAMS(capacity));
  1664     _prev_eden_capacity = eden_capacity;
  1665   } else if (PrintGC) {
  1666     _g1->print_size_transition(gclog_or_tty,
  1667                                _cur_collection_pause_used_at_start_bytes,
  1668                                _g1->used(), _g1->capacity());
  1669   }
  1670 }
  1672 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1673                                                      double update_rs_processed_buffers,
  1674                                                      double goal_ms) {
  1675   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1676   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1678   if (G1UseAdaptiveConcRefinement) {
  1679     const int k_gy = 3, k_gr = 6;
  1680     const double inc_k = 1.1, dec_k = 0.9;
  1682     int g = cg1r->green_zone();
  1683     if (update_rs_time > goal_ms) {
  1684       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
  1685     } else {
  1686       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1687         g = (int)MAX2(g * inc_k, g + 1.0);
  1688       }
  1689     }
  1690     // Change the refinement threads params
  1691     cg1r->set_green_zone(g);
  1692     cg1r->set_yellow_zone(g * k_gy);
  1693     cg1r->set_red_zone(g * k_gr);
  1694     cg1r->reinitialize_threads();
  1696     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1697     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1698                                     cg1r->yellow_zone());
  1699     // Change the barrier params
  1700     dcqs.set_process_completed_threshold(processing_threshold);
  1701     dcqs.set_max_completed_queue(cg1r->red_zone());
  1702   }
  1704   int curr_queue_size = dcqs.completed_buffers_num();
  1705   if (curr_queue_size >= cg1r->yellow_zone()) {
  1706     dcqs.set_completed_queue_padding(curr_queue_size);
  1707   } else {
  1708     dcqs.set_completed_queue_padding(0);
  1709   }
  1710   dcqs.notify_if_necessary();
  1711 }
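       // Worked example for the adaptive refinement above (editor's sketch
       // with assumed numbers): with green_zone = 100 and update_rs_time
       // over goal_ms, the green zone shrinks to (int)(100 * 0.9) = 90;
       // the yellow and red zones then become 3 * 90 = 270 and
       // 6 * 90 = 540 completed buffers respectively.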
  1713 double
  1714 G1CollectorPolicy::
  1715 predict_young_collection_elapsed_time_ms(size_t adjustment) {
  1716   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
  1718   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1719   size_t young_num = g1h->young_list()->length();
  1720   if (young_num == 0)
  1721     return 0.0;
  1723   young_num += adjustment;
  1724   size_t pending_cards = predict_pending_cards();
  1725   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  1726                       predict_rs_length_diff();
  1727   size_t card_num;
  1728   if (gcs_are_young()) {
  1729     card_num = predict_young_card_num(rs_lengths);
  1730   } else {
  1731     card_num = predict_non_young_card_num(rs_lengths);
  1732   }
  1733   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  1734   double accum_yg_surv_rate =
  1735     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
  1737   size_t bytes_to_copy =
  1738     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
  1740   return
  1741     predict_rs_update_time_ms(pending_cards) +
  1742     predict_rs_scan_time_ms(card_num) +
  1743     predict_object_copy_time_ms(bytes_to_copy) +
  1744     predict_young_other_time_ms(young_num) +
  1745     predict_constant_other_time_ms();
  1746 }
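       // Editor's summary of the model above (a sketch, not new policy):
       // the predicted young pause time is a sum of linear terms,
       //   T = rs_update(pending_cards) + rs_scan(card_num)
       //     + copy(bytes_to_copy) + young_other(young_num) + constant,
       // where each term is scaled by the per-unit costs sampled at the
       // end of every pause.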
  1748 double
  1749 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1750   size_t rs_length = predict_rs_length_diff();
  1751   size_t card_num;
  1752   if (gcs_are_young()) {
  1753     card_num = predict_young_card_num(rs_length);
  1754   } else {
  1755     card_num = predict_non_young_card_num(rs_length);
  1756   }
  1757   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1758 }
  1760 double
  1761 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1762                                                 size_t scanned_cards) {
  1763   return
  1764     predict_rs_update_time_ms(pending_cards) +
  1765     predict_rs_scan_time_ms(scanned_cards) +
  1766     predict_constant_other_time_ms();
  1767 }
  1769 double
  1770 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1771                                                   bool young) {
  1772   size_t rs_length = hr->rem_set()->occupied();
  1773   size_t card_num;
  1774   if (gcs_are_young()) {
  1775     card_num = predict_young_card_num(rs_length);
  1776   } else {
  1777     card_num = predict_non_young_card_num(rs_length);
  1778   }
  1779   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1781   double region_elapsed_time_ms =
  1782     predict_rs_scan_time_ms(card_num) +
  1783     predict_object_copy_time_ms(bytes_to_copy);
  1785   if (young)
  1786     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1787   else
  1788     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1790   return region_elapsed_time_ms;
  1791 }
  1793 size_t
  1794 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1795   size_t bytes_to_copy;
  1796   if (hr->is_marked())
  1797     bytes_to_copy = hr->max_live_bytes();
  1798   else {
  1799     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
  1800                "invariant" );
  1801     int age = hr->age_in_surv_rate_group();
  1802     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1803     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1804   }
  1806   return bytes_to_copy;
  1807 }
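       // Illustrative example with assumed numbers (editor's sketch): an
       // unmarked young region with used() = 1048576 bytes and a predicted
       // survival rate of 0.30 yields
       // bytes_to_copy = (size_t)(1048576 * 0.30) = 314572 bytes.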
  1809 void
  1810 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
  1811                                           size_t survivor_cset_region_length) {
  1812   _eden_cset_region_length     = eden_cset_region_length;
  1813   _survivor_cset_region_length = survivor_cset_region_length;
  1814   _old_cset_region_length      = 0;
  1815 }
  1817 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1818   _recorded_rs_lengths = rs_lengths;
  1819 }
  1821 void G1CollectorPolicy::check_if_region_is_too_expensive(double
  1822                                                            predicted_time_ms) {
  1823   // I don't think we need to do this when in young GC mode since
  1824   // marking will be initiated next time we hit the soft limit anyway...
  1825   if (predicted_time_ms > _expensive_region_limit_ms) {
  1826     ergo_verbose2(ErgoMixedGCs,
  1827               "request mixed GCs end",
  1828               ergo_format_reason("predicted region time higher than threshold")
  1829               ergo_format_ms("predicted region time")
  1830               ergo_format_ms("threshold"),
  1831               predicted_time_ms, _expensive_region_limit_ms);
  1832     // no point in doing another mixed GC
  1833     _should_revert_to_young_gcs = true;
  1834   }
  1835 }
  1837 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1838                                                double elapsed_ms) {
  1839   _recent_gc_times_ms->add(elapsed_ms);
  1840   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1841   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1842 }
  1844 size_t G1CollectorPolicy::expansion_amount() {
  1845   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  1846   double threshold = _gc_overhead_perc;
  1847   if (recent_gc_overhead > threshold) {
  1848     // We will double the existing space, or take
  1849     // G1ExpandByPercentOfAvailable % of the available expansion
  1850     // space, whichever is smaller, bounded below by a minimum
  1851     // expansion (unless that's all that's left.)
  1852     const size_t min_expand_bytes = 1*M;
  1853     size_t reserved_bytes = _g1->max_capacity();
  1854     size_t committed_bytes = _g1->capacity();
  1855     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  1856     size_t expand_bytes;
  1857     size_t expand_bytes_via_pct =
  1858       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  1859     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  1860     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  1861     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
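           // Illustrative example with assumed numbers (editor's sketch):
           // with 1 GB committed out of 2 GB reserved (1 GB uncommitted)
           // and G1ExpandByPercentOfAvailable = 20, the percentage path
           // yields 200 MB; that value survives all three clamps, so the
           // suggested expansion is 200 MB.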
  1863     ergo_verbose5(ErgoHeapSizing,
  1864                   "attempt heap expansion",
  1865                   ergo_format_reason("recent GC overhead higher than "
  1866                                      "threshold after GC")
  1867                   ergo_format_perc("recent GC overhead")
  1868                   ergo_format_perc("threshold")
  1869                   ergo_format_byte("uncommitted")
  1870                   ergo_format_byte_perc("calculated expansion amount"),
  1871                   recent_gc_overhead, threshold,
  1872                   uncommitted_bytes,
  1873                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  1875     return expand_bytes;
  1876   } else {
  1877     return 0;
  1878   }
  1879 }
  1881 class CountCSClosure: public HeapRegionClosure {
  1882   G1CollectorPolicy* _g1_policy;
  1883 public:
  1884   CountCSClosure(G1CollectorPolicy* g1_policy) :
  1885     _g1_policy(g1_policy) {}
  1886   bool doHeapRegion(HeapRegion* r) {
  1887     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  1888     return false;
  1889   }
  1890 };
  1892 void G1CollectorPolicy::count_CS_bytes_used() {
  1893   CountCSClosure cs_closure(this);
  1894   _g1->collection_set_iterate(&cs_closure);
  1895 }
  1897 void G1CollectorPolicy::print_summary(int level,
  1898                                       const char* str,
  1899                                       NumberSeq* seq) const {
  1900   double sum = seq->sum();
  1901   LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  1902                 str, sum / 1000.0, seq->avg());
  1903 }
  1905 void G1CollectorPolicy::print_summary_sd(int level,
  1906                                          const char* str,
  1907                                          NumberSeq* seq) const {
  1908   print_summary(level, str, seq);
  1909   LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  1910                 seq->num(), seq->sd(), seq->maximum());
  1911 }
  1913 void G1CollectorPolicy::check_other_times(int level,
  1914                                         NumberSeq* other_times_ms,
  1915                                         NumberSeq* calc_other_times_ms) const {
  1916   bool should_print = false;
  1917   LineBuffer buf(level + 2);
  1919   double max_sum = MAX2(fabs(other_times_ms->sum()),
  1920                         fabs(calc_other_times_ms->sum()));
  1921   double min_sum = MIN2(fabs(other_times_ms->sum()),
  1922                         fabs(calc_other_times_ms->sum()));
  1923   double sum_ratio = max_sum / min_sum;
  1924   if (sum_ratio > 1.1) {
  1925     should_print = true;
  1926     buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  1927   }
  1929   double max_avg = MAX2(fabs(other_times_ms->avg()),
  1930                         fabs(calc_other_times_ms->avg()));
  1931   double min_avg = MIN2(fabs(other_times_ms->avg()),
  1932                         fabs(calc_other_times_ms->avg()));
  1933   double avg_ratio = max_avg / min_avg;
  1934   if (avg_ratio > 1.1) {
  1935     should_print = true;
  1936     buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  1937   }
  1939   if (other_times_ms->sum() < -0.01) {
  1940     buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  1941   }
  1943   if (other_times_ms->avg() < -0.01) {
  1944     buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  1945   }
  1947   if (calc_other_times_ms->sum() < -0.01) {
  1948     should_print = true;
  1949     buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  1950   }
  1952   if (calc_other_times_ms->avg() < -0.01) {
  1953     should_print = true;
  1954     buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  1955   }
  1957   if (should_print)
  1958     print_summary(level, "Other(Calc)", calc_other_times_ms);
  1959 }
  1961 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  1962   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1963   MainBodySummary*    body_summary = summary->main_body_summary();
  1964   if (summary->get_total_seq()->num() > 0) {
  1965     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  1966     if (body_summary != NULL) {
  1967       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
  1968       if (parallel) {
  1969         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  1970         print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  1971         print_summary(2, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
  1972         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  1973         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  1974         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  1975         print_summary(2, "Termination", body_summary->get_termination_seq());
  1976         print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
  1977         {
  1978           NumberSeq* other_parts[] = {
  1979             body_summary->get_ext_root_scan_seq(),
  1980             body_summary->get_mark_stack_scan_seq(),
  1981             body_summary->get_update_rs_seq(),
  1982             body_summary->get_scan_rs_seq(),
  1983             body_summary->get_obj_copy_seq(),
  1984             body_summary->get_termination_seq()
  1985           };
  1986           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  1987                                         6, other_parts);
  1988           check_other_times(2, body_summary->get_parallel_other_seq(),
  1989                             &calc_other_times_ms);
  1990         }
  1991       } else {
  1992         print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
  1993         print_summary(1, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
  1994         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  1995         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  1996         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  1997       }
  1998     }
  1999     print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
  2000     print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  2001     print_summary(1, "Other", summary->get_other_seq());
  2002     {
  2003       if (body_summary != NULL) {
  2004         NumberSeq calc_other_times_ms;
  2005         if (parallel) {
  2006           // parallel
  2007           NumberSeq* other_parts[] = {
  2008             body_summary->get_satb_drain_seq(),
  2009             body_summary->get_parallel_seq(),
  2010             body_summary->get_clear_ct_seq()
  2011           };
  2012           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2013                                                 3, other_parts);
  2014         } else {
  2015           // serial
  2016           NumberSeq* other_parts[] = {
  2017             body_summary->get_satb_drain_seq(),
  2018             body_summary->get_update_rs_seq(),
  2019             body_summary->get_ext_root_scan_seq(),
  2020             body_summary->get_mark_stack_scan_seq(),
  2021             body_summary->get_scan_rs_seq(),
  2022             body_summary->get_obj_copy_seq()
  2023           };
  2024           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2025                                                 6, other_parts);
  2026         }
  2027         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
  2028       }
  2029     }
  2030   } else {
  2031     LineBuffer(1).append_and_print_cr("none");
  2032   }
  2033   LineBuffer(0).append_and_print_cr("");
  2034 }
  2036 void G1CollectorPolicy::print_tracing_info() const {
  2037   if (TraceGen0Time) {
  2038     gclog_or_tty->print_cr("ALL PAUSES");
  2039     print_summary_sd(0, "Total", _all_pause_times_ms);
  2040     gclog_or_tty->print_cr("");
  2041     gclog_or_tty->print_cr("");
  2042     gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  2043     gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  2044     gclog_or_tty->print_cr("");
  2046     gclog_or_tty->print_cr("EVACUATION PAUSES");
  2047     print_summary(_summary);
  2049     gclog_or_tty->print_cr("MISC");
  2050     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  2051     print_summary_sd(0, "Yields", _all_yield_times_ms);
  2052     for (int i = 0; i < _aux_num; ++i) {
  2053       if (_all_aux_times_ms[i].num() > 0) {
  2054         char buffer[96];
  2055         sprintf(buffer, "Aux%d", i);
  2056         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  2057       }
  2058     }
  2059   }
  2060   if (TraceGen1Time) {
  2061     if (_all_full_gc_times_ms->num() > 0) {
  2062       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2063                  _all_full_gc_times_ms->num(),
  2064                  _all_full_gc_times_ms->sum() / 1000.0);
  2065       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  2066       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2067                     _all_full_gc_times_ms->sd(),
  2068                     _all_full_gc_times_ms->maximum());
  2069     }
  2070   }
  2071 }
  2073 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2074 #ifndef PRODUCT
  2075   _short_lived_surv_rate_group->print_surv_rate_summary();
  2076   // add this call for any other surv rate groups
  2077 #endif // PRODUCT
  2078 }
  2080 #ifndef PRODUCT
  2081 // for debugging, bit of a hack...
  2082 static char*
  2083 region_num_to_mbs(int length) {
  2084   static char buffer[64];
  2085   double bytes = (double) (length * HeapRegion::GrainBytes);
  2086   double mbs = bytes / (double) (1024 * 1024);
  2087   sprintf(buffer, "%7.2lfMB", mbs);
  2088   return buffer;
  2089 }
  2090 #endif // PRODUCT
  2092 size_t G1CollectorPolicy::max_regions(int purpose) {
  2093   switch (purpose) {
  2094     case GCAllocForSurvived:
  2095       return _max_survivor_regions;
  2096     case GCAllocForTenured:
  2097       return REGIONS_UNLIMITED;
  2098     default:
  2099       ShouldNotReachHere();
  2100       return REGIONS_UNLIMITED;
  2101   };
  2102 }
  2104 void G1CollectorPolicy::update_max_gc_locker_expansion() {
  2105   size_t expansion_region_num = 0;
  2106   if (GCLockerEdenExpansionPercent > 0) {
  2107     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  2108     double expansion_region_num_d = perc * (double) _young_list_target_length;
  2109     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  2110     // less than 1.0) we'll get 1.
  2111     expansion_region_num = (size_t) ceil(expansion_region_num_d);
  2112   } else {
  2113     assert(expansion_region_num == 0, "sanity");
  2114   }
  2115   _young_list_max_length = _young_list_target_length + expansion_region_num;
  2116   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  2117 }
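       // Illustrative example with assumed numbers (editor's sketch): with
       // GCLockerEdenExpansionPercent = 5 and a young list target length of
       // 50 regions, expansion_region_num = ceil(0.05 * 50) = ceil(2.5) = 3.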
  2119 // Calculates survivor space parameters.
  2120 void G1CollectorPolicy::update_survivors_policy() {
  2121   double max_survivor_regions_d =
  2122                  (double) _young_list_target_length / (double) SurvivorRatio;
  2123   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  2124   // smaller than 1.0) we'll get 1.
  2125   _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
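         // Illustrative example with assumed numbers (editor's sketch): a
         // young list target length of 60 regions with SurvivorRatio = 8
         // gives ceil(60 / 8.0) = ceil(7.5) = 8 survivor regions at most.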
  2127   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2128         HeapRegion::GrainWords * _max_survivor_regions);
  2129 }
  2131 #ifndef PRODUCT
  2132 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2133   CollectionSetChooser* _chooser;
  2134 public:
  2135   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2136     _chooser(chooser) {}
  2138   bool doHeapRegion(HeapRegion* r) {
  2139     if (!r->continuesHumongous()) {
  2140       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2141     }
  2142     return false;
  2143   }
  2144 };
  2146 bool G1CollectorPolicy::assertMarkedBytesDataOK() {
  2147   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2148   _g1->heap_region_iterate(&cl);
  2149   return true;
  2150 }
  2151 #endif
  2153 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  2154                                                      GCCause::Cause gc_cause) {
  2155   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2156   if (!during_cycle) {
  2157     ergo_verbose1(ErgoConcCycles,
  2158                   "request concurrent cycle initiation",
  2159                   ergo_format_reason("requested by GC cause")
  2160                   ergo_format_str("GC cause"),
  2161                   GCCause::to_string(gc_cause));
  2162     set_initiate_conc_mark_if_possible();
  2163     return true;
  2164   } else {
  2165     ergo_verbose1(ErgoConcCycles,
  2166                   "do not request concurrent cycle initiation",
  2167                   ergo_format_reason("concurrent cycle already in progress")
  2168                   ergo_format_str("GC cause"),
  2169                   GCCause::to_string(gc_cause));
  2170     return false;
  2171   }
  2172 }
  2174 void
  2175 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2176   // We are about to decide on whether this pause will be an
  2177   // initial-mark pause.
  2179   // First, during_initial_mark_pause() should not be already set. We
  2180   // will set it here if we have to. However, it should be cleared by
  2181   // the end of the pause (it's only set for the duration of an
  2182   // initial-mark pause).
  2183   assert(!during_initial_mark_pause(), "pre-condition");
  2185   if (initiate_conc_mark_if_possible()) {
  2186     // We had noticed on a previous pause that the heap occupancy has
  2187     // gone over the initiating threshold and we should start a
  2188     // concurrent marking cycle. So we might initiate one.
  2190     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2191     if (!during_cycle) {
  2192       // The concurrent marking thread is not "during a cycle", i.e.,
  2193       // it has completed the last one. So we can go ahead and
  2194       // initiate a new cycle.
  2196       set_during_initial_mark_pause();
  2197       // We do not allow mixed GCs during marking.
  2198       if (!gcs_are_young()) {
  2199         set_gcs_are_young(true);
  2200         ergo_verbose0(ErgoMixedGCs,
  2201                       "end mixed GCs",
  2202                       ergo_format_reason("concurrent cycle is about to start"));
  2203       }
  2205       // And we can now clear initiate_conc_mark_if_possible() as
  2206       // we've already acted on it.
  2207       clear_initiate_conc_mark_if_possible();
  2209       ergo_verbose0(ErgoConcCycles,
  2210                   "initiate concurrent cycle",
  2211                   ergo_format_reason("concurrent cycle initiation requested"));
  2212     } else {
  2213       // The concurrent marking thread is still finishing up the
  2214       // previous cycle. If we start one right now the two cycles
  2215       // overlap. In particular, the concurrent marking thread might
  2216       // be in the process of clearing the next marking bitmap (which
  2217       // we will use for the next cycle if we start one). Starting a
  2218       // cycle now will be bad given that parts of the marking
  2219       // information might get cleared by the marking thread. And we
  2220       // cannot wait for the marking thread to finish the cycle as it
  2221       // periodically yields while clearing the next marking bitmap
  2222       // and, if it's in a yield point, it's waiting for us to
  2223       // finish. So, at this point we will not start a cycle and we'll
  2224       // let the concurrent marking thread complete the last one.
  2225       ergo_verbose0(ErgoConcCycles,
  2226                     "do not initiate concurrent cycle",
  2227                     ergo_format_reason("concurrent cycle already in progress"));
  2228     }
  2229   }
  2230 }
  2232 class KnownGarbageClosure: public HeapRegionClosure {
  2233   CollectionSetChooser* _hrSorted;
  2235 public:
  2236   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2237     _hrSorted(hrSorted)
  2238   {}
  2240   bool doHeapRegion(HeapRegion* r) {
  2241     // We only include humongous regions in collection
  2242     // sets when concurrent mark shows that their contained object is
  2243     // unreachable.
  2245     // Do we have any marking information for this region?
  2246     if (r->is_marked()) {
  2247       // We don't include humongous regions in collection
  2248       // sets because we collect them immediately at the end of a marking
  2249       // cycle.  We also don't include young regions because we *must*
  2250       // include them in the next collection pause.
  2251       if (!r->isHumongous() && !r->is_young()) {
  2252         _hrSorted->addMarkedHeapRegion(r);
  2253       }
  2254     }
  2255     return false;
  2256   }
  2257 };
  2259 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2260   CollectionSetChooser* _hrSorted;
  2261   jint _marked_regions_added;
  2262   jint _chunk_size;
  2263   jint _cur_chunk_idx;
  2264   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2265   int _worker;
  2266   int _invokes;
  2268   void get_new_chunk() {
  2269     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2270     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2271   }
  2272   void add_region(HeapRegion* r) {
  2273     if (_cur_chunk_idx == _cur_chunk_end) {
  2274       get_new_chunk();
  2275     }
  2276     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2277     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2278     _marked_regions_added++;
  2279     _cur_chunk_idx++;
  2280   }
  2282 public:
  2283   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2284                            jint chunk_size,
  2285                            int worker) :
  2286     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2287     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
  2288     _invokes(0)
  2289   {}
  2291   bool doHeapRegion(HeapRegion* r) {
  2292     // We only include humongous regions in collection
  2293     // sets when concurrent mark shows that their contained object is
  2294     // unreachable.
  2295     _invokes++;
  2297     // Do we have any marking information for this region?
  2298     if (r->is_marked()) {
  2299       // We don't include humongous regions in collection
  2300       // sets because we collect them immediately at the end of a marking
  2301       // cycle.
  2302       // We also do not include young regions in collection sets
  2303       if (!r->isHumongous() && !r->is_young()) {
  2304         add_region(r);
  2305       }
  2306     }
  2307     return false;
  2308   }
  2309   jint marked_regions_added() { return _marked_regions_added; }
  2310   int invokes() { return _invokes; }
  2311 };
  2313 class ParKnownGarbageTask: public AbstractGangTask {
  2314   CollectionSetChooser* _hrSorted;
  2315   jint _chunk_size;
  2316   G1CollectedHeap* _g1;
  2317 public:
  2318   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
  2319     AbstractGangTask("ParKnownGarbageTask"),
  2320     _hrSorted(hrSorted), _chunk_size(chunk_size),
  2321     _g1(G1CollectedHeap::heap())
  2322   {}
  2324   void work(int i) {
  2325     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
  2326     // Back to zero for the claim value.
  2327     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
  2328                                          _g1->workers()->active_workers(),
  2329                                          HeapRegion::InitialClaimValue);
  2330     jint regions_added = parKnownGarbageCl.marked_regions_added();
  2331     _hrSorted->incNumMarkedHeapRegions(regions_added);
  2332     if (G1PrintParCleanupStats) {
  2333       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
  2334                  i, parKnownGarbageCl.invokes(), regions_added);
  2335     }
  2336   }
  2337 };
  2339 void
  2340 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  2341   double start_sec;
  2342   if (G1PrintParCleanupStats) {
  2343     start_sec = os::elapsedTime();
  2344   }
  2346   _collectionSetChooser->clearMarkedHeapRegions();
  2347   double clear_marked_end_sec;
  2348   if (G1PrintParCleanupStats) {
  2349     clear_marked_end_sec = os::elapsedTime();
  2350     gclog_or_tty->print_cr("  clear marked regions: %8.3f ms.",
  2351                            (clear_marked_end_sec - start_sec) * 1000.0);
  2352   }
  2354   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2355     const size_t OverpartitionFactor = 4;
  2356     size_t WorkUnit;
  2357     // The use of MinChunkSize = 8 in the original code
  2358     // causes some assertion failures when the total number of
  2359     // regions is less than 8.  The code here tries to fix that.
  2360     // Should the original code also be fixed?
  2361     if (no_of_gc_threads > 0) {
  2362       const size_t MinWorkUnit =
  2363         MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
  2364       WorkUnit =
  2365         MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
  2366              MinWorkUnit);
  2367     } else {
  2368       assert(no_of_gc_threads > 0,
  2369         "The active gc workers should be greater than 0");
  2370       // In a product build do something reasonable to avoid a crash.
  2371       const size_t MinWorkUnit =
  2372         MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
  2373       WorkUnit =
  2374         MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
  2375              MinWorkUnit);
  2376     }
  2377     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
  2378                                                              WorkUnit);
  2379     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  2380                                             (int) WorkUnit);
  2381     _g1->workers()->run_task(&parKnownGarbageTask);
  2383     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2384            "sanity check");
  2385   } else {
  2386     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  2387     _g1->heap_region_iterate(&knownGarbagecl);
  2388   }
  2389   double known_garbage_end_sec;
  2390   if (G1PrintParCleanupStats) {
  2391     known_garbage_end_sec = os::elapsedTime();
  2392     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
  2393                       (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  2394   }
  2396   _collectionSetChooser->sortMarkedHeapRegions();
  2397   double end_sec = os::elapsedTime();
  2398   if (G1PrintParCleanupStats) {
  2399     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
  2400                            (end_sec - known_garbage_end_sec) * 1000.0);
  2401   }
  2403   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  2404   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  2405   _cur_mark_stop_world_time_ms += elapsed_time_ms;
  2406   _prev_collection_pause_end_ms += elapsed_time_ms;
  2407   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
  2408 }
  2410 // Add the heap region at the head of the non-incremental collection set
  2411 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  2412   assert(_inc_cset_build_state == Active, "Precondition");
  2413   assert(!hr->is_young(), "non-incremental add of young region");
  2415   if (_g1->mark_in_progress())
  2416     _g1->concurrent_mark()->registerCSetRegion(hr);
  2418   assert(!hr->in_collection_set(), "should not already be in the CSet");
  2419   hr->set_in_collection_set(true);
  2420   hr->set_next_in_collection_set(_collection_set);
  2421   _collection_set = hr;
  2422   _collection_set_bytes_used_before += hr->used();
  2423   _g1->register_region_with_in_cset_fast_test(hr);
  2424   size_t rs_length = hr->rem_set()->occupied();
  2425   _recorded_rs_lengths += rs_length;
  2426   _old_cset_region_length += 1;
  2427 }
  2429 // Initialize the per-collection-set information
  2430 void G1CollectorPolicy::start_incremental_cset_building() {
  2431   assert(_inc_cset_build_state == Inactive, "Precondition");
  2433   _inc_cset_head = NULL;
  2434   _inc_cset_tail = NULL;
  2435   _inc_cset_bytes_used_before = 0;
  2437   _inc_cset_max_finger = 0;
  2438   _inc_cset_recorded_rs_lengths = 0;
  2439   _inc_cset_predicted_elapsed_time_ms = 0;
  2440   _inc_cset_build_state = Active;
  2441 }
  2443 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  2444   // This routine is used when:
  2445   // * adding survivor regions to the incremental cset at the end of an
  2446   //   evacuation pause,
  2447   // * adding the current allocation region to the incremental cset
  2448   //   when it is retired, and
  2449   // * updating existing policy information for a region in the
  2450   //   incremental cset via young list RSet sampling.
  2451   // Therefore this routine may be called at a safepoint by the
  2452   // VM thread, or in-between safepoints by mutator threads (when
  2453   // retiring the current allocation region) or a concurrent
  2454   // refine thread (RSet sampling).
  2456   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  2457   size_t used_bytes = hr->used();
  2459   _inc_cset_recorded_rs_lengths += rs_length;
  2460   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  2462   _inc_cset_bytes_used_before += used_bytes;
  2464   // Cache the values we have added to the aggregated information
  2465   // in the heap region in case we have to remove this region from
  2466   // the incremental collection set, or it is updated by the
  2467   // rset sampling code
  2468   hr->set_recorded_rs_length(rs_length);
  2469   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  2470 }
  2472 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  2473   // This routine is currently only called as part of the updating of
  2474   // existing policy information for regions in the incremental cset that
  2475   // is performed by the concurrent refine thread(s) as part of young list
  2476   // RSet sampling. Therefore we should not be at a safepoint.
  2478   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  2479   assert(hr->is_young(), "it should be");
  2481   size_t used_bytes = hr->used();
  2482   size_t old_rs_length = hr->recorded_rs_length();
  2483   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  2485   // Subtract the old recorded/predicted policy information for
  2486   // the given heap region from the collection set info.
  2487   _inc_cset_recorded_rs_lengths -= old_rs_length;
  2488   _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
  2490   _inc_cset_bytes_used_before -= used_bytes;
  2492   // Clear the values cached in the heap region
  2493   hr->set_recorded_rs_length(0);
  2494   hr->set_predicted_elapsed_time_ms(0);
  2495 }
  2497 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  2498   // Update the collection set information that is dependent on the new RS length
  2499   assert(hr->is_young(), "Precondition");
  2501   remove_from_incremental_cset_info(hr);
  2502   add_to_incremental_cset_info(hr, new_rs_length);
  2503 }
  2505 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  2506   assert(hr->is_young(), "invariant");
  2507   assert(hr->young_index_in_cset() > -1, "should have already been set");
  2508   assert(_inc_cset_build_state == Active, "Precondition");
  2510   // We need to clear and set the cached recorded/predicted collection set
  2511   // information in the heap region here (before the region gets added
  2512   // to the collection set). An individual heap region's cached values
  2513   // are calculated, aggregated with the policy collection set info,
  2514   // and cached in the heap region here (initially) and (subsequently)
  2515   // by the Young List sampling code.
  2517   size_t rs_length = hr->rem_set()->occupied();
  2518   add_to_incremental_cset_info(hr, rs_length);
  2520   HeapWord* hr_end = hr->end();
  2521   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
  2523   assert(!hr->in_collection_set(), "invariant");
  2524   hr->set_in_collection_set(true);
  2525   assert( hr->next_in_collection_set() == NULL, "invariant");
  2527   _g1->register_region_with_in_cset_fast_test(hr);
  2528 }
  2530 // Add the region at the RHS of the incremental cset
  2531 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  2532   // We should only ever be appending survivors at the end of a pause
  2533   assert( hr->is_survivor(), "Logic");
  2535   // Do the 'common' stuff
  2536   add_region_to_incremental_cset_common(hr);
  2538   // Now add the region at the right hand side
  2539   if (_inc_cset_tail == NULL) {
  2540     assert(_inc_cset_head == NULL, "invariant");
  2541     _inc_cset_head = hr;
  2542   } else {
  2543     _inc_cset_tail->set_next_in_collection_set(hr);
  2544   }
  2545   _inc_cset_tail = hr;
  2546 }
  2548 // Add the region to the LHS of the incremental cset
  2549 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  2550   // Survivors should be added to the RHS at the end of a pause
  2551   assert(!hr->is_survivor(), "Logic");
  2553   // Do the 'common' stuff
  2554   add_region_to_incremental_cset_common(hr);
  2556   // Add the region at the left hand side
  2557   hr->set_next_in_collection_set(_inc_cset_head);
  2558   if (_inc_cset_head == NULL) {
  2559     assert(_inc_cset_tail == NULL, "Invariant");
  2560     _inc_cset_tail = hr;
  2561   }
  2562   _inc_cset_head = hr;
  2563 }
  2565 #ifndef PRODUCT
  2566 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  2567   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
  2569   st->print_cr("\nCollection_set:");
  2570   HeapRegion* csr = list_head;
  2571   while (csr != NULL) {
  2572     HeapRegion* next = csr->next_in_collection_set();
  2573     assert(csr->in_collection_set(), "bad CS");
  2574     st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
  2575                  "age: %4d, y: %d, surv: %d",
  2576                         csr->bottom(), csr->end(),
  2577                         csr->top(),
  2578                         csr->prev_top_at_mark_start(),
  2579                         csr->next_top_at_mark_start(),
  2580                         csr->top_at_conc_mark_count(),
  2581                         csr->age_in_surv_rate_group_cond(),
  2582                         csr->is_young(),
  2583                         csr->is_survivor());
  2584     csr = next;
  2585   }
  2586 }
  2587 #endif // !PRODUCT
  2589 void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
  2590   // Set this here - in case we're not doing young collections.
  2591   double non_young_start_time_sec = os::elapsedTime();
  2593   YoungList* young_list = _g1->young_list();
  2595   guarantee(target_pause_time_ms > 0.0,
  2596             err_msg("target_pause_time_ms = %1.6lf should be positive",
  2597                     target_pause_time_ms));
  2598   guarantee(_collection_set == NULL, "Precondition");
  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;

  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);

  // the 10% and 50% values are arbitrary...
  double threshold = 0.10 * target_pause_time_ms;
  if (time_remaining_ms < threshold) {
    double prev_time_remaining_ms = time_remaining_ms;
    time_remaining_ms = 0.50 * target_pause_time_ms;
    ergo_verbose3(ErgoCSetConstruction,
                  "adjust remaining time",
                  ergo_format_reason("remaining time lower than threshold")
                  ergo_format_ms("remaining time")
                  ergo_format_ms("threshold")
                  ergo_format_ms("adjusted remaining time"),
                  prev_time_remaining_ms, threshold, time_remaining_ms);
  }
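
  // Worked example (illustrative numbers only): with a 200 ms target and
  // a predicted base time of 190 ms, only 10 ms remains, which is below
  // the 20 ms (10%) threshold, so the budget is reset to 100 ms (50% of
  // the target) to leave room for a reasonably sized CSet.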

  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;

  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _last_gc_was_young = gcs_are_young();

  if (_last_gc_was_young) {
    ++_young_pause_num;
  } else {
    ++_mixed_pause_num;
  }

  // The young list is laid out so that the survivor regions from the
  // previous pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  size_t survivor_region_length = young_list->survivor_length();
  size_t eden_region_length = young_list->length() - survivor_region_length;
  init_cset_region_lengths(eden_region_length, survivor_region_length);
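  // Walk the survivor regions from the previous pause and retag them as
  // plain young regions: they are part of this pause's CSet and will be
  // evacuated along with the rest of the young list.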
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  if (_g1->mark_in_progress()) {
    _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
  }
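
  // The incremental CSet that was built up concurrently since the last
  // pause now becomes the young portion of this pause's collection set;
  // its cached byte count and elapsed-time prediction are charged against
  // the pause budget in one step.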
  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;

  if (!gcs_are_young()) {
    bool should_continue = true;
    NumberSeq seq;
    double avg_prediction = 100000000000000000.0; // something very large
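    // Seeding avg_prediction with a very large value is presumably meant
    // to keep the prediction-based cutoff in getNextMarkedRegion() from
    // rejecting the first candidate region before seq has real samples;
    // it is recomputed from seq after each region is added below.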

    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
    do {
      // Note that add_old_region_to_cset() increments the
      // _old_cset_region_length field and cset_region_length() returns the
      // sum of _eden_cset_region_length, _survivor_cset_region_length, and
      // _old_cset_region_length. So, as old regions are added to the
      // CSet, _old_cset_region_length will be incremented and
      // cset_region_length(), which is used below, will always reflect
      // the total number of regions added up to this point to the CSet.

      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                      avg_prediction);
      if (hr != NULL) {
        _g1->old_set_remove(hr);
        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
        time_remaining_ms -= predicted_time_ms;
        predicted_pause_time_ms += predicted_time_ms;
        add_old_region_to_cset(hr);
        seq.add(predicted_time_ms);
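        // Mean plus one standard deviation of the per-region predictions
        // seen so far; fed back into getNextMarkedRegion() as a
        // conservative per-region time estimate.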
        avg_prediction = seq.avg() + seq.sd();
      }

      should_continue = true;
      if (hr == NULL) {
        // No need for an ergo verbose message here,
        // getNextMarkedRegion() does this when it returns NULL.
        should_continue = false;
      } else {
        if (adaptive_young_list_length()) {
          if (time_remaining_ms < 0.0) {
            ergo_verbose1(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("remaining time is lower than 0")
                          ergo_format_ms("remaining time"),
                          time_remaining_ms);
            should_continue = false;
          }
        } else {
          if (cset_region_length() >= _young_list_fixed_length) {
            ergo_verbose2(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("CSet length reached target")
                          ergo_format_region("CSet")
                          ergo_format_region("young target"),
                          cset_region_length(), _young_list_fixed_length);
            should_continue = false;
          }
        }
      }
    } while (should_continue);

    if (!adaptive_young_list_length() &&
        cset_region_length() < _young_list_fixed_length) {
      ergo_verbose2(ErgoCSetConstruction,
                    "request mixed GCs end",
                    ergo_format_reason("CSet length lower than target")
                    ergo_format_region("CSet")
                    ergo_format_region("young target"),
                    cset_region_length(), _young_list_fixed_length);
      _should_revert_to_young_gcs = true;
    }

    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
                  "add old regions to CSet",
                  ergo_format_region("old")
                  ergo_format_ms("predicted old region time"),
                  old_cset_region_length(),
                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
  }

  stop_incremental_cset_building();

  count_CS_bytes_used();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                old_cset_region_length(),
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}
