src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Tue, 24 Aug 2010 17:24:33 -0400

author
tonyp
date
Tue, 24 Aug 2010 17:24:33 -0400
changeset 2315
631f79e71e90
parent 2314
f95d63e2154a
child 2333
016a3628c885
permissions
-rw-r--r--

6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following: a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions). b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards that corresponding to the "block" that just got allocated. c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab(). d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC. e) If a thread is unable to allocate a humongous object it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied aftewards. f) The G1 policy is more careful to set the young list target length to be the survivor number +1. g) Lots of code tidy up, removal, refactoring to make future changes easier.
Reviewed-by: johnc, ysr

     1 /*
     2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    27 #include "gc_implementation/g1/concurrentMark.hpp"
    28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    32 #include "gc_implementation/shared/gcPolicyCounters.hpp"
    33 #include "runtime/arguments.hpp"
    34 #include "runtime/java.hpp"
    35 #include "runtime/mutexLocker.hpp"
    36 #include "utilities/debug.hpp"
#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
//   numbers of GC threads and choosing them based on the results

// Each table below has one entry per GC thread count; the constructor
// indexes them with (roughly) MIN2(ParallelGCThreads, 8) - 1 and uses
// the chosen value to seed the corresponding prediction sequence.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>
// Note: the policy object is created before the heap itself, so the
// constructor must only rely on flags and static helpers, never on a
// live G1CollectedHeap instance.
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
    ? ParallelGCThreads : 1),

  _n_pauses(0),
  // Sliding windows over the last NumPrevPausesForHeuristics pauses,
  // consumed by the pause-time heuristics.
  _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

#ifndef PRODUCT
  // Card-count clearing statistics; debug builds only.
  _cur_clear_ct_time_ms(0.0),
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>
  // Sequences feeding the pause-time prediction model; they are seeded
  // with per-thread-count defaults in the constructor body below.

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _in_young_gc_mode(false),
  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

   _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast  QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0)

{
  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  // Verify PLAB sizes: a PLAB must fit in a single region.
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  // Per-GC-worker phase timing arrays.
  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  // Seed the prediction sequences from the default tables above,
  // picking the entry that matches the GC thread count (clamped to 8).
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_init_times_ms->add(0.05);
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  // if G1FixedSurvivorSpaceSize is 0 which means the size is not
  // fixed, then _max_survivor_regions will be calculated at
  // calculate_young_list_target_length during initialization
  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  initialize_all();
}
   368 // Increment "i", mod "len"
   369 static void inc_mod(int& i, int len) {
   370   i++; if (i == len) i = 0;
   371 }
void G1CollectorPolicy::initialize_flags() {
  // Alignments are driven by the region size, which was set up in the
  // constructor before this is called.
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
   382 // The easiest way to deal with the parsing of the NewSize /
   383 // MaxNewSize / etc. parameteres is to re-use the code in the
   384 // TwoGenerationCollectorPolicy class. This is similar to what
   385 // ParallelScavenge does with its GenerationSizer class (see
   386 // ParallelScavengeHeap::initialize()). We might change this in the
   387 // future, but it's a good start.
   388 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
   389   size_t size_to_region_num(size_t byte_size) {
   390     return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
   391   }
   393 public:
   394   G1YoungGenSizer() {
   395     initialize_flags();
   396     initialize_size_info();
   397   }
   399   size_t min_young_region_num() {
   400     return size_to_region_num(_min_gen0_size);
   401   }
   402   size_t initial_young_region_num() {
   403     return size_to_region_num(_initial_gen0_size);
   404   }
   405   size_t max_young_region_num() {
   406     return size_to_region_num(_max_gen0_size);
   407   }
   408 };
   410 void G1CollectorPolicy::init() {
   411   // Set aside an initial future to_space.
   412   _g1 = G1CollectedHeap::heap();
   414   assert(Heap_lock->owned_by_self(), "Locking discipline.");
   416   initialize_gc_policy_counters();
   418   if (G1Gen) {
   419     _in_young_gc_mode = true;
   421     G1YoungGenSizer sizer;
   422     size_t initial_region_num = sizer.initial_young_region_num();
   424     if (UseAdaptiveSizePolicy) {
   425       set_adaptive_young_list_length(true);
   426       _young_list_fixed_length = 0;
   427     } else {
   428       set_adaptive_young_list_length(false);
   429       _young_list_fixed_length = initial_region_num;
   430     }
   431     _free_regions_at_end_of_collection = _g1->free_regions();
   432     calculate_young_list_min_length();
   433     guarantee( _young_list_min_length == 0, "invariant, not enough info" );
   434     calculate_young_list_target_length();
   435   } else {
   436      _young_list_fixed_length = 0;
   437     _in_young_gc_mode = false;
   438   }
   440   // We may immediately start allocating regions and placing them on the
   441   // collection set list. Initialize the per-collection set info
   442   start_incremental_cset_building();
   443 }
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters()
{
  // "2 + G1Gen": the generation count reported to jstat grows by one
  // when a young generation is in use.
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
}
   451 void G1CollectorPolicy::calculate_young_list_min_length() {
   452   _young_list_min_length = 0;
   454   if (!adaptive_young_list_length())
   455     return;
   457   if (_alloc_rate_ms_seq->num() > 3) {
   458     double now_sec = os::elapsedTime();
   459     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
   460     double alloc_rate_ms = predict_alloc_rate_ms();
   461     size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
   462     size_t current_region_num = _g1->young_list()->length();
   463     _young_list_min_length = min_regions + current_region_num;
   464   }
   465 }
   467 void G1CollectorPolicy::calculate_young_list_target_length() {
   468   if (adaptive_young_list_length()) {
   469     size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
   470     calculate_young_list_target_length(rs_lengths);
   471   } else {
   472     if (full_young_gcs())
   473       _young_list_target_length = _young_list_fixed_length;
   474     else
   475       _young_list_target_length = _young_list_fixed_length / 2;
   476   }
   478   // Make sure we allow the application to allocate at least one
   479   // region before we need to do a collection again.
   480   size_t min_length = _g1->young_list()->length() + 1;
   481   _young_list_target_length = MAX2(_young_list_target_length, min_length);
   482   calculate_survivors_policy();
   483 }
   485 void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
   486   guarantee( adaptive_young_list_length(), "pre-condition" );
   487   guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
   489   double start_time_sec = os::elapsedTime();
   490   size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
   491   min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
   492   size_t reserve_regions =
   493     (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
   495   if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
   496     // we are in fully-young mode and there are free regions in the heap
   498     double survivor_regions_evac_time =
   499         predict_survivor_regions_evac_time();
   501     double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   502     size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
   503     size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
   504     size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
   505     double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
   506                           + survivor_regions_evac_time;
   508     // the result
   509     size_t final_young_length = 0;
   511     size_t init_free_regions =
   512       MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
   514     // if we're still under the pause target...
   515     if (base_time_ms <= target_pause_time_ms) {
   516       // We make sure that the shortest young length that makes sense
   517       // fits within the target pause time.
   518       size_t min_young_length = 1;
   520       if (predict_will_fit(min_young_length, base_time_ms,
   521                                      init_free_regions, target_pause_time_ms)) {
   522         // The shortest young length will fit within the target pause time;
   523         // we'll now check whether the absolute maximum number of young
   524         // regions will fit in the target pause time. If not, we'll do
   525         // a binary search between min_young_length and max_young_length
   526         size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
   527         size_t max_young_length = abs_max_young_length;
   529         if (max_young_length > min_young_length) {
   530           // Let's check if the initial max young length will fit within the
   531           // target pause. If so then there is no need to search for a maximal
   532           // young length - we'll return the initial maximum
   534           if (predict_will_fit(max_young_length, base_time_ms,
   535                                 init_free_regions, target_pause_time_ms)) {
   536             // The maximum young length will satisfy the target pause time.
   537             // We are done so set min young length to this maximum length.
   538             // The code after the loop will then set final_young_length using
   539             // the value cached in the minimum length.
   540             min_young_length = max_young_length;
   541           } else {
   542             // The maximum possible number of young regions will not fit within
   543             // the target pause time so let's search....
   545             size_t diff = (max_young_length - min_young_length) / 2;
   546             max_young_length = min_young_length + diff;
   548             while (max_young_length > min_young_length) {
   549               if (predict_will_fit(max_young_length, base_time_ms,
   550                                         init_free_regions, target_pause_time_ms)) {
   552                 // The current max young length will fit within the target
   553                 // pause time. Note we do not exit the loop here. By setting
   554                 // min = max, and then increasing the max below means that
   555                 // we will continue searching for an upper bound in the
   556                 // range [max..max+diff]
   557                 min_young_length = max_young_length;
   558               }
   559               diff = (max_young_length - min_young_length) / 2;
   560               max_young_length = min_young_length + diff;
   561             }
   562             // the above loop found a maximal young length that will fit
   563             // within the target pause time.
   564           }
   565           assert(min_young_length <= abs_max_young_length, "just checking");
   566         }
   567         final_young_length = min_young_length;
   568       }
   569     }
   570     // and we're done!
   572     // we should have at least one region in the target young length
   573     _young_list_target_length =
   574                               final_young_length + _recorded_survivor_regions;
   576     // let's keep an eye of how long we spend on this calculation
   577     // right now, I assume that we'll print it when we need it; we
   578     // should really adde it to the breakdown of a pause
   579     double end_time_sec = os::elapsedTime();
   580     double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
   582 #ifdef TRACE_CALC_YOUNG_LENGTH
   583     // leave this in for debugging, just in case
   584     gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
   585                            "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
   586                            target_pause_time_ms,
   587                            _young_list_target_length
   588                            elapsed_time_ms,
   589                            full_young_gcs() ? "full" : "partial",
   590                            during_initial_mark_pause() ? " i-m" : "",
   591                            _in_marking_window,
   592                            _in_marking_window_im);
   593 #endif // TRACE_CALC_YOUNG_LENGTH
   595     if (_young_list_target_length < _young_list_min_length) {
   596       // bummer; this means that, if we do a pause when the maximal
   597       // length dictates, we'll violate the pause spacing target (the
   598       // min length was calculate based on the application's current
   599       // alloc rate);
   601       // so, we have to bite the bullet, and allocate the minimum
   602       // number. We'll violate our target, but we just can't meet it.
   604 #ifdef TRACE_CALC_YOUNG_LENGTH
   605       // leave this in for debugging, just in case
   606       gclog_or_tty->print_cr("adjusted target length from "
   607                              SIZE_FORMAT " to " SIZE_FORMAT,
   608                              _young_list_target_length, _young_list_min_length);
   609 #endif // TRACE_CALC_YOUNG_LENGTH
   611       _young_list_target_length = _young_list_min_length;
   612     }
   613   } else {
   614     // we are in a partially-young mode or we've run out of regions (due
   615     // to evacuation failure)
   617 #ifdef TRACE_CALC_YOUNG_LENGTH
   618     // leave this in for debugging, just in case
   619     gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
   620                            _young_list_min_length);
   621 #endif // TRACE_CALC_YOUNG_LENGTH
   622     // we'll do the pause as soon as possible by choosing the minimum
   623     _young_list_target_length = _young_list_min_length;
   624   }
   626   _rs_lengths_prediction = rs_lengths;
   627 }
   629 // This is used by: calculate_young_list_target_length(rs_length). It
   630 // returns true iff:
   631 //   the predicted pause time for the given young list will not overflow
   632 //   the target pause time
   633 // and:
   634 //   the predicted amount of surviving data will not overflow the
   635 //   the amount of free space available for survivor regions.
   636 //
   637 bool
   638 G1CollectorPolicy::predict_will_fit(size_t young_length,
   639                                     double base_time_ms,
   640                                     size_t init_free_regions,
   641                                     double target_pause_time_ms) {
   643   if (young_length >= init_free_regions)
   644     // end condition 1: not enough space for the young regions
   645     return false;
   647   double accum_surv_rate_adj = 0.0;
   648   double accum_surv_rate =
   649     accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
   651   size_t bytes_to_copy =
   652     (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   654   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
   656   double young_other_time_ms =
   657                        predict_young_other_time_ms(young_length);
   659   double pause_time_ms =
   660                    base_time_ms + copy_time_ms + young_other_time_ms;
   662   if (pause_time_ms > target_pause_time_ms)
   663     // end condition 2: over the target pause time
   664     return false;
   666   size_t free_bytes =
   667                  (init_free_regions - young_length) * HeapRegion::GrainBytes;
   669   if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
   670     // end condition 3: out of to-space (conservatively)
   671     return false;
   673   // success!
   674   return true;
   675 }
   677 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
   678   double survivor_regions_evac_time = 0.0;
   679   for (HeapRegion * r = _recorded_survivor_head;
   680        r != NULL && r != _recorded_survivor_tail->get_next_young_region();
   681        r = r->get_next_young_region()) {
   682     survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
   683   }
   684   return survivor_regions_evac_time;
   685 }
   687 void G1CollectorPolicy::check_prediction_validity() {
   688   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
   690   size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
   691   if (rs_lengths > _rs_lengths_prediction) {
   692     // add 10% to avoid having to recalculate often
   693     size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
   694     calculate_young_list_target_length(rs_lengths_prediction);
   695   }
   696 }
// CollectorPolicy allocation hook.  G1 performs its allocations through
// G1CollectedHeap rather than through the policy object, so this must
// never be reached; the guarantee documents (and enforces) that.
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
// This method controls how a collector handles one or more
// of its generations being fully allocated.
// G1 handles allocation failure in G1CollectedHeap instead of going
// through the policy, so this hook must never be reached.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
   714 #ifndef PRODUCT
   715 bool G1CollectorPolicy::verify_young_ages() {
   716   HeapRegion* head = _g1->young_list()->first_region();
   717   return
   718     verify_young_ages(head, _short_lived_surv_rate_group);
   719   // also call verify_young_ages on any additional surv rate groups
   720 }
// Walk the young region list starting at head and check, for the given
// survivor rate group, that every region has a valid (non-negative) age
// and that ages appear in strictly increasing order along the list.
// Any violation is logged and makes the function return false.
bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );
  const char* name = surv_rate_group->name();
  bool ret = true;
  // Ages must be strictly greater than the previous region's age;
  // -1 makes the first valid age (>= 0) pass the check.
  int prev_age = -1;
  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    // A young region with no surv rate group is only acceptable if it
    // is a survivor region.
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }
    // Only regions belonging to the group we are verifying take part
    // in the age checks.
    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();
      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }
      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }
  return ret;
}
   759 #endif // PRODUCT
   761 void G1CollectorPolicy::record_full_collection_start() {
   762   _cur_collection_start_sec = os::elapsedTime();
   763   // Release the future to-space so that it is available for compaction into.
   764   _g1->set_full_collection();
   765 }
// Bookkeeping at the end of a full collection: record its duration,
// leave full-collection mode, and reset all the pause-prediction and
// young/partial heuristics so the policy starts fresh after the Full GC.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;
  _all_full_gc_times_ms->add(full_gc_time_ms);
  update_recent_gc_times(end_sec, full_gc_time_ms);
  _g1->clear_full_collection();
  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  // The Full GC collected everything we knew about, so the garbage
  // estimates start again from zero.
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;
  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups
  // Clear the recorded survivor regions (count 0, empty list).
  record_survivor_regions(0, NULL, NULL);
  _prev_region_num_young   = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;
  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  // Recompute the young list bounds now that the heap has changed shape.
  calculate_young_list_min_length();
  calculate_young_list_target_length();
}
   808 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
   809   _bytes_in_to_space_before_gc += bytes;
   810 }
   812 void G1CollectorPolicy::record_after_bytes(size_t bytes) {
   813   _bytes_in_to_space_after_gc += bytes;
   814 }
   816 void G1CollectorPolicy::record_stop_world_start() {
   817   _stop_world_start = os::elapsedTime();
   818 }
// Bookkeeping at the start of an evacuation pause: emit the "[GC pause"
// log prefix, record timing and heap-usage snapshots that the end-of-pause
// accounting will consume, and reset the per-pause counters.
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    if (in_young_gc_mode())
      gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }
  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));
  // Time spent stopping the world for this pause.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;
  // Snapshots used by record_collection_pause_end() to compute deltas.
  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();
  _bytes_in_to_space_before_gc = 0;
  _bytes_in_to_space_after_gc = 0;
  _bytes_in_collection_set_before_gc = 0;
// NOTE(review): HotSpot debug-only code is conventionally guarded by
// ASSERT; confirm that DEBUG is actually defined in debug builds, or
// this poisoning never runs.
#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly
  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
  }
#endif
  // Reset the auxiliary (user-defined) per-pause timers.
  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }
  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;
  if (in_young_gc_mode())
    _last_young_gc_full = false;
  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();
  assert( verify_young_ages(), "region age verification" );
}
// Record the time (ms) spent in the mark closure for the current pause;
// consumed later by the pause-summary accounting.
void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}
   887 void G1CollectorPolicy::record_concurrent_mark_init_start() {
   888   _mark_init_start_sec = os::elapsedTime();
   889   guarantee(!in_young_gc_mode(), "should not do be here in young GC mode");
   890 }
// Common bookkeeping for the end of initial mark: mark that concurrent
// marking is now in progress and record the stop-the-world portion of
// the mark (the elapsed time of the initial-mark work).
void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
   900 void G1CollectorPolicy::record_concurrent_mark_init_end() {
   901   double end_time_sec = os::elapsedTime();
   902   double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
   903   _concurrent_mark_init_times_ms->add(elapsed_time_ms);
   904   record_concurrent_mark_init_end_pre(elapsed_time_ms);
   906   _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
   907 }
// Note the start of the remark phase; the concurrent-marking window is
// considered closed from this point.
void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}
   914 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
   915   double end_time_sec = os::elapsedTime();
   916   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
   917   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
   918   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   919   _prev_collection_pause_end_ms += elapsed_time_ms;
   921   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
   922 }
// Note the start time of the cleanup pause.
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
// End of the cleanup pause: run the two halves of the bookkeeping
// (work1 logs marking results, work2 does the timing accounting).
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                      size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}
   935 void
   936 G1CollectorPolicy::
   937 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
   938                                          size_t max_live_bytes) {
   939   if (_n_marks < 2) _n_marks++;
   940   if (G1PolicyVerbose > 0)
   941     gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
   942                            " (of " SIZE_FORMAT " MB heap).",
   943                            max_live_bytes/M, _g1->capacity()/M);
   944 }
   946 // The important thing about this is that it includes "os::elapsedTime".
   947 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
   948   double end_time_sec = os::elapsedTime();
   949   double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
   950   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
   951   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   952   _prev_collection_pause_end_ms += elapsed_time_ms;
   954   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
   956   _num_markings++;
   958   // We did a marking, so reset the "since_last_mark" variables.
   959   double considerConcMarkCost = 1.0;
   960   // If there are available processors, concurrent activity is free...
   961   if (Threads::number_of_non_daemon_threads() * 2 <
   962       os::active_processor_count()) {
   963     considerConcMarkCost = 0.0;
   964   }
   965   _n_pauses_at_mark_end = _n_pauses;
   966   _n_marks_since_last_pause++;
   967 }
   969 void
   970 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   971   if (in_young_gc_mode()) {
   972     _should_revert_to_full_young_gcs = false;
   973     _last_full_young_gc = true;
   974     _in_marking_window = false;
   975     if (adaptive_young_list_length())
   976       calculate_young_list_target_length();
   977   }
   978 }
   980 void G1CollectorPolicy::record_concurrent_pause() {
   981   if (_stop_world_start > 0.0) {
   982     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
   983     _all_yield_times_ms->add(yield_ms);
   984   }
   985 }
// Currently a no-op; the yield time is captured in record_concurrent_pause().
void G1CollectorPolicy::record_concurrent_pause_end() {
}
   990 void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
   991   _cur_CH_strong_roots_end_sec = os::elapsedTime();
   992   _cur_CH_strong_roots_dur_ms =
   993     (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
   994 }
   996 void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
   997   _cur_G1_strong_roots_end_sec = os::elapsedTime();
   998   _cur_G1_strong_roots_dur_ms =
   999     (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
  1002 template<class T>
  1003 T sum_of(T* sum_arr, int start, int n, int N) {
  1004   T sum = (T)0;
  1005   for (int i = 0; i < n; i++) {
  1006     int j = (start + i) % N;
  1007     sum += sum_arr[j];
  1009   return sum;
  1012 void G1CollectorPolicy::print_par_stats(int level,
  1013                                         const char* str,
  1014                                         double* data,
  1015                                          bool summary) {
  1016   double min = data[0], max = data[0];
  1017   double total = 0.0;
  1018   int j;
  1019   for (j = 0; j < level; ++j)
  1020     gclog_or_tty->print("   ");
  1021   gclog_or_tty->print("[%s (ms):", str);
  1022   for (uint i = 0; i < ParallelGCThreads; ++i) {
  1023     double val = data[i];
  1024     if (val < min)
  1025       min = val;
  1026     if (val > max)
  1027       max = val;
  1028     total += val;
  1029     gclog_or_tty->print("  %3.1lf", val);
  1031   if (summary) {
  1032     gclog_or_tty->print_cr("");
  1033     double avg = total / (double) ParallelGCThreads;
  1034     gclog_or_tty->print(" ");
  1035     for (j = 0; j < level; ++j)
  1036       gclog_or_tty->print("   ");
  1037     gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
  1038                         avg, min, max);
  1040   gclog_or_tty->print_cr("]");
  1043 void G1CollectorPolicy::print_par_sizes(int level,
  1044                                         const char* str,
  1045                                         double* data,
  1046                                         bool summary) {
  1047   double min = data[0], max = data[0];
  1048   double total = 0.0;
  1049   int j;
  1050   for (j = 0; j < level; ++j)
  1051     gclog_or_tty->print("   ");
  1052   gclog_or_tty->print("[%s :", str);
  1053   for (uint i = 0; i < ParallelGCThreads; ++i) {
  1054     double val = data[i];
  1055     if (val < min)
  1056       min = val;
  1057     if (val > max)
  1058       max = val;
  1059     total += val;
  1060     gclog_or_tty->print(" %d", (int) val);
  1062   if (summary) {
  1063     gclog_or_tty->print_cr("");
  1064     double avg = total / (double) ParallelGCThreads;
  1065     gclog_or_tty->print(" ");
  1066     for (j = 0; j < level; ++j)
  1067       gclog_or_tty->print("   ");
  1068     gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d",
  1069                (int)total, (int)avg, (int)min, (int)max);
  1071   gclog_or_tty->print_cr("]");
  1074 void G1CollectorPolicy::print_stats (int level,
  1075                                      const char* str,
  1076                                      double value) {
  1077   for (int j = 0; j < level; ++j)
  1078     gclog_or_tty->print("   ");
  1079   gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value);
  1082 void G1CollectorPolicy::print_stats (int level,
  1083                                      const char* str,
  1084                                      int value) {
  1085   for (int j = 0; j < level; ++j)
  1086     gclog_or_tty->print("   ");
  1087   gclog_or_tty->print_cr("[%s: %d]", str, value);
  1090 double G1CollectorPolicy::avg_value (double* data) {
  1091   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1092     double ret = 0.0;
  1093     for (uint i = 0; i < ParallelGCThreads; ++i)
  1094       ret += data[i];
  1095     return ret / (double) ParallelGCThreads;
  1096   } else {
  1097     return data[0];
  1101 double G1CollectorPolicy::max_value (double* data) {
  1102   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1103     double ret = data[0];
  1104     for (uint i = 1; i < ParallelGCThreads; ++i)
  1105       if (data[i] > ret)
  1106         ret = data[i];
  1107     return ret;
  1108   } else {
  1109     return data[0];
  1113 double G1CollectorPolicy::sum_of_values (double* data) {
  1114   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1115     double sum = 0.0;
  1116     for (uint i = 0; i < ParallelGCThreads; i++)
  1117       sum += data[i];
  1118     return sum;
  1119   } else {
  1120     return data[0];
  1124 double G1CollectorPolicy::max_sum (double* data1,
  1125                                    double* data2) {
  1126   double ret = data1[0] + data2[0];
  1128   if (G1CollectedHeap::use_parallel_gc_threads()) {
  1129     for (uint i = 1; i < ParallelGCThreads; ++i) {
  1130       double data = data1[i] + data2[i];
  1131       if (data > ret)
  1132         ret = data;
  1135   return ret;
   1138 // Durations shorter than this are treated as zero (below the timer's granularity)
  1139 #define MIN_TIMER_GRANULARITY 0.0000001
  1141 void G1CollectorPolicy::record_collection_pause_end() {
  1142   double end_time_sec = os::elapsedTime();
  1143   double elapsed_ms = _last_pause_time_ms;
  1144   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  1145   double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
  1146   size_t rs_size =
  1147     _cur_collection_pause_used_regions_at_start - collection_set_size();
  1148   size_t cur_used_bytes = _g1->used();
  1149   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  1150   bool last_pause_included_initial_mark = false;
  1151   bool update_stats = !_g1->evacuation_failed();
  1153 #ifndef PRODUCT
  1154   if (G1YoungSurvRateVerbose) {
  1155     gclog_or_tty->print_cr("");
  1156     _short_lived_surv_rate_group->print();
  1157     // do that for any other surv rate groups too
  1159 #endif // PRODUCT
  1161   if (in_young_gc_mode()) {
  1162     last_pause_included_initial_mark = during_initial_mark_pause();
  1163     if (last_pause_included_initial_mark)
  1164       record_concurrent_mark_init_end_pre(0.0);
  1166     size_t min_used_targ =
  1167       (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  1170     if (!_g1->mark_in_progress() && !_last_full_young_gc) {
  1171       assert(!last_pause_included_initial_mark, "invariant");
  1172       if (cur_used_bytes > min_used_targ &&
  1173           cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
  1174         assert(!during_initial_mark_pause(), "we should not see this here");
  1176         // Note: this might have already been set, if during the last
  1177         // pause we decided to start a cycle but at the beginning of
  1178         // this pause we decided to postpone it. That's OK.
  1179         set_initiate_conc_mark_if_possible();
  1183     _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
  1186   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
  1187                           end_time_sec, false);
  1189   guarantee(_cur_collection_pause_used_regions_at_start >=
  1190             collection_set_size(),
  1191             "Negative RS size?");
  1193   // This assert is exempted when we're doing parallel collection pauses,
  1194   // because the fragmentation caused by the parallel GC allocation buffers
  1195   // can lead to more memory being used during collection than was used
  1196   // before. Best leave this out until the fragmentation problem is fixed.
  1197   // Pauses in which evacuation failed can also lead to negative
  1198   // collections, since no space is reclaimed from a region containing an
  1199   // object whose evacuation failed.
  1200   // Further, we're now always doing parallel collection.  But I'm still
  1201   // leaving this here as a placeholder for a more precise assertion later.
  1202   // (DLD, 10/05.)
  1203   assert((true || parallel) // Always using GC LABs now.
  1204          || _g1->evacuation_failed()
  1205          || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
  1206          "Negative collection");
  1208   size_t freed_bytes =
  1209     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  1210   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
  1212   double survival_fraction =
  1213     (double)surviving_bytes/
  1214     (double)_collection_set_bytes_used_before;
  1216   _n_pauses++;
  1218   if (update_stats) {
  1219     _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms);
  1220     _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms);
  1221     _recent_evac_times_ms->add(evac_ms);
  1222     _recent_pause_times_ms->add(elapsed_ms);
  1224     _recent_rs_sizes->add(rs_size);
  1226     // We exempt parallel collection from this check because Alloc Buffer
  1227     // fragmentation can produce negative collections.  Same with evac
  1228     // failure.
  1229     // Further, we're now always doing parallel collection.  But I'm still
  1230     // leaving this here as a placeholder for a more precise assertion later.
  1231     // (DLD, 10/05.
  1232     assert((true || parallel)
  1233            || _g1->evacuation_failed()
  1234            || surviving_bytes <= _collection_set_bytes_used_before,
  1235            "Or else negative collection!");
  1236     _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
  1237     _recent_CS_bytes_surviving->add(surviving_bytes);
  1239     // this is where we update the allocation rate of the application
  1240     double app_time_ms =
  1241       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
  1242     if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1243       // This usually happens due to the timer not having the required
  1244       // granularity. Some Linuxes are the usual culprits.
  1245       // We'll just set it to something (arbitrarily) small.
  1246       app_time_ms = 1.0;
  1248     size_t regions_allocated =
  1249       (_region_num_young - _prev_region_num_young) +
  1250       (_region_num_tenured - _prev_region_num_tenured);
  1251     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1252     _alloc_rate_ms_seq->add(alloc_rate_ms);
  1253     _prev_region_num_young   = _region_num_young;
  1254     _prev_region_num_tenured = _region_num_tenured;
  1256     double interval_ms =
  1257       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1258     update_recent_gc_times(end_time_sec, elapsed_ms);
  1259     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
  1260     if (recent_avg_pause_time_ratio() < 0.0 ||
  1261         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1262 #ifndef PRODUCT
  1263       // Dump info to allow post-facto debugging
  1264       gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1265       gclog_or_tty->print_cr("-------------------------------------------");
  1266       gclog_or_tty->print_cr("Recent GC Times (ms):");
  1267       _recent_gc_times_ms->dump();
  1268       gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1269       _recent_prev_end_times_for_all_gcs_sec->dump();
  1270       gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1271                              _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1272       // In debug mode, terminate the JVM if the user wants to debug at this point.
  1273       assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1274 #endif  // !PRODUCT
  1275       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1276       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1277       if (_recent_avg_pause_time_ratio < 0.0) {
  1278         _recent_avg_pause_time_ratio = 0.0;
  1279       } else {
  1280         assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1281         _recent_avg_pause_time_ratio = 1.0;
  1286   if (G1PolicyVerbose > 1) {
  1287     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
  1290   PauseSummary* summary = _summary;
  1292   double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
  1293   double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
  1294   double update_rs_time = avg_value(_par_last_update_rs_times_ms);
  1295   double update_rs_processed_buffers =
  1296     sum_of_values(_par_last_update_rs_processed_buffers);
  1297   double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
  1298   double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
  1299   double termination_time = avg_value(_par_last_termination_times_ms);
  1301   double parallel_other_time = _cur_collection_par_time_ms -
  1302     (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
  1303      scan_rs_time + obj_copy_time + termination_time);
  1304   if (update_stats) {
  1305     MainBodySummary* body_summary = summary->main_body_summary();
  1306     guarantee(body_summary != NULL, "should not be null!");
  1308     if (_satb_drain_time_set)
  1309       body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
  1310     else
  1311       body_summary->record_satb_drain_time_ms(0.0);
  1312     body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
  1313     body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
  1314     body_summary->record_update_rs_time_ms(update_rs_time);
  1315     body_summary->record_scan_rs_time_ms(scan_rs_time);
  1316     body_summary->record_obj_copy_time_ms(obj_copy_time);
  1317     if (parallel) {
  1318       body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
  1319       body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
  1320       body_summary->record_termination_time_ms(termination_time);
  1321       body_summary->record_parallel_other_time_ms(parallel_other_time);
  1323     body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
  1326   if (G1PolicyVerbose > 1) {
  1327     gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
  1328                            "        CH Strong: %10.6f ms    (avg: %10.6f ms)\n"
  1329                            "        G1 Strong: %10.6f ms    (avg: %10.6f ms)\n"
  1330                            "        Evac:      %10.6f ms    (avg: %10.6f ms)\n"
  1331                            "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
  1332                            "      |RS|: " SIZE_FORMAT,
  1333                            elapsed_ms, recent_avg_time_for_pauses_ms(),
  1334                            _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(),
  1335                            _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(),
  1336                            evac_ms, recent_avg_time_for_evac_ms(),
  1337                            scan_rs_time,
  1338                            recent_avg_time_for_pauses_ms() -
  1339                            recent_avg_time_for_G1_strong_ms(),
  1340                            rs_size);
  1342     gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
  1343                            "       At end " SIZE_FORMAT "K\n"
  1344                            "       garbage      : " SIZE_FORMAT "K"
  1345                            "       of     " SIZE_FORMAT "K\n"
  1346                            "       survival     : %6.2f%%  (%6.2f%% avg)",
  1347                            _cur_collection_pause_used_at_start_bytes/K,
  1348                            _g1->used()/K, freed_bytes/K,
  1349                            _collection_set_bytes_used_before/K,
  1350                            survival_fraction*100.0,
  1351                            recent_avg_survival_fraction()*100.0);
  1352     gclog_or_tty->print_cr("       Recent %% gc pause time: %6.2f",
  1353                            recent_avg_pause_time_ratio() * 100.0);
  1356   double other_time_ms = elapsed_ms;
  1358   if (_satb_drain_time_set) {
  1359     other_time_ms -= _cur_satb_drain_time_ms;
  1362   if (parallel) {
  1363     other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
  1364   } else {
  1365     other_time_ms -=
  1366       update_rs_time +
  1367       ext_root_scan_time + mark_stack_scan_time +
  1368       scan_rs_time + obj_copy_time;
  1371   if (PrintGCDetails) {
  1372     gclog_or_tty->print_cr("%s, %1.8lf secs]",
  1373                            (last_pause_included_initial_mark) ? " (initial-mark)" : "",
  1374                            elapsed_ms / 1000.0);
  1376     if (_satb_drain_time_set) {
  1377       print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
  1379     if (_last_satb_drain_processed_buffers >= 0) {
  1380       print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
  1382     if (parallel) {
  1383       print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
  1384       print_par_stats(2, "GC Worker Start Time",
  1385                       _par_last_gc_worker_start_times_ms, false);
  1386       print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
  1387       print_par_sizes(3, "Processed Buffers",
  1388                       _par_last_update_rs_processed_buffers, true);
  1389       print_par_stats(2, "Ext Root Scanning",
  1390                       _par_last_ext_root_scan_times_ms);
  1391       print_par_stats(2, "Mark Stack Scanning",
  1392                       _par_last_mark_stack_scan_times_ms);
  1393       print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
  1394       print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
  1395       print_par_stats(2, "Termination", _par_last_termination_times_ms);
  1396       print_par_sizes(3, "Termination Attempts",
  1397                       _par_last_termination_attempts, true);
  1398       print_par_stats(2, "GC Worker End Time",
  1399                       _par_last_gc_worker_end_times_ms, false);
  1400       print_stats(2, "Other", parallel_other_time);
  1401       print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  1402     } else {
  1403       print_stats(1, "Update RS", update_rs_time);
  1404       print_stats(2, "Processed Buffers",
  1405                   (int)update_rs_processed_buffers);
  1406       print_stats(1, "Ext Root Scanning", ext_root_scan_time);
  1407       print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
  1408       print_stats(1, "Scan RS", scan_rs_time);
  1409       print_stats(1, "Object Copying", obj_copy_time);
  1411 #ifndef PRODUCT
  1412     print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
  1413     print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
  1414     print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
  1415     print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
  1416     if (_num_cc_clears > 0) {
  1417       print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
  1419 #endif
  1420     print_stats(1, "Other", other_time_ms);
  1421     print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
  1423     for (int i = 0; i < _aux_num; ++i) {
  1424       if (_cur_aux_times_set[i]) {
  1425         char buffer[96];
  1426         sprintf(buffer, "Aux%d", i);
  1427         print_stats(1, buffer, _cur_aux_times_ms[i]);
  1431   if (PrintGCDetails)
  1432     gclog_or_tty->print("   [");
  1433   if (PrintGC || PrintGCDetails)
  1434     _g1->print_size_transition(gclog_or_tty,
  1435                                _cur_collection_pause_used_at_start_bytes,
  1436                                _g1->used(), _g1->capacity());
  1437   if (PrintGCDetails)
  1438     gclog_or_tty->print_cr("]");
  1440   _all_pause_times_ms->add(elapsed_ms);
  1441   if (update_stats) {
  1442     summary->record_total_time_ms(elapsed_ms);
  1443     summary->record_other_time_ms(other_time_ms);
  1445   for (int i = 0; i < _aux_num; ++i)
  1446     if (_cur_aux_times_set[i])
  1447       _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
  1449   // Reset marks-between-pauses counter.
  1450   _n_marks_since_last_pause = 0;
  1452   // Update the efficiency-since-mark vars.
  1453   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
  1454   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
  1455     // This usually happens due to the timer not having the required
  1456     // granularity. Some Linuxes are the usual culprits.
  1457     // We'll just set it to something (arbitrarily) small.
  1458     proc_ms = 1.0;
  1460   double cur_efficiency = (double) freed_bytes / proc_ms;
  1462   bool new_in_marking_window = _in_marking_window;
  1463   bool new_in_marking_window_im = false;
  1464   if (during_initial_mark_pause()) {
  1465     new_in_marking_window = true;
  1466     new_in_marking_window_im = true;
  1469   if (in_young_gc_mode()) {
  1470     if (_last_full_young_gc) {
  1471       set_full_young_gcs(false);
  1472       _last_full_young_gc = false;
  1475     if ( !_last_young_gc_full ) {
  1476       if ( _should_revert_to_full_young_gcs ||
  1477            _known_garbage_ratio < 0.05 ||
  1478            (adaptive_young_list_length() &&
  1479            (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
  1480         set_full_young_gcs(true);
  1483     _should_revert_to_full_young_gcs = false;
  1485     if (_last_young_gc_full && !_during_marking)
  1486       _young_gc_eff_seq->add(cur_efficiency);
  1489   _short_lived_surv_rate_group->start_adding_regions();
  1490   // do that for any other surv rate groupsx
  1492   // <NEW PREDICTION>
  1494   if (update_stats) {
  1495     double pause_time_ms = elapsed_ms;
  1497     size_t diff = 0;
  1498     if (_max_pending_cards >= _pending_cards)
  1499       diff = _max_pending_cards - _pending_cards;
  1500     _pending_card_diff_seq->add((double) diff);
  1502     double cost_per_card_ms = 0.0;
  1503     if (_pending_cards > 0) {
  1504       cost_per_card_ms = update_rs_time / (double) _pending_cards;
  1505       _cost_per_card_ms_seq->add(cost_per_card_ms);
  1508     size_t cards_scanned = _g1->cards_scanned();
  1510     double cost_per_entry_ms = 0.0;
  1511     if (cards_scanned > 10) {
  1512       cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
  1513       if (_last_young_gc_full)
  1514         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1515       else
  1516         _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1519     if (_max_rs_lengths > 0) {
  1520       double cards_per_entry_ratio =
  1521         (double) cards_scanned / (double) _max_rs_lengths;
  1522       if (_last_young_gc_full)
  1523         _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1524       else
  1525         _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1528     size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
  1529     if (rs_length_diff >= 0)
  1530       _rs_length_diff_seq->add((double) rs_length_diff);
  1532     size_t copied_bytes = surviving_bytes;
  1533     double cost_per_byte_ms = 0.0;
  1534     if (copied_bytes > 0) {
  1535       cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
  1536       if (_in_marking_window)
  1537         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1538       else
  1539         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1542     double all_other_time_ms = pause_time_ms -
  1543       (update_rs_time + scan_rs_time + obj_copy_time +
  1544        _mark_closure_time_ms + termination_time);
  1546     double young_other_time_ms = 0.0;
  1547     if (_recorded_young_regions > 0) {
  1548       young_other_time_ms =
  1549         _recorded_young_cset_choice_time_ms +
  1550         _recorded_young_free_cset_time_ms;
  1551       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1552                                              (double) _recorded_young_regions);
  1554     double non_young_other_time_ms = 0.0;
  1555     if (_recorded_non_young_regions > 0) {
  1556       non_young_other_time_ms =
  1557         _recorded_non_young_cset_choice_time_ms +
  1558         _recorded_non_young_free_cset_time_ms;
  1560       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1561                                          (double) _recorded_non_young_regions);
  1564     double constant_other_time_ms = all_other_time_ms -
  1565       (young_other_time_ms + non_young_other_time_ms);
  1566     _constant_other_time_ms_seq->add(constant_other_time_ms);
  1568     double survival_ratio = 0.0;
  1569     if (_bytes_in_collection_set_before_gc > 0) {
  1570       survival_ratio = (double) bytes_in_to_space_during_gc() /
  1571         (double) _bytes_in_collection_set_before_gc;
  1574     _pending_cards_seq->add((double) _pending_cards);
  1575     _scanned_cards_seq->add((double) cards_scanned);
  1576     _rs_lengths_seq->add((double) _max_rs_lengths);
  1578     double expensive_region_limit_ms =
  1579       (double) MaxGCPauseMillis - predict_constant_other_time_ms();
  1580     if (expensive_region_limit_ms < 0.0) {
  1581       // this means that the other time was predicted to be longer than
  1582       // than the max pause time
  1583       expensive_region_limit_ms = (double) MaxGCPauseMillis;
  1585     _expensive_region_limit_ms = expensive_region_limit_ms;
  1587     if (PREDICTIONS_VERBOSE) {
  1588       gclog_or_tty->print_cr("");
  1589       gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
  1590                     "REGIONS %d %d %d "
  1591                     "PENDING_CARDS %d %d "
  1592                     "CARDS_SCANNED %d %d "
  1593                     "RS_LENGTHS %d %d "
  1594                     "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
  1595                     "SURVIVAL_RATIO %1.6lf %1.6lf "
  1596                     "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
  1597                     "OTHER_YOUNG %1.6lf %1.6lf "
  1598                     "OTHER_NON_YOUNG %1.6lf %1.6lf "
  1599                     "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
  1600                     "ELAPSED %1.6lf %1.6lf ",
  1601                     _cur_collection_start_sec,
  1602                     (!_last_young_gc_full) ? 2 :
  1603                     (last_pause_included_initial_mark) ? 1 : 0,
  1604                     _recorded_region_num,
  1605                     _recorded_young_regions,
  1606                     _recorded_non_young_regions,
  1607                     _predicted_pending_cards, _pending_cards,
  1608                     _predicted_cards_scanned, cards_scanned,
  1609                     _predicted_rs_lengths, _max_rs_lengths,
  1610                     _predicted_rs_update_time_ms, update_rs_time,
  1611                     _predicted_rs_scan_time_ms, scan_rs_time,
  1612                     _predicted_survival_ratio, survival_ratio,
  1613                     _predicted_object_copy_time_ms, obj_copy_time,
  1614                     _predicted_constant_other_time_ms, constant_other_time_ms,
  1615                     _predicted_young_other_time_ms, young_other_time_ms,
  1616                     _predicted_non_young_other_time_ms,
  1617                     non_young_other_time_ms,
  1618                     _vtime_diff_ms, termination_time,
  1619                     _predicted_pause_time_ms, elapsed_ms);
  1622     if (G1PolicyVerbose > 0) {
  1623       gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
  1624                     _predicted_pause_time_ms,
  1625                     (_within_target) ? "within" : "outside",
  1626                     elapsed_ms);
  1631   _in_marking_window = new_in_marking_window;
  1632   _in_marking_window_im = new_in_marking_window_im;
  1633   _free_regions_at_end_of_collection = _g1->free_regions();
  1634   calculate_young_list_min_length();
  1635   calculate_young_list_target_length();
  1637   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  1638   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1639   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
  1640   // </NEW PREDICTION>
  1643 // <NEW PREDICTION>
  1645 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1646                                                      double update_rs_processed_buffers,
  1647                                                      double goal_ms) {
  1648   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1649   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1651   if (G1UseAdaptiveConcRefinement) {
  1652     const int k_gy = 3, k_gr = 6;
  1653     const double inc_k = 1.1, dec_k = 0.9;
  1655     int g = cg1r->green_zone();
  1656     if (update_rs_time > goal_ms) {
  1657       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
  1658     } else {
  1659       if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1660         g = (int)MAX2(g * inc_k, g + 1.0);
  1663     // Change the refinement threads params
  1664     cg1r->set_green_zone(g);
  1665     cg1r->set_yellow_zone(g * k_gy);
  1666     cg1r->set_red_zone(g * k_gr);
  1667     cg1r->reinitialize_threads();
  1669     int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1670     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1671                                     cg1r->yellow_zone());
  1672     // Change the barrier params
  1673     dcqs.set_process_completed_threshold(processing_threshold);
  1674     dcqs.set_max_completed_queue(cg1r->red_zone());
  1677   int curr_queue_size = dcqs.completed_buffers_num();
  1678   if (curr_queue_size >= cg1r->yellow_zone()) {
  1679     dcqs.set_completed_queue_padding(curr_queue_size);
  1680   } else {
  1681     dcqs.set_completed_queue_padding(0);
  1683   dcqs.notify_if_necessary();
  1686 double
  1687 G1CollectorPolicy::
  1688 predict_young_collection_elapsed_time_ms(size_t adjustment) {
  1689   guarantee( adjustment == 0 || adjustment == 1, "invariant" );
  1691   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1692   size_t young_num = g1h->young_list()->length();
  1693   if (young_num == 0)
  1694     return 0.0;
  1696   young_num += adjustment;
  1697   size_t pending_cards = predict_pending_cards();
  1698   size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
  1699                       predict_rs_length_diff();
  1700   size_t card_num;
  1701   if (full_young_gcs())
  1702     card_num = predict_young_card_num(rs_lengths);
  1703   else
  1704     card_num = predict_non_young_card_num(rs_lengths);
  1705   size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  1706   double accum_yg_surv_rate =
  1707     _short_lived_surv_rate_group->accum_surv_rate(adjustment);
  1709   size_t bytes_to_copy =
  1710     (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
  1712   return
  1713     predict_rs_update_time_ms(pending_cards) +
  1714     predict_rs_scan_time_ms(card_num) +
  1715     predict_object_copy_time_ms(bytes_to_copy) +
  1716     predict_young_other_time_ms(young_num) +
  1717     predict_constant_other_time_ms();
  1720 double
  1721 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1722   size_t rs_length = predict_rs_length_diff();
  1723   size_t card_num;
  1724   if (full_young_gcs())
  1725     card_num = predict_young_card_num(rs_length);
  1726   else
  1727     card_num = predict_non_young_card_num(rs_length);
  1728   return predict_base_elapsed_time_ms(pending_cards, card_num);
  1731 double
  1732 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1733                                                 size_t scanned_cards) {
  1734   return
  1735     predict_rs_update_time_ms(pending_cards) +
  1736     predict_rs_scan_time_ms(scanned_cards) +
  1737     predict_constant_other_time_ms();
  1740 double
  1741 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1742                                                   bool young) {
  1743   size_t rs_length = hr->rem_set()->occupied();
  1744   size_t card_num;
  1745   if (full_young_gcs())
  1746     card_num = predict_young_card_num(rs_length);
  1747   else
  1748     card_num = predict_non_young_card_num(rs_length);
  1749   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1751   double region_elapsed_time_ms =
  1752     predict_rs_scan_time_ms(card_num) +
  1753     predict_object_copy_time_ms(bytes_to_copy);
  1755   if (young)
  1756     region_elapsed_time_ms += predict_young_other_time_ms(1);
  1757   else
  1758     region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1760   return region_elapsed_time_ms;
  1763 size_t
  1764 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1765   size_t bytes_to_copy;
  1766   if (hr->is_marked())
  1767     bytes_to_copy = hr->max_live_bytes();
  1768   else {
  1769     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
  1770                "invariant" );
  1771     int age = hr->age_in_surv_rate_group();
  1772     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1773     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1776   return bytes_to_copy;
  1779 void
  1780 G1CollectorPolicy::start_recording_regions() {
  1781   _recorded_rs_lengths            = 0;
  1782   _recorded_young_regions         = 0;
  1783   _recorded_non_young_regions     = 0;
  1785 #if PREDICTIONS_VERBOSE
  1786   _recorded_marked_bytes          = 0;
  1787   _recorded_young_bytes           = 0;
  1788   _predicted_bytes_to_copy        = 0;
  1789   _predicted_rs_lengths           = 0;
  1790   _predicted_cards_scanned        = 0;
  1791 #endif // PREDICTIONS_VERBOSE
  1794 void
  1795 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
  1796 #if PREDICTIONS_VERBOSE
  1797   if (!young) {
  1798     _recorded_marked_bytes += hr->max_live_bytes();
  1800   _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
  1801 #endif // PREDICTIONS_VERBOSE
  1803   size_t rs_length = hr->rem_set()->occupied();
  1804   _recorded_rs_lengths += rs_length;
  1807 void
  1808 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
  1809   assert(!hr->is_young(), "should not call this");
  1810   ++_recorded_non_young_regions;
  1811   record_cset_region_info(hr, false);
  1814 void
  1815 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  1816   _recorded_young_regions = n_regions;
  1819 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
  1820 #if PREDICTIONS_VERBOSE
  1821   _recorded_young_bytes = bytes;
  1822 #endif // PREDICTIONS_VERBOSE
  1825 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1826   _recorded_rs_lengths = rs_lengths;
  1829 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  1830   _predicted_bytes_to_copy = bytes;
  1833 void
  1834 G1CollectorPolicy::end_recording_regions() {
  1835   // The _predicted_pause_time_ms field is referenced in code
  1836   // not under PREDICTIONS_VERBOSE. Let's initialize it.
  1837   _predicted_pause_time_ms = -1.0;
  1839 #if PREDICTIONS_VERBOSE
  1840   _predicted_pending_cards = predict_pending_cards();
  1841   _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  1842   if (full_young_gcs())
  1843     _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  1844   else
  1845     _predicted_cards_scanned +=
  1846       predict_non_young_card_num(_predicted_rs_lengths);
  1847   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
  1849   _predicted_rs_update_time_ms =
  1850     predict_rs_update_time_ms(_g1->pending_card_num());
  1851   _predicted_rs_scan_time_ms =
  1852     predict_rs_scan_time_ms(_predicted_cards_scanned);
  1853   _predicted_object_copy_time_ms =
  1854     predict_object_copy_time_ms(_predicted_bytes_to_copy);
  1855   _predicted_constant_other_time_ms =
  1856     predict_constant_other_time_ms();
  1857   _predicted_young_other_time_ms =
  1858     predict_young_other_time_ms(_recorded_young_regions);
  1859   _predicted_non_young_other_time_ms =
  1860     predict_non_young_other_time_ms(_recorded_non_young_regions);
  1862   _predicted_pause_time_ms =
  1863     _predicted_rs_update_time_ms +
  1864     _predicted_rs_scan_time_ms +
  1865     _predicted_object_copy_time_ms +
  1866     _predicted_constant_other_time_ms +
  1867     _predicted_young_other_time_ms +
  1868     _predicted_non_young_other_time_ms;
  1869 #endif // PREDICTIONS_VERBOSE
  1872 void G1CollectorPolicy::check_if_region_is_too_expensive(double
  1873                                                            predicted_time_ms) {
  1874   // I don't think we need to do this when in young GC mode since
  1875   // marking will be initiated next time we hit the soft limit anyway...
  1876   if (predicted_time_ms > _expensive_region_limit_ms) {
  1877     if (!in_young_gc_mode()) {
  1878         set_full_young_gcs(true);
  1879         // We might want to do something different here. However,
  1880         // right now we don't support the non-generational G1 mode
  1881         // (and in fact we are planning to remove the associated code,
  1882         // see CR 6814390). So, let's leave it as is and this will be
  1883         // removed some time in the future
  1884         ShouldNotReachHere();
  1885         set_during_initial_mark_pause();
  1886     } else
  1887       // no point in doing another partial one
  1888       _should_revert_to_full_young_gcs = true;
  1892 // </NEW PREDICTION>
  1895 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1896                                                double elapsed_ms) {
  1897   _recent_gc_times_ms->add(elapsed_ms);
  1898   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1899   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1902 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
  1903   if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis;
  1904   else return _recent_pause_times_ms->avg();
  1907 double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
  1908   if (_recent_CH_strong_roots_times_ms->num() == 0)
  1909     return (double)MaxGCPauseMillis/3.0;
  1910   else return _recent_CH_strong_roots_times_ms->avg();
  1913 double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
  1914   if (_recent_G1_strong_roots_times_ms->num() == 0)
  1915     return (double)MaxGCPauseMillis/3.0;
  1916   else return _recent_G1_strong_roots_times_ms->avg();
  1919 double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
  1920   if (_recent_evac_times_ms->num() == 0) return (double)MaxGCPauseMillis/3.0;
  1921   else return _recent_evac_times_ms->avg();
  1924 int G1CollectorPolicy::number_of_recent_gcs() {
  1925   assert(_recent_CH_strong_roots_times_ms->num() ==
  1926          _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync");
  1927   assert(_recent_G1_strong_roots_times_ms->num() ==
  1928          _recent_evac_times_ms->num(), "Sequence out of sync");
  1929   assert(_recent_evac_times_ms->num() ==
  1930          _recent_pause_times_ms->num(), "Sequence out of sync");
  1931   assert(_recent_pause_times_ms->num() ==
  1932          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  1933   assert(_recent_CS_bytes_used_before->num() ==
  1934          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
  1935   return _recent_pause_times_ms->num();
  1938 double G1CollectorPolicy::recent_avg_survival_fraction() {
  1939   return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
  1940                                            _recent_CS_bytes_used_before);
  1943 double G1CollectorPolicy::last_survival_fraction() {
  1944   return last_survival_fraction_work(_recent_CS_bytes_surviving,
  1945                                      _recent_CS_bytes_used_before);
  1948 double
  1949 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
  1950                                                      TruncatedSeq* before) {
  1951   assert(surviving->num() == before->num(), "Sequence out of sync");
  1952   if (before->sum() > 0.0) {
  1953       double recent_survival_rate = surviving->sum() / before->sum();
  1954       // We exempt parallel collection from this check because Alloc Buffer
  1955       // fragmentation can produce negative collections.
  1956       // Further, we're now always doing parallel collection.  But I'm still
  1957       // leaving this here as a placeholder for a more precise assertion later.
  1958       // (DLD, 10/05.)
  1959       assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  1960              _g1->evacuation_failed() ||
  1961              recent_survival_rate <= 1.0, "Or bad frac");
  1962       return recent_survival_rate;
  1963   } else {
  1964     return 1.0; // Be conservative.
  1968 double
  1969 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
  1970                                                TruncatedSeq* before) {
  1971   assert(surviving->num() == before->num(), "Sequence out of sync");
  1972   if (surviving->num() > 0 && before->last() > 0.0) {
  1973     double last_survival_rate = surviving->last() / before->last();
  1974     // We exempt parallel collection from this check because Alloc Buffer
  1975     // fragmentation can produce negative collections.
  1976     // Further, we're now always doing parallel collection.  But I'm still
  1977     // leaving this here as a placeholder for a more precise assertion later.
  1978     // (DLD, 10/05.)
  1979     assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
  1980            last_survival_rate <= 1.0, "Or bad frac");
  1981     return last_survival_rate;
  1982   } else {
  1983     return 1.0;
// Until `survival_min_obs` GCs have been observed, the conservative
// survival estimate is floored by the corresponding entry of
// `survival_min_obs_limits` (indexed by the number of GCs seen so far);
// `min_survival_rate` is the unconditional lower bound thereafter.
static const int survival_min_obs = 5;
static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
static const double min_survival_rate = 0.1;
  1991 double
  1992 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
  1993                                                            double latest) {
  1994   double res = avg;
  1995   if (number_of_recent_gcs() < survival_min_obs) {
  1996     res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
  1998   res = MAX2(res, latest);
  1999   res = MAX2(res, min_survival_rate);
  2000   // In the parallel case, LAB fragmentation can produce "negative
  2001   // collections"; so can evac failure.  Cap at 1.0
  2002   res = MIN2(res, 1.0);
  2003   return res;
  2006 size_t G1CollectorPolicy::expansion_amount() {
  2007   if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
  2008     // We will double the existing space, or take
  2009     // G1ExpandByPercentOfAvailable % of the available expansion
  2010     // space, whichever is smaller, bounded below by a minimum
  2011     // expansion (unless that's all that's left.)
  2012     const size_t min_expand_bytes = 1*M;
  2013     size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
  2014     size_t committed_bytes = _g1->capacity();
  2015     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  2016     size_t expand_bytes;
  2017     size_t expand_bytes_via_pct =
  2018       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  2019     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  2020     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  2021     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
  2022     if (G1PolicyVerbose > 1) {
  2023       gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
  2024                  "committed = %d%s, uncommited = %d%s, via pct = %d%s.\n"
  2025                  "                   Answer = %d.\n",
  2026                  recent_avg_pause_time_ratio(),
  2027                  byte_size_in_proper_unit(committed_bytes),
  2028                  proper_unit_for_byte_size(committed_bytes),
  2029                  byte_size_in_proper_unit(uncommitted_bytes),
  2030                  proper_unit_for_byte_size(uncommitted_bytes),
  2031                  byte_size_in_proper_unit(expand_bytes_via_pct),
  2032                  proper_unit_for_byte_size(expand_bytes_via_pct),
  2033                  byte_size_in_proper_unit(expand_bytes),
  2034                  proper_unit_for_byte_size(expand_bytes));
  2036     return expand_bytes;
  2037   } else {
  2038     return 0;
  2042 void G1CollectorPolicy::note_start_of_mark_thread() {
  2043   _mark_thread_startup_sec = os::elapsedTime();
  2046 class CountCSClosure: public HeapRegionClosure {
  2047   G1CollectorPolicy* _g1_policy;
  2048 public:
  2049   CountCSClosure(G1CollectorPolicy* g1_policy) :
  2050     _g1_policy(g1_policy) {}
  2051   bool doHeapRegion(HeapRegion* r) {
  2052     _g1_policy->_bytes_in_collection_set_before_gc += r->used();
  2053     return false;
  2055 };
  2057 void G1CollectorPolicy::count_CS_bytes_used() {
  2058   CountCSClosure cs_closure(this);
  2059   _g1->collection_set_iterate(&cs_closure);
  2062 static void print_indent(int level) {
  2063   for (int j = 0; j < level+1; ++j)
  2064     gclog_or_tty->print("   ");
  2067 void G1CollectorPolicy::print_summary (int level,
  2068                                        const char* str,
  2069                                        NumberSeq* seq) const {
  2070   double sum = seq->sum();
  2071   print_indent(level);
  2072   gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
  2073                 str, sum / 1000.0, seq->avg());
  2076 void G1CollectorPolicy::print_summary_sd (int level,
  2077                                           const char* str,
  2078                                           NumberSeq* seq) const {
  2079   print_summary(level, str, seq);
  2080   print_indent(level + 5);
  2081   gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  2082                 seq->num(), seq->sd(), seq->maximum());
  2085 void G1CollectorPolicy::check_other_times(int level,
  2086                                         NumberSeq* other_times_ms,
  2087                                         NumberSeq* calc_other_times_ms) const {
  2088   bool should_print = false;
  2090   double max_sum = MAX2(fabs(other_times_ms->sum()),
  2091                         fabs(calc_other_times_ms->sum()));
  2092   double min_sum = MIN2(fabs(other_times_ms->sum()),
  2093                         fabs(calc_other_times_ms->sum()));
  2094   double sum_ratio = max_sum / min_sum;
  2095   if (sum_ratio > 1.1) {
  2096     should_print = true;
  2097     print_indent(level + 1);
  2098     gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  2101   double max_avg = MAX2(fabs(other_times_ms->avg()),
  2102                         fabs(calc_other_times_ms->avg()));
  2103   double min_avg = MIN2(fabs(other_times_ms->avg()),
  2104                         fabs(calc_other_times_ms->avg()));
  2105   double avg_ratio = max_avg / min_avg;
  2106   if (avg_ratio > 1.1) {
  2107     should_print = true;
  2108     print_indent(level + 1);
  2109     gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  2112   if (other_times_ms->sum() < -0.01) {
  2113     print_indent(level + 1);
  2114     gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  2117   if (other_times_ms->avg() < -0.01) {
  2118     print_indent(level + 1);
  2119     gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  2122   if (calc_other_times_ms->sum() < -0.01) {
  2123     should_print = true;
  2124     print_indent(level + 1);
  2125     gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  2128   if (calc_other_times_ms->avg() < -0.01) {
  2129     should_print = true;
  2130     print_indent(level + 1);
  2131     gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  2134   if (should_print)
  2135     print_summary(level, "Other(Calc)", calc_other_times_ms);
  2138 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  2139   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  2140   MainBodySummary*    body_summary = summary->main_body_summary();
  2141   if (summary->get_total_seq()->num() > 0) {
  2142     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
  2143     if (body_summary != NULL) {
  2144       print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
  2145       if (parallel) {
  2146         print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
  2147         print_summary(2, "Update RS", body_summary->get_update_rs_seq());
  2148         print_summary(2, "Ext Root Scanning",
  2149                       body_summary->get_ext_root_scan_seq());
  2150         print_summary(2, "Mark Stack Scanning",
  2151                       body_summary->get_mark_stack_scan_seq());
  2152         print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
  2153         print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
  2154         print_summary(2, "Termination", body_summary->get_termination_seq());
  2155         print_summary(2, "Other", body_summary->get_parallel_other_seq());
  2157           NumberSeq* other_parts[] = {
  2158             body_summary->get_update_rs_seq(),
  2159             body_summary->get_ext_root_scan_seq(),
  2160             body_summary->get_mark_stack_scan_seq(),
  2161             body_summary->get_scan_rs_seq(),
  2162             body_summary->get_obj_copy_seq(),
  2163             body_summary->get_termination_seq()
  2164           };
  2165           NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
  2166                                         6, other_parts);
  2167           check_other_times(2, body_summary->get_parallel_other_seq(),
  2168                             &calc_other_times_ms);
  2170         print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
  2171         print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
  2172       } else {
  2173         print_summary(1, "Update RS", body_summary->get_update_rs_seq());
  2174         print_summary(1, "Ext Root Scanning",
  2175                       body_summary->get_ext_root_scan_seq());
  2176         print_summary(1, "Mark Stack Scanning",
  2177                       body_summary->get_mark_stack_scan_seq());
  2178         print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
  2179         print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
  2182     print_summary(1, "Other", summary->get_other_seq());
  2184       if (body_summary != NULL) {
  2185         NumberSeq calc_other_times_ms;
  2186         if (parallel) {
  2187           // parallel
  2188           NumberSeq* other_parts[] = {
  2189             body_summary->get_satb_drain_seq(),
  2190             body_summary->get_parallel_seq(),
  2191             body_summary->get_clear_ct_seq()
  2192           };
  2193           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2194                                                 3, other_parts);
  2195         } else {
  2196           // serial
  2197           NumberSeq* other_parts[] = {
  2198             body_summary->get_satb_drain_seq(),
  2199             body_summary->get_update_rs_seq(),
  2200             body_summary->get_ext_root_scan_seq(),
  2201             body_summary->get_mark_stack_scan_seq(),
  2202             body_summary->get_scan_rs_seq(),
  2203             body_summary->get_obj_copy_seq()
  2204           };
  2205           calc_other_times_ms = NumberSeq(summary->get_total_seq(),
  2206                                                 6, other_parts);
  2208         check_other_times(1,  summary->get_other_seq(), &calc_other_times_ms);
  2211   } else {
  2212     print_indent(0);
  2213     gclog_or_tty->print_cr("none");
  2215   gclog_or_tty->print_cr("");
  2218 void G1CollectorPolicy::print_tracing_info() const {
  2219   if (TraceGen0Time) {
  2220     gclog_or_tty->print_cr("ALL PAUSES");
  2221     print_summary_sd(0, "Total", _all_pause_times_ms);
  2222     gclog_or_tty->print_cr("");
  2223     gclog_or_tty->print_cr("");
  2224     gclog_or_tty->print_cr("   Full Young GC Pauses:    %8d", _full_young_pause_num);
  2225     gclog_or_tty->print_cr("   Partial Young GC Pauses: %8d", _partial_young_pause_num);
  2226     gclog_or_tty->print_cr("");
  2228     gclog_or_tty->print_cr("EVACUATION PAUSES");
  2229     print_summary(_summary);
  2231     gclog_or_tty->print_cr("MISC");
  2232     print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
  2233     print_summary_sd(0, "Yields", _all_yield_times_ms);
  2234     for (int i = 0; i < _aux_num; ++i) {
  2235       if (_all_aux_times_ms[i].num() > 0) {
  2236         char buffer[96];
  2237         sprintf(buffer, "Aux%d", i);
  2238         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
  2242     size_t all_region_num = _region_num_young + _region_num_tenured;
  2243     gclog_or_tty->print_cr("   New Regions %8d, Young %8d (%6.2lf%%), "
  2244                "Tenured %8d (%6.2lf%%)",
  2245                all_region_num,
  2246                _region_num_young,
  2247                (double) _region_num_young / (double) all_region_num * 100.0,
  2248                _region_num_tenured,
  2249                (double) _region_num_tenured / (double) all_region_num * 100.0);
  2251   if (TraceGen1Time) {
  2252     if (_all_full_gc_times_ms->num() > 0) {
  2253       gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  2254                  _all_full_gc_times_ms->num(),
  2255                  _all_full_gc_times_ms->sum() / 1000.0);
  2256       gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
  2257       gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  2258                     _all_full_gc_times_ms->sd(),
  2259                     _all_full_gc_times_ms->maximum());
  2264 void G1CollectorPolicy::print_yg_surv_rate_info() const {
  2265 #ifndef PRODUCT
  2266   _short_lived_surv_rate_group->print_surv_rate_summary();
  2267   // add this call for any other surv rate groups
  2268 #endif // PRODUCT
  2271 void
  2272 G1CollectorPolicy::update_region_num(bool young) {
  2273   if (young) {
  2274     ++_region_num_young;
  2275   } else {
  2276     ++_region_num_tenured;
  2280 #ifndef PRODUCT
  2281 // for debugging, bit of a hack...
  2282 static char*
  2283 region_num_to_mbs(int length) {
  2284   static char buffer[64];
  2285   double bytes = (double) (length * HeapRegion::GrainBytes);
  2286   double mbs = bytes / (double) (1024 * 1024);
  2287   sprintf(buffer, "%7.2lfMB", mbs);
  2288   return buffer;
  2290 #endif // PRODUCT
  2292 size_t G1CollectorPolicy::max_regions(int purpose) {
  2293   switch (purpose) {
  2294     case GCAllocForSurvived:
  2295       return _max_survivor_regions;
  2296     case GCAllocForTenured:
  2297       return REGIONS_UNLIMITED;
  2298     default:
  2299       ShouldNotReachHere();
  2300       return REGIONS_UNLIMITED;
  2301   };
  2304 // Calculates survivor space parameters.
  2305 void G1CollectorPolicy::calculate_survivors_policy()
  2307   if (G1FixedSurvivorSpaceSize == 0) {
  2308     _max_survivor_regions = _young_list_target_length / SurvivorRatio;
  2309   } else {
  2310     _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
  2313   if (G1FixedTenuringThreshold) {
  2314     _tenuring_threshold = MaxTenuringThreshold;
  2315   } else {
  2316     _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  2317         HeapRegion::GrainWords * _max_survivor_regions);
  2321 #ifndef PRODUCT
  2322 class HRSortIndexIsOKClosure: public HeapRegionClosure {
  2323   CollectionSetChooser* _chooser;
  2324 public:
  2325   HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
  2326     _chooser(chooser) {}
  2328   bool doHeapRegion(HeapRegion* r) {
  2329     if (!r->continuesHumongous()) {
  2330       assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
  2332     return false;
  2334 };
  2336 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
  2337   HRSortIndexIsOKClosure cl(_collectionSetChooser);
  2338   _g1->heap_region_iterate(&cl);
  2339   return true;
  2341 #endif
  2343 bool
  2344 G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
  2345   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2346   if (!during_cycle) {
  2347     set_initiate_conc_mark_if_possible();
  2348     return true;
  2349   } else {
  2350     return false;
  2354 void
  2355 G1CollectorPolicy::decide_on_conc_mark_initiation() {
  2356   // We are about to decide on whether this pause will be an
  2357   // initial-mark pause.
  2359   // First, during_initial_mark_pause() should not be already set. We
  2360   // will set it here if we have to. However, it should be cleared by
  2361   // the end of the pause (it's only set for the duration of an
  2362   // initial-mark pause).
  2363   assert(!during_initial_mark_pause(), "pre-condition");
  2365   if (initiate_conc_mark_if_possible()) {
  2366     // We had noticed on a previous pause that the heap occupancy has
  2367     // gone over the initiating threshold and we should start a
  2368     // concurrent marking cycle. So we might initiate one.
  2370     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  2371     if (!during_cycle) {
  2372       // The concurrent marking thread is not "during a cycle", i.e.,
  2373       // it has completed the last one. So we can go ahead and
  2374       // initiate a new cycle.
  2376       set_during_initial_mark_pause();
  2378       // And we can now clear initiate_conc_mark_if_possible() as
  2379       // we've already acted on it.
  2380       clear_initiate_conc_mark_if_possible();
  2381     } else {
  2382       // The concurrent marking thread is still finishing up the
  2383       // previous cycle. If we start one right now the two cycles
  2384       // overlap. In particular, the concurrent marking thread might
  2385       // be in the process of clearing the next marking bitmap (which
  2386       // we will use for the next cycle if we start one). Starting a
  2387       // cycle now will be bad given that parts of the marking
  2388       // information might get cleared by the marking thread. And we
  2389       // cannot wait for the marking thread to finish the cycle as it
  2390       // periodically yields while clearing the next marking bitmap
  2391       // and, if it's in a yield point, it's waiting for us to
  2392       // finish. So, at this point we will not start a cycle and we'll
  2393       // let the concurrent marking thread complete the last one.
  2398 void
  2399 G1CollectorPolicy_BestRegionsFirst::
  2400 record_collection_pause_start(double start_time_sec, size_t start_used) {
  2401   G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
  2404 class NextNonCSElemFinder: public HeapRegionClosure {
  2405   HeapRegion* _res;
  2406 public:
  2407   NextNonCSElemFinder(): _res(NULL) {}
  2408   bool doHeapRegion(HeapRegion* r) {
  2409     if (!r->in_collection_set()) {
  2410       _res = r;
  2411       return true;
  2412     } else {
  2413       return false;
  2416   HeapRegion* res() { return _res; }
  2417 };
  2419 class KnownGarbageClosure: public HeapRegionClosure {
  2420   CollectionSetChooser* _hrSorted;
  2422 public:
  2423   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  2424     _hrSorted(hrSorted)
  2425   {}
  2427   bool doHeapRegion(HeapRegion* r) {
  2428     // We only include humongous regions in collection
  2429     // sets when concurrent mark shows that their contained object is
  2430     // unreachable.
  2432     // Do we have any marking information for this region?
  2433     if (r->is_marked()) {
  2434       // We don't include humongous regions in collection
  2435       // sets because we collect them immediately at the end of a marking
  2436       // cycle.  We also don't include young regions because we *must*
  2437       // include them in the next collection pause.
  2438       if (!r->isHumongous() && !r->is_young()) {
  2439         _hrSorted->addMarkedHeapRegion(r);
  2442     return false;
  2444 };
  2446 class ParKnownGarbageHRClosure: public HeapRegionClosure {
  2447   CollectionSetChooser* _hrSorted;
  2448   jint _marked_regions_added;
  2449   jint _chunk_size;
  2450   jint _cur_chunk_idx;
  2451   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  2452   int _worker;
  2453   int _invokes;
  2455   void get_new_chunk() {
  2456     _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
  2457     _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  2459   void add_region(HeapRegion* r) {
  2460     if (_cur_chunk_idx == _cur_chunk_end) {
  2461       get_new_chunk();
  2463     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
  2464     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
  2465     _marked_regions_added++;
  2466     _cur_chunk_idx++;
  2469 public:
  2470   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  2471                            jint chunk_size,
  2472                            int worker) :
  2473     _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
  2474     _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
  2475     _invokes(0)
  2476   {}
  2478   bool doHeapRegion(HeapRegion* r) {
  2479     // We only include humongous regions in collection
  2480     // sets when concurrent mark shows that their contained object is
  2481     // unreachable.
  2482     _invokes++;
  2484     // Do we have any marking information for this region?
  2485     if (r->is_marked()) {
  2486       // We don't include humongous regions in collection
  2487       // sets because we collect them immediately at the end of a marking
  2488       // cycle.
  2489       // We also do not include young regions in collection sets
  2490       if (!r->isHumongous() && !r->is_young()) {
  2491         add_region(r);
  2494     return false;
  2496   jint marked_regions_added() { return _marked_regions_added; }
  2497   int invokes() { return _invokes; }
  2498 };
  2500 class ParKnownGarbageTask: public AbstractGangTask {
  2501   CollectionSetChooser* _hrSorted;
  2502   jint _chunk_size;
  2503   G1CollectedHeap* _g1;
  2504 public:
  2505   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
  2506     AbstractGangTask("ParKnownGarbageTask"),
  2507     _hrSorted(hrSorted), _chunk_size(chunk_size),
  2508     _g1(G1CollectedHeap::heap())
  2509   {}
  2511   void work(int i) {
  2512     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
  2513     // Back to zero for the claim value.
  2514     _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
  2515                                          HeapRegion::InitialClaimValue);
  2516     jint regions_added = parKnownGarbageCl.marked_regions_added();
  2517     _hrSorted->incNumMarkedHeapRegions(regions_added);
  2518     if (G1PrintParCleanupStats) {
  2519       gclog_or_tty->print("     Thread %d called %d times, added %d regions to list.\n",
  2520                  i, parKnownGarbageCl.invokes(), regions_added);
  2523 };
  2525 void
  2526 G1CollectorPolicy_BestRegionsFirst::
  2527 record_concurrent_mark_cleanup_end(size_t freed_bytes,
  2528                                    size_t max_live_bytes) {
  2529   double start;
  2530   if (G1PrintParCleanupStats) start = os::elapsedTime();
  2531   record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  2533   _collectionSetChooser->clearMarkedHeapRegions();
  2534   double clear_marked_end;
  2535   if (G1PrintParCleanupStats) {
  2536     clear_marked_end = os::elapsedTime();
  2537     gclog_or_tty->print_cr("  clear marked regions + work1: %8.3f ms.",
  2538                   (clear_marked_end - start)*1000.0);
  2540   if (G1CollectedHeap::use_parallel_gc_threads()) {
  2541     const size_t OverpartitionFactor = 4;
  2542     const size_t MinWorkUnit = 8;
  2543     const size_t WorkUnit =
  2544       MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
  2545            MinWorkUnit);
  2546     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
  2547                                                              WorkUnit);
  2548     ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  2549                                             (int) WorkUnit);
  2550     _g1->workers()->run_task(&parKnownGarbageTask);
  2552     assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  2553            "sanity check");
  2554   } else {
  2555     KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  2556     _g1->heap_region_iterate(&knownGarbagecl);
  2558   double known_garbage_end;
  2559   if (G1PrintParCleanupStats) {
  2560     known_garbage_end = os::elapsedTime();
  2561     gclog_or_tty->print_cr("  compute known garbage: %8.3f ms.",
  2562                   (known_garbage_end - clear_marked_end)*1000.0);
  2564   _collectionSetChooser->sortMarkedHeapRegions();
  2565   double sort_end;
  2566   if (G1PrintParCleanupStats) {
  2567     sort_end = os::elapsedTime();
  2568     gclog_or_tty->print_cr("  sorting: %8.3f ms.",
  2569                   (sort_end - known_garbage_end)*1000.0);
  2572   record_concurrent_mark_cleanup_end_work2();
  2573   double work2_end;
  2574   if (G1PrintParCleanupStats) {
  2575     work2_end = os::elapsedTime();
  2576     gclog_or_tty->print_cr("  work2: %8.3f ms.",
  2577                   (work2_end - sort_end)*1000.0);
  2581 // Add the heap region at the head of the non-incremental collection set
  2582 void G1CollectorPolicy::
  2583 add_to_collection_set(HeapRegion* hr) {
  2584   assert(_inc_cset_build_state == Active, "Precondition");
  2585   assert(!hr->is_young(), "non-incremental add of young region");
  2587   if (G1PrintHeapRegions) {
  2588     gclog_or_tty->print_cr("added region to cset "
  2589                            "%d:["PTR_FORMAT", "PTR_FORMAT"], "
  2590                            "top "PTR_FORMAT", %s",
  2591                            hr->hrs_index(), hr->bottom(), hr->end(),
  2592                            hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
  2595   if (_g1->mark_in_progress())
  2596     _g1->concurrent_mark()->registerCSetRegion(hr);
  2598   assert(!hr->in_collection_set(), "should not already be in the CSet");
  2599   hr->set_in_collection_set(true);
  2600   hr->set_next_in_collection_set(_collection_set);
  2601   _collection_set = hr;
  2602   _collection_set_size++;
  2603   _collection_set_bytes_used_before += hr->used();
  2604   _g1->register_region_with_in_cset_fast_test(hr);
  2607 // Initialize the per-collection-set information
  2608 void G1CollectorPolicy::start_incremental_cset_building() {
  2609   assert(_inc_cset_build_state == Inactive, "Precondition");
  2611   _inc_cset_head = NULL;
  2612   _inc_cset_tail = NULL;
  2613   _inc_cset_size = 0;
  2614   _inc_cset_bytes_used_before = 0;
  2616   if (in_young_gc_mode()) {
  2617     _inc_cset_young_index = 0;
  2620   _inc_cset_max_finger = 0;
  2621   _inc_cset_recorded_young_bytes = 0;
  2622   _inc_cset_recorded_rs_lengths = 0;
  2623   _inc_cset_predicted_elapsed_time_ms = 0;
  2624   _inc_cset_predicted_bytes_to_copy = 0;
  2625   _inc_cset_build_state = Active;
  2628 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  2629   // This routine is used when:
  2630   // * adding survivor regions to the incremental cset at the end of an
  2631   //   evacuation pause,
  2632   // * adding the current allocation region to the incremental cset
  2633   //   when it is retired, and
  2634   // * updating existing policy information for a region in the
  2635   //   incremental cset via young list RSet sampling.
  2636   // Therefore this routine may be called at a safepoint by the
  2637   // VM thread, or in-between safepoints by mutator threads (when
  2638   // retiring the current allocation region) or a concurrent
  2639   // refine thread (RSet sampling).
  2641   double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  2642   size_t used_bytes = hr->used();
  2644   _inc_cset_recorded_rs_lengths += rs_length;
  2645   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  2647   _inc_cset_bytes_used_before += used_bytes;
  2649   // Cache the values we have added to the aggregated informtion
  2650   // in the heap region in case we have to remove this region from
  2651   // the incremental collection set, or it is updated by the
  2652   // rset sampling code
  2653   hr->set_recorded_rs_length(rs_length);
  2654   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  2656 #if PREDICTIONS_VERBOSE
  2657   size_t bytes_to_copy = predict_bytes_to_copy(hr);
  2658   _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
  2660   // Record the number of bytes used in this region
  2661   _inc_cset_recorded_young_bytes += used_bytes;
  2663   // Cache the values we have added to the aggregated informtion
  2664   // in the heap region in case we have to remove this region from
  2665   // the incremental collection set, or it is updated by the
  2666   // rset sampling code
  2667   hr->set_predicted_bytes_to_copy(bytes_to_copy);
  2668 #endif // PREDICTIONS_VERBOSE
  2671 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  2672   // This routine is currently only called as part of the updating of
  2673   // existing policy information for regions in the incremental cset that
  2674   // is performed by the concurrent refine thread(s) as part of young list
  2675   // RSet sampling. Therefore we should not be at a safepoint.
  2677   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  2678   assert(hr->is_young(), "it should be");
  2680   size_t used_bytes = hr->used();
  2681   size_t old_rs_length = hr->recorded_rs_length();
  2682   double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  2684   // Subtract the old recorded/predicted policy information for
  2685   // the given heap region from the collection set info.
  2686   _inc_cset_recorded_rs_lengths -= old_rs_length;
  2687   _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
  2689   _inc_cset_bytes_used_before -= used_bytes;
  2691   // Clear the values cached in the heap region
  2692   hr->set_recorded_rs_length(0);
  2693   hr->set_predicted_elapsed_time_ms(0);
  2695 #if PREDICTIONS_VERBOSE
  2696   size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
  2697   _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
  2699   // Subtract the number of bytes used in this region
  2700   _inc_cset_recorded_young_bytes -= used_bytes;
  2702   // Clear the values cached in the heap region
  2703   hr->set_predicted_bytes_to_copy(0);
  2704 #endif // PREDICTIONS_VERBOSE
  2707 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  2708   // Update the collection set information that is dependent on the new RS length
  2709   assert(hr->is_young(), "Precondition");
  2711   remove_from_incremental_cset_info(hr);
  2712   add_to_incremental_cset_info(hr, new_rs_length);
  2715 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  2716   assert( hr->is_young(), "invariant");
  2717   assert( hr->young_index_in_cset() == -1, "invariant" );
  2718   assert(_inc_cset_build_state == Active, "Precondition");
  2720   // We need to clear and set the cached recorded/cached collection set
  2721   // information in the heap region here (before the region gets added
  2722   // to the collection set). An individual heap region's cached values
  2723   // are calculated, aggregated with the policy collection set info,
  2724   // and cached in the heap region here (initially) and (subsequently)
  2725   // by the Young List sampling code.
  2727   size_t rs_length = hr->rem_set()->occupied();
  2728   add_to_incremental_cset_info(hr, rs_length);
  2730   HeapWord* hr_end = hr->end();
  2731   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
  2733   assert(!hr->in_collection_set(), "invariant");
  2734   hr->set_in_collection_set(true);
  2735   assert( hr->next_in_collection_set() == NULL, "invariant");
  2737   _inc_cset_size++;
  2738   _g1->register_region_with_in_cset_fast_test(hr);
  2740   hr->set_young_index_in_cset((int) _inc_cset_young_index);
  2741   ++_inc_cset_young_index;
  2744 // Add the region at the RHS of the incremental cset
  2745 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  2746   // We should only ever be appending survivors at the end of a pause
  2747   assert( hr->is_survivor(), "Logic");
  2749   // Do the 'common' stuff
  2750   add_region_to_incremental_cset_common(hr);
  2752   // Now add the region at the right hand side
  2753   if (_inc_cset_tail == NULL) {
  2754     assert(_inc_cset_head == NULL, "invariant");
  2755     _inc_cset_head = hr;
  2756   } else {
  2757     _inc_cset_tail->set_next_in_collection_set(hr);
  2759   _inc_cset_tail = hr;
  2761   if (G1PrintHeapRegions) {
  2762     gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
  2763                   "%d:["PTR_FORMAT", "PTR_FORMAT"], "
  2764                   "top "PTR_FORMAT", young %s",
  2765                   hr->hrs_index(), hr->bottom(), hr->end(),
  2766                   hr->top(), (hr->is_young()) ? "YES" : "NO");
  2770 // Add the region to the LHS of the incremental cset
  2771 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  2772   // Survivors should be added to the RHS at the end of a pause
  2773   assert(!hr->is_survivor(), "Logic");
  2775   // Do the 'common' stuff
  2776   add_region_to_incremental_cset_common(hr);
  2778   // Add the region at the left hand side
  2779   hr->set_next_in_collection_set(_inc_cset_head);
  2780   if (_inc_cset_head == NULL) {
  2781     assert(_inc_cset_tail == NULL, "Invariant");
  2782     _inc_cset_tail = hr;
  2784   _inc_cset_head = hr;
  2786   if (G1PrintHeapRegions) {
  2787     gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
  2788                   "%d:["PTR_FORMAT", "PTR_FORMAT"], "
  2789                   "top "PTR_FORMAT", young %s",
  2790                   hr->hrs_index(), hr->bottom(), hr->end(),
  2791                   hr->top(), (hr->is_young()) ? "YES" : "NO");
  2795 #ifndef PRODUCT
  2796 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  2797   assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
  2799   st->print_cr("\nCollection_set:");
  2800   HeapRegion* csr = list_head;
  2801   while (csr != NULL) {
  2802     HeapRegion* next = csr->next_in_collection_set();
  2803     assert(csr->in_collection_set(), "bad CS");
  2804     st->print_cr("  [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
  2805                  "age: %4d, y: %d, surv: %d",
  2806                         csr->bottom(), csr->end(),
  2807                         csr->top(),
  2808                         csr->prev_top_at_mark_start(),
  2809                         csr->next_top_at_mark_start(),
  2810                         csr->top_at_conc_mark_count(),
  2811                         csr->age_in_surv_rate_group_cond(),
  2812                         csr->is_young(),
  2813                         csr->is_survivor());
  2814     csr = next;
  2817 #endif // !PRODUCT
  2819 void
  2820 G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
  2821                                                   double target_pause_time_ms) {
  2822   // Set this here - in case we're not doing young collections.
  2823   double non_young_start_time_sec = os::elapsedTime();
  2825   start_recording_regions();
  2827   guarantee(target_pause_time_ms > 0.0,
  2828             err_msg("target_pause_time_ms = %1.6lf should be positive",
  2829                     target_pause_time_ms));
  2830   guarantee(_collection_set == NULL, "Precondition");
  2832   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  2833   double predicted_pause_time_ms = base_time_ms;
  2835   double time_remaining_ms = target_pause_time_ms - base_time_ms;
  2837   // the 10% and 50% values are arbitrary...
  2838   if (time_remaining_ms < 0.10 * target_pause_time_ms) {
  2839     time_remaining_ms = 0.50 * target_pause_time_ms;
  2840     _within_target = false;
  2841   } else {
  2842     _within_target = true;
  2845   // We figure out the number of bytes available for future to-space.
  2846   // For new regions without marking information, we must assume the
  2847   // worst-case of complete survival.  If we have marking information for a
  2848   // region, we can bound the amount of live data.  We can add a number of
  2849   // such regions, as long as the sum of the live data bounds does not
  2850   // exceed the available evacuation space.
  2851   size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
  2853   size_t expansion_bytes =
  2854     _g1->expansion_regions() * HeapRegion::GrainBytes;
  2856   _collection_set_bytes_used_before = 0;
  2857   _collection_set_size = 0;
  2859   // Adjust for expansion and slop.
  2860   max_live_bytes = max_live_bytes + expansion_bytes;
  2862   assert(_g1->regions_accounted_for(), "Region leakage!");
  2864   HeapRegion* hr;
  2865   if (in_young_gc_mode()) {
  2866     double young_start_time_sec = os::elapsedTime();
  2868     if (G1PolicyVerbose > 0) {
  2869       gclog_or_tty->print_cr("Adding %d young regions to the CSet",
  2870                     _g1->young_list()->length());
  2873     _young_cset_length  = 0;
  2874     _last_young_gc_full = full_young_gcs() ? true : false;
  2876     if (_last_young_gc_full)
  2877       ++_full_young_pause_num;
  2878     else
  2879       ++_partial_young_pause_num;
  2881     // The young list is laid with the survivor regions from the previous
  2882     // pause are appended to the RHS of the young list, i.e.
  2883     //   [Newly Young Regions ++ Survivors from last pause].
  2885     hr = _g1->young_list()->first_survivor_region();
  2886     while (hr != NULL) {
  2887       assert(hr->is_survivor(), "badly formed young list");
  2888       hr->set_young();
  2889       hr = hr->get_next_young_region();
  2892     // Clear the fields that point to the survivor list - they are
  2893     // all young now.
  2894     _g1->young_list()->clear_survivors();
  2896     if (_g1->mark_in_progress())
  2897       _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
  2899     _young_cset_length = _inc_cset_young_index;
  2900     _collection_set = _inc_cset_head;
  2901     _collection_set_size = _inc_cset_size;
  2902     _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  2904     // For young regions in the collection set, we assume the worst
  2905     // case of complete survival
  2906     max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
  2908     time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  2909     predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
  2911     // The number of recorded young regions is the incremental
  2912     // collection set's current size
  2913     set_recorded_young_regions(_inc_cset_size);
  2914     set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  2915     set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
  2916 #if PREDICTIONS_VERBOSE
  2917     set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
  2918 #endif // PREDICTIONS_VERBOSE
  2920     if (G1PolicyVerbose > 0) {
  2921       gclog_or_tty->print_cr("  Added " PTR_FORMAT " Young Regions to CS.",
  2922                              _inc_cset_size);
  2923       gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
  2924                             max_live_bytes/K);
  2927     assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
  2929     double young_end_time_sec = os::elapsedTime();
  2930     _recorded_young_cset_choice_time_ms =
  2931       (young_end_time_sec - young_start_time_sec) * 1000.0;
  2933     // We are doing young collections so reset this.
  2934     non_young_start_time_sec = young_end_time_sec;
  2936     // Note we can use either _collection_set_size or
  2937     // _young_cset_length here
  2938     if (_collection_set_size > 0 && _last_young_gc_full) {
  2939       // don't bother adding more regions...
  2940       goto choose_collection_set_end;
  2944   if (!in_young_gc_mode() || !full_young_gcs()) {
  2945     bool should_continue = true;
  2946     NumberSeq seq;
  2947     double avg_prediction = 100000000000000000.0; // something very large
  2949     do {
  2950       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
  2951                                                       avg_prediction);
  2952       if (hr != NULL) {
  2953         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
  2954         time_remaining_ms -= predicted_time_ms;
  2955         predicted_pause_time_ms += predicted_time_ms;
  2956         add_to_collection_set(hr);
  2957         record_non_young_cset_region(hr);
  2958         max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
  2959         if (G1PolicyVerbose > 0) {
  2960           gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
  2961                         max_live_bytes/K);
  2963         seq.add(predicted_time_ms);
  2964         avg_prediction = seq.avg() + seq.sd();
  2966       should_continue =
  2967         ( hr != NULL) &&
  2968         ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
  2969           : _collection_set_size < _young_list_fixed_length );
  2970     } while (should_continue);
  2972     if (!adaptive_young_list_length() &&
  2973         _collection_set_size < _young_list_fixed_length)
  2974       _should_revert_to_full_young_gcs  = true;
  2977 choose_collection_set_end:
  2978   stop_incremental_cset_building();
  2980   count_CS_bytes_used();
  2982   end_recording_regions();
  2984   double non_young_end_time_sec = os::elapsedTime();
  2985   _recorded_non_young_cset_choice_time_ms =
  2986     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
  2989 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
  2990   G1CollectorPolicy::record_full_collection_end();
  2991   _collectionSetChooser->updateAfterFullCollection();
  2994 void G1CollectorPolicy_BestRegionsFirst::
  2995 expand_if_possible(size_t numRegions) {
  2996   size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
  2997   _g1->expand(expansion_bytes);
  3000 void G1CollectorPolicy_BestRegionsFirst::
  3001 record_collection_pause_end() {
  3002   G1CollectorPolicy::record_collection_pause_end();
  3003   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");

mercurial