src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

changeset 0:f90c822e73f8
child 6876:710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,2280 @@
     1.4 +/*
     1.5 + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef __clang_major__
    1.29 +#define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
    1.30 +#endif
    1.31 +
    1.32 +#include "precompiled.hpp"
    1.33 +#include "gc_implementation/g1/concurrentG1Refine.hpp"
    1.34 +#include "gc_implementation/g1/concurrentMark.hpp"
    1.35 +#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    1.36 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    1.37 +#include "gc_implementation/g1/g1CollectorPolicy.hpp"
    1.38 +#include "gc_implementation/g1/g1ErgoVerbose.hpp"
    1.39 +#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
    1.40 +#include "gc_implementation/g1/g1Log.hpp"
    1.41 +#include "gc_implementation/g1/heapRegionRemSet.hpp"
    1.42 +#include "gc_implementation/shared/gcPolicyCounters.hpp"
    1.43 +#include "runtime/arguments.hpp"
    1.44 +#include "runtime/java.hpp"
    1.45 +#include "runtime/mutexLocker.hpp"
    1.46 +#include "utilities/debug.hpp"
    1.47 +
    1.48 +// Different defaults for different number of GC threads
    1.49 +// They were chosen by running GCOld and SPECjbb on debris with different
     1.50 +//   numbers of GC threads and choosing the values that gave the best results
    1.51 +
    1.52 +// all the same
    1.53 +static double rs_length_diff_defaults[] = {
    1.54 +  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    1.55 +};
    1.56 +
    1.57 +static double cost_per_card_ms_defaults[] = {
    1.58 +  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
    1.59 +};
    1.60 +
    1.61 +// all the same
    1.62 +static double young_cards_per_entry_ratio_defaults[] = {
    1.63 +  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
    1.64 +};
    1.65 +
    1.66 +static double cost_per_entry_ms_defaults[] = {
    1.67 +  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
    1.68 +};
    1.69 +
    1.70 +static double cost_per_byte_ms_defaults[] = {
    1.71 +  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
    1.72 +};
    1.73 +
    1.74 +// these should be pretty consistent
    1.75 +static double constant_other_time_ms_defaults[] = {
    1.76 +  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
    1.77 +};
    1.78 +
    1.79 +
    1.80 +static double young_other_cost_per_region_ms_defaults[] = {
    1.81 +  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
    1.82 +};
    1.83 +
    1.84 +static double non_young_other_cost_per_region_ms_defaults[] = {
    1.85 +  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
    1.86 +};
    1.87 +
    1.88 +G1CollectorPolicy::G1CollectorPolicy() :
    1.89 +  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
    1.90 +                        ? ParallelGCThreads : 1),
    1.91 +
    1.92 +  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
    1.93 +  _stop_world_start(0.0),
    1.94 +
    1.95 +  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
    1.96 +  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
    1.97 +
    1.98 +  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
    1.99 +  _prev_collection_pause_end_ms(0.0),
   1.100 +  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.101 +  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.102 +  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.103 +  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.104 +  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.105 +  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.106 +  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.107 +  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.108 +  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.109 +  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.110 +  _non_young_other_cost_per_region_ms_seq(
   1.111 +                                         new TruncatedSeq(TruncatedSeqLength)),
   1.112 +
   1.113 +  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.114 +  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
   1.115 +
   1.116 +  _pause_time_target_ms((double) MaxGCPauseMillis),
   1.117 +
   1.118 +  _gcs_are_young(true),
   1.119 +
   1.120 +  _during_marking(false),
   1.121 +  _in_marking_window(false),
   1.122 +  _in_marking_window_im(false),
   1.123 +
   1.124 +  _recent_prev_end_times_for_all_gcs_sec(
   1.125 +                                new TruncatedSeq(NumPrevPausesForHeuristics)),
   1.126 +
   1.127 +  _recent_avg_pause_time_ratio(0.0),
   1.128 +
   1.129 +  _initiate_conc_mark_if_possible(false),
   1.130 +  _during_initial_mark_pause(false),
   1.131 +  _last_young_gc(false),
   1.132 +  _last_gc_was_young(false),
   1.133 +
   1.134 +  _eden_used_bytes_before_gc(0),
   1.135 +  _survivor_used_bytes_before_gc(0),
   1.136 +  _heap_used_bytes_before_gc(0),
   1.137 +  _metaspace_used_bytes_before_gc(0),
   1.138 +  _eden_capacity_bytes_before_gc(0),
   1.139 +  _heap_capacity_bytes_before_gc(0),
   1.140 +
   1.141 +  _eden_cset_region_length(0),
   1.142 +  _survivor_cset_region_length(0),
   1.143 +  _old_cset_region_length(0),
   1.144 +
   1.145 +  _collection_set(NULL),
   1.146 +  _collection_set_bytes_used_before(0),
   1.147 +
   1.148 +  // Incremental CSet attributes
   1.149 +  _inc_cset_build_state(Inactive),
   1.150 +  _inc_cset_head(NULL),
   1.151 +  _inc_cset_tail(NULL),
   1.152 +  _inc_cset_bytes_used_before(0),
   1.153 +  _inc_cset_max_finger(NULL),
   1.154 +  _inc_cset_recorded_rs_lengths(0),
   1.155 +  _inc_cset_recorded_rs_lengths_diffs(0),
   1.156 +  _inc_cset_predicted_elapsed_time_ms(0.0),
   1.157 +  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
   1.158 +
   1.159 +#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
   1.160 +#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
   1.161 +#endif // _MSC_VER
   1.162 +
   1.163 +  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
   1.164 +                                                 G1YoungSurvRateNumRegionsSummary)),
   1.165 +  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
   1.166 +                                              G1YoungSurvRateNumRegionsSummary)),
   1.167 +  // add here any more surv rate groups
   1.168 +  _recorded_survivor_regions(0),
   1.169 +  _recorded_survivor_head(NULL),
   1.170 +  _recorded_survivor_tail(NULL),
   1.171 +  _survivors_age_table(true),
   1.172 +
   1.173 +  _gc_overhead_perc(0.0) {
   1.174 +
   1.175 +  // Set up the region size and associated fields. Given that the
   1.176 +  // policy is created before the heap, we have to set this up here,
   1.177 +  // so it's done as soon as possible.
   1.178 +
   1.179 +  // It would have been natural to pass initial_heap_byte_size() and
   1.180 +  // max_heap_byte_size() to setup_heap_region_size() but those have
   1.181 +  // not been set up at this point since they should be aligned with
   1.182 +  // the region size. So, there is a circular dependency here. We base
   1.183 +  // the region size on the heap size, but the heap size should be
   1.184 +  // aligned with the region size. To get around this we use the
   1.185 +  // unaligned values for the heap.
   1.186 +  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   1.187 +  HeapRegionRemSet::setup_remset_size();
   1.188 +
   1.189 +  G1ErgoVerbose::initialize();
   1.190 +  if (PrintAdaptiveSizePolicy) {
   1.191 +    // Currently, we only use a single switch for all the heuristics.
   1.192 +    G1ErgoVerbose::set_enabled(true);
    1.193 +    // Given that we don't currently have a verbosity level
   1.194 +    // parameter, we'll hardcode this to high. This can be easily
   1.195 +    // changed in the future.
   1.196 +    G1ErgoVerbose::set_level(ErgoHigh);
   1.197 +  } else {
   1.198 +    G1ErgoVerbose::set_enabled(false);
   1.199 +  }
   1.200 +
   1.201 +  // Verify PLAB sizes
   1.202 +  const size_t region_size = HeapRegion::GrainWords;
   1.203 +  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
   1.204 +    char buffer[128];
   1.205 +    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
   1.206 +                 OldPLABSize > region_size ? "Old" : "Young", region_size);
   1.207 +    vm_exit_during_initialization(buffer);
   1.208 +  }
   1.209 +
   1.210 +  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   1.211 +  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
   1.212 +
   1.213 +  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
   1.214 +
   1.215 +  int index = MIN2(_parallel_gc_threads - 1, 7);
   1.216 +
   1.217 +  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   1.218 +  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
   1.219 +  _young_cards_per_entry_ratio_seq->add(
   1.220 +                                  young_cards_per_entry_ratio_defaults[index]);
   1.221 +  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
   1.222 +  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
   1.223 +  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
   1.224 +  _young_other_cost_per_region_ms_seq->add(
   1.225 +                               young_other_cost_per_region_ms_defaults[index]);
   1.226 +  _non_young_other_cost_per_region_ms_seq->add(
   1.227 +                           non_young_other_cost_per_region_ms_defaults[index]);
   1.228 +
   1.229 +  // Below, we might need to calculate the pause time target based on
   1.230 +  // the pause interval. When we do so we are going to give G1 maximum
   1.231 +  // flexibility and allow it to do pauses when it needs to. So, we'll
    1.232 +// arrange for the pause interval to be the pause time target + 1 to
   1.233 +  // ensure that a) the pause time target is maximized with respect to
   1.234 +  // the pause interval and b) we maintain the invariant that pause
   1.235 +  // time target < pause interval. If the user does not want this
   1.236 +  // maximum flexibility, they will have to set the pause interval
   1.237 +  // explicitly.
   1.238 +
   1.239 +  // First make sure that, if either parameter is set, its value is
   1.240 +  // reasonable.
   1.241 +  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
   1.242 +    if (MaxGCPauseMillis < 1) {
   1.243 +      vm_exit_during_initialization("MaxGCPauseMillis should be "
   1.244 +                                    "greater than 0");
   1.245 +    }
   1.246 +  }
   1.247 +  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
   1.248 +    if (GCPauseIntervalMillis < 1) {
   1.249 +      vm_exit_during_initialization("GCPauseIntervalMillis should be "
   1.250 +                                    "greater than 0");
   1.251 +    }
   1.252 +  }
   1.253 +
   1.254 +  // Then, if the pause time target parameter was not set, set it to
   1.255 +  // the default value.
   1.256 +  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
   1.257 +    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
   1.258 +      // The default pause time target in G1 is 200ms
   1.259 +      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
   1.260 +    } else {
   1.261 +      // We do not allow the pause interval to be set without the
   1.262 +      // pause time target
   1.263 +      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
   1.264 +                                    "without setting MaxGCPauseMillis");
   1.265 +    }
   1.266 +  }
   1.267 +
   1.268 +  // Then, if the interval parameter was not set, set it according to
   1.269 +  // the pause time target (this will also deal with the case when the
   1.270 +  // pause time target is the default value).
   1.271 +  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
   1.272 +    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
   1.273 +  }
   1.274 +
   1.275 +  // Finally, make sure that the two parameters are consistent.
   1.276 +  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
   1.277 +    char buffer[256];
   1.278 +    jio_snprintf(buffer, 256,
   1.279 +                 "MaxGCPauseMillis (%u) should be less than "
   1.280 +                 "GCPauseIntervalMillis (%u)",
   1.281 +                 MaxGCPauseMillis, GCPauseIntervalMillis);
   1.282 +    vm_exit_during_initialization(buffer);
   1.283 +  }
   1.284 +
   1.285 +  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
   1.286 +  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
   1.287 +  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
   1.288 +
   1.289 +  uintx confidence_perc = G1ConfidencePercent;
   1.290 +  // Put an artificial ceiling on this so that it's not set to a silly value.
   1.291 +  if (confidence_perc > 100) {
   1.292 +    confidence_perc = 100;
   1.293 +    warning("G1ConfidencePercent is set to a value that is too large, "
   1.294 +            "it's been updated to %u", confidence_perc);
   1.295 +  }
   1.296 +  _sigma = (double) confidence_perc / 100.0;
   1.297 +
   1.298 +  // start conservatively (around 50ms is about right)
   1.299 +  _concurrent_mark_remark_times_ms->add(0.05);
   1.300 +  _concurrent_mark_cleanup_times_ms->add(0.20);
   1.301 +  _tenuring_threshold = MaxTenuringThreshold;
   1.302 +  // _max_survivor_regions will be calculated by
   1.303 +  // update_young_list_target_length() during initialization.
   1.304 +  _max_survivor_regions = 0;
   1.305 +
   1.306 +  assert(GCTimeRatio > 0,
    1.307 +         "we should have set it to a default value in set_g1_gc_flags() "
   1.308 +         "if a user set it to 0");
   1.309 +  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
   1.310 +
   1.311 +  uintx reserve_perc = G1ReservePercent;
   1.312 +  // Put an artificial ceiling on this so that it's not set to a silly value.
   1.313 +  if (reserve_perc > 50) {
   1.314 +    reserve_perc = 50;
   1.315 +    warning("G1ReservePercent is set to a value that is too large, "
   1.316 +            "it's been updated to %u", reserve_perc);
   1.317 +  }
   1.318 +  _reserve_factor = (double) reserve_perc / 100.0;
   1.319 +  // This will be set when the heap is expanded
   1.320 +  // for the first time during initialization.
   1.321 +  _reserve_regions = 0;
   1.322 +
   1.323 +  _collectionSetChooser = new CollectionSetChooser();
   1.324 +}
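// A minimal, self-contained sketch (plain C++, not HotSpot code) of the
// pause-target/interval defaulting rules implemented in the constructor
// above; flag state is modeled with plain booleans and the numbers follow
// the code above.
#include <cstdio>
#include <stdexcept>

struct PauseFlags {
  unsigned max_pause_ms;
  unsigned interval_ms;
  bool max_pause_set;
  bool interval_set;
};

static PauseFlags resolve_pause_flags(PauseFlags f) {
  if (!f.max_pause_set) {
    if (!f.interval_set) {
      f.max_pause_ms = 200;                 // G1's default pause time target
    } else {
      throw std::invalid_argument("interval set without pause target");
    }
  }
  if (!f.interval_set) {
    f.interval_ms = f.max_pause_ms + 1;     // keep target < interval, maximally
  }
  if (f.max_pause_ms >= f.interval_ms) {
    throw std::invalid_argument("pause target must be below the interval");
  }
  return f;
}

int main() {
  PauseFlags f = resolve_pause_flags(PauseFlags{0, 0, false, false});
  std::printf("target=%ums interval=%ums\n", f.max_pause_ms, f.interval_ms);
  // prints: target=200ms interval=201ms
  return 0;
}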
   1.325 +
   1.326 +void G1CollectorPolicy::initialize_alignments() {
   1.327 +  _space_alignment = HeapRegion::GrainBytes;
   1.328 +  size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
   1.329 +  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   1.330 +  _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
   1.331 +}
   1.332 +
   1.333 +void G1CollectorPolicy::initialize_flags() {
   1.334 +  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
   1.335 +    FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes);
   1.336 +  }
   1.337 +
   1.338 +  if (SurvivorRatio < 1) {
   1.339 +    vm_exit_during_initialization("Invalid survivor ratio specified");
   1.340 +  }
   1.341 +  CollectorPolicy::initialize_flags();
   1.342 +  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
   1.343 +}
   1.344 +
   1.345 +void G1CollectorPolicy::post_heap_initialize() {
   1.346 +  uintx max_regions = G1CollectedHeap::heap()->max_regions();
   1.347 +  size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
   1.348 +  if (max_young_size != MaxNewSize) {
   1.349 +    FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size);
   1.350 +  }
   1.351 +}
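// A worked, stand-alone sketch of the ergonomic MaxNewSize computation
// above, with assumed numbers (2048 regions of 1 MB; G1MaxNewSizePercent's
// usual default of 60):
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const unsigned max_regions = 2048;         // e.g. 2 GB heap, 1 MB regions
  const size_t   grain_bytes = 1024 * 1024;  // stands in for HeapRegion::GrainBytes
  const unsigned max_new_size_percent = 60;  // default G1MaxNewSizePercent
  unsigned max_young_regions =
      std::max(1u, max_regions * max_new_size_percent / 100);
  size_t max_new_size = (size_t) max_young_regions * grain_bytes;
  std::printf("ergonomic MaxNewSize: %zu MB\n", max_new_size / (1024 * 1024));
  // prints: ergonomic MaxNewSize: 1228 MB
  return 0;
}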
   1.352 +
   1.353 +G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
   1.354 +        _min_desired_young_length(0), _max_desired_young_length(0) {
   1.355 +  if (FLAG_IS_CMDLINE(NewRatio)) {
   1.356 +    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
   1.357 +      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
   1.358 +    } else {
   1.359 +      _sizer_kind = SizerNewRatio;
   1.360 +      _adaptive_size = false;
   1.361 +      return;
   1.362 +    }
   1.363 +  }
   1.364 +
   1.365 +  if (NewSize > MaxNewSize) {
   1.366 +    if (FLAG_IS_CMDLINE(MaxNewSize)) {
   1.367 +      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
   1.368 +              "A new max generation size of " SIZE_FORMAT "k will be used.",
   1.369 +              NewSize/K, MaxNewSize/K, NewSize/K);
   1.370 +    }
   1.371 +    MaxNewSize = NewSize;
   1.372 +  }
   1.373 +
   1.374 +  if (FLAG_IS_CMDLINE(NewSize)) {
   1.375 +    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
   1.376 +                                     1U);
   1.377 +    if (FLAG_IS_CMDLINE(MaxNewSize)) {
   1.378 +      _max_desired_young_length =
   1.379 +                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
   1.380 +                                  1U);
   1.381 +      _sizer_kind = SizerMaxAndNewSize;
   1.382 +      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
   1.383 +    } else {
   1.384 +      _sizer_kind = SizerNewSizeOnly;
   1.385 +    }
   1.386 +  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
   1.387 +    _max_desired_young_length =
   1.388 +                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
   1.389 +                                  1U);
   1.390 +    _sizer_kind = SizerMaxNewSizeOnly;
   1.391 +  }
   1.392 +}
   1.393 +
   1.394 +uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
   1.395 +  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
   1.396 +  return MAX2(1U, default_value);
   1.397 +}
   1.398 +
   1.399 +uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
   1.400 +  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
   1.401 +  return MAX2(1U, default_value);
   1.402 +}
   1.403 +
   1.404 +void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
   1.405 +  assert(number_of_heap_regions > 0, "Heap must be initialized");
   1.406 +
   1.407 +  switch (_sizer_kind) {
   1.408 +    case SizerDefaults:
   1.409 +      *min_young_length = calculate_default_min_length(number_of_heap_regions);
   1.410 +      *max_young_length = calculate_default_max_length(number_of_heap_regions);
   1.411 +      break;
   1.412 +    case SizerNewSizeOnly:
   1.413 +      *max_young_length = calculate_default_max_length(number_of_heap_regions);
   1.414 +      *max_young_length = MAX2(*min_young_length, *max_young_length);
   1.415 +      break;
   1.416 +    case SizerMaxNewSizeOnly:
   1.417 +      *min_young_length = calculate_default_min_length(number_of_heap_regions);
   1.418 +      *min_young_length = MIN2(*min_young_length, *max_young_length);
   1.419 +      break;
   1.420 +    case SizerMaxAndNewSize:
   1.421 +      // Do nothing. Values set on the command line, don't update them at runtime.
   1.422 +      break;
   1.423 +    case SizerNewRatio:
   1.424 +      *min_young_length = number_of_heap_regions / (NewRatio + 1);
   1.425 +      *max_young_length = *min_young_length;
   1.426 +      break;
   1.427 +    default:
   1.428 +      ShouldNotReachHere();
   1.429 +  }
   1.430 +
   1.431 +  assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
   1.432 +}
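// Self-contained sketch of the sizing cases above, with assumed values
// (2048 regions; G1NewSizePercent=5 and G1MaxNewSizePercent=60 are the
// usual defaults):
#include <algorithm>
#include <cstdio>

int main() {
  const unsigned regions = 2048;
  unsigned min_young = std::max(1u, regions * 5 / 100);    // SizerDefaults min
  unsigned max_young = std::max(1u, regions * 60 / 100);   // SizerDefaults max
  unsigned ratio_young = regions / (2 /*NewRatio*/ + 1);   // SizerNewRatio
  std::printf("defaults: [%u, %u] regions; NewRatio=2 pins young at %u\n",
              min_young, max_young, ratio_young);  // [102, 1228]; 682
  return 0;
}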
   1.433 +
   1.434 +uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
   1.435 +  // We need to pass the desired values because recalculation may not update these
   1.436 +  // values in some cases.
   1.437 +  uint temp = _min_desired_young_length;
   1.438 +  uint result = _max_desired_young_length;
   1.439 +  recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
   1.440 +  return result;
   1.441 +}
   1.442 +
   1.443 +void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
   1.444 +  recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
   1.445 +          &_max_desired_young_length);
   1.446 +}
   1.447 +
   1.448 +void G1CollectorPolicy::init() {
   1.449 +  // Set aside an initial future to_space.
   1.450 +  _g1 = G1CollectedHeap::heap();
   1.451 +
   1.452 +  assert(Heap_lock->owned_by_self(), "Locking discipline.");
   1.453 +
   1.454 +  initialize_gc_policy_counters();
   1.455 +
   1.456 +  if (adaptive_young_list_length()) {
   1.457 +    _young_list_fixed_length = 0;
   1.458 +  } else {
   1.459 +    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   1.460 +  }
   1.461 +  _free_regions_at_end_of_collection = _g1->free_regions();
   1.462 +  update_young_list_target_length();
   1.463 +
   1.464 +  // We may immediately start allocating regions and placing them on the
   1.465 +  // collection set list. Initialize the per-collection set info
   1.466 +  start_incremental_cset_building();
   1.467 +}
   1.468 +
   1.469 +// Create the jstat counters for the policy.
   1.470 +void G1CollectorPolicy::initialize_gc_policy_counters() {
   1.471 +  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
   1.472 +}
   1.473 +
   1.474 +bool G1CollectorPolicy::predict_will_fit(uint young_length,
   1.475 +                                         double base_time_ms,
   1.476 +                                         uint base_free_regions,
   1.477 +                                         double target_pause_time_ms) {
   1.478 +  if (young_length >= base_free_regions) {
   1.479 +    // end condition 1: not enough space for the young regions
   1.480 +    return false;
   1.481 +  }
   1.482 +
   1.483 +  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
   1.484 +  size_t bytes_to_copy =
   1.485 +               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   1.486 +  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
   1.487 +  double young_other_time_ms = predict_young_other_time_ms(young_length);
   1.488 +  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
   1.489 +  if (pause_time_ms > target_pause_time_ms) {
   1.490 +    // end condition 2: prediction is over the target pause time
   1.491 +    return false;
   1.492 +  }
   1.493 +
   1.494 +  size_t free_bytes =
   1.495 +                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
   1.496 +  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
   1.497 +    // end condition 3: out-of-space (conservatively!)
   1.498 +    return false;
   1.499 +  }
   1.500 +
   1.501 +  // success!
   1.502 +  return true;
   1.503 +}
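// Stand-alone sketch of the three cut-offs above, with the predictors
// replaced by assumed constants; the survival model here is a crude
// per-region approximation of the accumulated rate, illustrative only:
#include <cstdio>

static const double kRegionBytes = 1024.0 * 1024.0;  // assumed 1 MB regions

static bool sketch_will_fit(unsigned young_len, double base_ms,
                            unsigned free_regions, double target_ms) {
  const double surv_rate = 0.30;            // assumed survival rate
  const double copy_ms_per_mb = 0.05;       // assumed copy cost
  const double other_ms_per_region = 0.1;   // assumed per-region overhead
  const double sigma = 0.5;                 // assumed confidence factor
  if (young_len >= free_regions) return false;             // 1: no space
  double bytes_to_copy = surv_rate * young_len * kRegionBytes;
  double pause_ms = base_ms + (bytes_to_copy / kRegionBytes) * copy_ms_per_mb
                  + young_len * other_ms_per_region;
  if (pause_ms > target_ms) return false;                  // 2: pause budget
  double free_bytes = (free_regions - young_len) * kRegionBytes;
  return 2.0 * sigma * bytes_to_copy <= free_bytes;        // 3: to-space slack
}

int main() {
  std::printf("fit(100)=%d fit(2000)=%d\n",
              sketch_will_fit(100, 30.0, 2048, 200.0),     // 1
              sketch_will_fit(2000, 30.0, 2048, 200.0));   // 0
  return 0;
}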
   1.504 +
   1.505 +void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
   1.506 +  // re-calculate the necessary reserve
   1.507 +  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
   1.508 +  // We use ceiling so that if reserve_regions_d is > 0.0 (but
   1.509 +  // smaller than 1.0) we'll get 1.
   1.510 +  _reserve_regions = (uint) ceil(reserve_regions_d);
   1.511 +
   1.512 +  _young_gen_sizer->heap_size_changed(new_number_of_regions);
   1.513 +}
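// Worked sketch of the ceiling above: with G1ReservePercent=10 even a very
// small heap keeps at least one reserve region (assumed numbers):
#include <cmath>
#include <cstdio>

int main() {
  double reserve_factor = 0.10;              // G1ReservePercent / 100
  for (unsigned regions = 1; regions <= 25; regions += 12) {
    unsigned reserve = (unsigned) std::ceil(regions * reserve_factor);
    std::printf("%2u regions -> %u reserved\n", regions, reserve);
  }
  // prints: 1 -> 1, 13 -> 2, 25 -> 3
  return 0;
}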
   1.514 +
   1.515 +uint G1CollectorPolicy::calculate_young_list_desired_min_length(
   1.516 +                                                       uint base_min_length) {
   1.517 +  uint desired_min_length = 0;
   1.518 +  if (adaptive_young_list_length()) {
   1.519 +    if (_alloc_rate_ms_seq->num() > 3) {
   1.520 +      double now_sec = os::elapsedTime();
   1.521 +      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
   1.522 +      double alloc_rate_ms = predict_alloc_rate_ms();
   1.523 +      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
   1.524 +    } else {
   1.525 +      // otherwise we don't have enough info to make the prediction
   1.526 +    }
   1.527 +  }
   1.528 +  desired_min_length += base_min_length;
   1.529 +  // make sure we don't go below any user-defined minimum bound
   1.530 +  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
   1.531 +}
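// A worked, self-contained sketch of the prediction above (assumed numbers):
// if the MMU tracker says a pause can start in ~40 ms and the application
// allocates ~0.25 eden regions per ms, roughly 10 extra young regions are
// needed on top of the base minimum.
#include <cmath>
#include <cstdio>

int main() {
  double when_ms       = 40.0;   // predicted time until the next pause may start
  double alloc_rate_ms = 0.25;   // predicted eden regions allocated per ms
  unsigned desired_min = (unsigned) std::ceil(alloc_rate_ms * when_ms);
  std::printf("predicted eden demand: %u regions\n", desired_min);  // 10
  return 0;
}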
   1.532 +
   1.533 +uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
   1.534 +  // Here, we might want to also take into account any additional
   1.535 +  // constraints (i.e., user-defined minimum bound). Currently, we
   1.536 +  // effectively don't set this bound.
   1.537 +  return _young_gen_sizer->max_desired_young_length();
   1.538 +}
   1.539 +
   1.540 +void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
   1.541 +  if (rs_lengths == (size_t) -1) {
   1.542 +    // if it's set to the default value (-1), we should predict it;
   1.543 +    // otherwise, use the given value.
   1.544 +    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
   1.545 +  }
   1.546 +
   1.547 +  // Calculate the absolute and desired min bounds.
   1.548 +
   1.549 +  // This is how many young regions we already have (currently: the survivors).
   1.550 +  uint base_min_length = recorded_survivor_regions();
   1.551 +  // This is the absolute minimum young length, which ensures that we
   1.552 +  // can allocate one eden region in the worst-case.
   1.553 +  uint absolute_min_length = base_min_length + 1;
   1.554 +  uint desired_min_length =
   1.555 +                     calculate_young_list_desired_min_length(base_min_length);
   1.556 +  if (desired_min_length < absolute_min_length) {
   1.557 +    desired_min_length = absolute_min_length;
   1.558 +  }
   1.559 +
   1.560 +  // Calculate the absolute and desired max bounds.
   1.561 +
   1.562 +  // We will try our best not to "eat" into the reserve.
   1.563 +  uint absolute_max_length = 0;
   1.564 +  if (_free_regions_at_end_of_collection > _reserve_regions) {
   1.565 +    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
   1.566 +  }
   1.567 +  uint desired_max_length = calculate_young_list_desired_max_length();
   1.568 +  if (desired_max_length > absolute_max_length) {
   1.569 +    desired_max_length = absolute_max_length;
   1.570 +  }
   1.571 +
   1.572 +  uint young_list_target_length = 0;
   1.573 +  if (adaptive_young_list_length()) {
   1.574 +    if (gcs_are_young()) {
   1.575 +      young_list_target_length =
   1.576 +                        calculate_young_list_target_length(rs_lengths,
   1.577 +                                                           base_min_length,
   1.578 +                                                           desired_min_length,
   1.579 +                                                           desired_max_length);
   1.580 +      _rs_lengths_prediction = rs_lengths;
   1.581 +    } else {
   1.582 +      // Don't calculate anything and let the code below bound it to
   1.583 +      // the desired_min_length, i.e., do the next GC as soon as
   1.584 +      // possible to maximize how many old regions we can add to it.
   1.585 +    }
   1.586 +  } else {
   1.587 +    // The user asked for a fixed young gen so we'll fix the young gen
   1.588 +    // whether the next GC is young or mixed.
   1.589 +    young_list_target_length = _young_list_fixed_length;
   1.590 +  }
   1.591 +
   1.592 +  // Make sure we don't go over the desired max length, nor under the
   1.593 +  // desired min length. In case they clash, desired_min_length wins
   1.594 +  // which is why that test is second.
   1.595 +  if (young_list_target_length > desired_max_length) {
   1.596 +    young_list_target_length = desired_max_length;
   1.597 +  }
   1.598 +  if (young_list_target_length < desired_min_length) {
   1.599 +    young_list_target_length = desired_min_length;
   1.600 +  }
   1.601 +
   1.602 +  assert(young_list_target_length > recorded_survivor_regions(),
   1.603 +         "we should be able to allocate at least one eden region");
   1.604 +  assert(young_list_target_length >= absolute_min_length, "post-condition");
   1.605 +  _young_list_target_length = young_list_target_length;
   1.606 +
   1.607 +  update_max_gc_locker_expansion();
   1.608 +}
   1.609 +
   1.610 +uint
   1.611 +G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
   1.612 +                                                     uint base_min_length,
   1.613 +                                                     uint desired_min_length,
   1.614 +                                                     uint desired_max_length) {
   1.615 +  assert(adaptive_young_list_length(), "pre-condition");
   1.616 +  assert(gcs_are_young(), "only call this for young GCs");
   1.617 +
   1.618 +  // In case some edge-condition makes the desired max length too small...
   1.619 +  if (desired_max_length <= desired_min_length) {
   1.620 +    return desired_min_length;
   1.621 +  }
   1.622 +
   1.623 +  // We'll adjust min_young_length and max_young_length not to include
   1.624 +  // the already allocated young regions (i.e., so they reflect the
   1.625 +  // min and max eden regions we'll allocate). The base_min_length
   1.626 +  // will be reflected in the predictions by the
   1.627 +  // survivor_regions_evac_time prediction.
   1.628 +  assert(desired_min_length > base_min_length, "invariant");
   1.629 +  uint min_young_length = desired_min_length - base_min_length;
   1.630 +  assert(desired_max_length > base_min_length, "invariant");
   1.631 +  uint max_young_length = desired_max_length - base_min_length;
   1.632 +
   1.633 +  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   1.634 +  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
   1.635 +  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
   1.636 +  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
   1.637 +  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
   1.638 +  double base_time_ms =
   1.639 +    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
   1.640 +    survivor_regions_evac_time;
   1.641 +  uint available_free_regions = _free_regions_at_end_of_collection;
   1.642 +  uint base_free_regions = 0;
   1.643 +  if (available_free_regions > _reserve_regions) {
   1.644 +    base_free_regions = available_free_regions - _reserve_regions;
   1.645 +  }
   1.646 +
   1.647 +  // Here, we will make sure that the shortest young length that
   1.648 +  // makes sense fits within the target pause time.
   1.649 +
   1.650 +  if (predict_will_fit(min_young_length, base_time_ms,
   1.651 +                       base_free_regions, target_pause_time_ms)) {
   1.652 +    // The shortest young length will fit into the target pause time;
   1.653 +    // we'll now check whether the absolute maximum number of young
   1.654 +    // regions will fit in the target pause time. If not, we'll do
   1.655 +    // a binary search between min_young_length and max_young_length.
   1.656 +    if (predict_will_fit(max_young_length, base_time_ms,
   1.657 +                         base_free_regions, target_pause_time_ms)) {
   1.658 +      // The maximum young length will fit into the target pause time.
   1.659 +      // We are done so set min young length to the maximum length (as
   1.660 +      // the result is assumed to be returned in min_young_length).
   1.661 +      min_young_length = max_young_length;
   1.662 +    } else {
   1.663 +      // The maximum possible number of young regions will not fit within
   1.664 +      // the target pause time so we'll search for the optimal
   1.665 +      // length. The loop invariants are:
   1.666 +      //
   1.667 +      // min_young_length < max_young_length
   1.668 +      // min_young_length is known to fit into the target pause time
   1.669 +      // max_young_length is known not to fit into the target pause time
   1.670 +      //
   1.671 +      // Going into the loop we know the above hold as we've just
   1.672 +      // checked them. Every time around the loop we check whether
   1.673 +      // the middle value between min_young_length and
   1.674 +      // max_young_length fits into the target pause time. If it
   1.675 +      // does, it becomes the new min. If it doesn't, it becomes
   1.676 +      // the new max. This way we maintain the loop invariants.
   1.677 +
   1.678 +      assert(min_young_length < max_young_length, "invariant");
   1.679 +      uint diff = (max_young_length - min_young_length) / 2;
   1.680 +      while (diff > 0) {
   1.681 +        uint young_length = min_young_length + diff;
   1.682 +        if (predict_will_fit(young_length, base_time_ms,
   1.683 +                             base_free_regions, target_pause_time_ms)) {
   1.684 +          min_young_length = young_length;
   1.685 +        } else {
   1.686 +          max_young_length = young_length;
   1.687 +        }
   1.688 +        assert(min_young_length <  max_young_length, "invariant");
   1.689 +        diff = (max_young_length - min_young_length) / 2;
   1.690 +      }
    1.691 +      // The result is min_young_length which, according to the
   1.692 +      // loop invariants, should fit within the target pause time.
   1.693 +
   1.694 +      // These are the post-conditions of the binary search above:
   1.695 +      assert(min_young_length < max_young_length,
   1.696 +             "otherwise we should have discovered that max_young_length "
   1.697 +             "fits into the pause target and not done the binary search");
   1.698 +      assert(predict_will_fit(min_young_length, base_time_ms,
   1.699 +                              base_free_regions, target_pause_time_ms),
   1.700 +             "min_young_length, the result of the binary search, should "
   1.701 +             "fit into the pause target");
   1.702 +      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
   1.703 +                               base_free_regions, target_pause_time_ms),
   1.704 +             "min_young_length, the result of the binary search, should be "
   1.705 +             "optimal, so no larger length should fit into the pause target");
   1.706 +    }
   1.707 +  } else {
   1.708 +    // Even the minimum length doesn't fit into the pause time
   1.709 +    // target, return it as the result nevertheless.
   1.710 +  }
   1.711 +  return base_min_length + min_young_length;
   1.712 +}
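// The loop above maintains the invariant that min_young_length fits and
// max_young_length does not. A generic stand-alone version of the same
// search, usable with any monotone fits() predicate (illustrative only):
#include <cstdio>

typedef bool (*FitsFn)(unsigned);

static unsigned largest_fitting(unsigned min_len, unsigned max_len, FitsFn fits) {
  // pre: fits(min_len) is true, fits(max_len) is false, min_len < max_len
  unsigned diff = (max_len - min_len) / 2;
  while (diff > 0) {
    unsigned mid = min_len + diff;
    if (fits(mid)) min_len = mid; else max_len = mid;  // keep the invariants
    diff = (max_len - min_len) / 2;
  }
  return min_len;  // largest value known to fit
}

static bool fits_under_100(unsigned len) { return len * 7 < 100; }

int main() {
  std::printf("%u\n", largest_fitting(1, 50, fits_under_100));  // prints 14
  return 0;
}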
   1.713 +
   1.714 +double G1CollectorPolicy::predict_survivor_regions_evac_time() {
   1.715 +  double survivor_regions_evac_time = 0.0;
   1.716 +  for (HeapRegion * r = _recorded_survivor_head;
   1.717 +       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
   1.718 +       r = r->get_next_young_region()) {
   1.719 +    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
   1.720 +  }
   1.721 +  return survivor_regions_evac_time;
   1.722 +}
   1.723 +
   1.724 +void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
   1.725 +  guarantee( adaptive_young_list_length(), "should not call this otherwise" );
   1.726 +
   1.727 +  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
   1.728 +  if (rs_lengths > _rs_lengths_prediction) {
   1.729 +    // add 10% to avoid having to recalculate often
   1.730 +    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
   1.731 +    update_young_list_target_length(rs_lengths_prediction);
   1.732 +  }
   1.733 +}
   1.734 +
   1.735 +
   1.736 +
   1.737 +HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
   1.738 +                                               bool is_tlab,
   1.739 +                                               bool* gc_overhead_limit_was_exceeded) {
   1.740 +  guarantee(false, "Not using this policy feature yet.");
   1.741 +  return NULL;
   1.742 +}
   1.743 +
   1.744 +// This method controls how a collector handles one or more
   1.745 +// of its generations being fully allocated.
   1.746 +HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
   1.747 +                                                       bool is_tlab) {
   1.748 +  guarantee(false, "Not using this policy feature yet.");
   1.749 +  return NULL;
   1.750 +}
   1.751 +
   1.752 +
   1.753 +#ifndef PRODUCT
   1.754 +bool G1CollectorPolicy::verify_young_ages() {
   1.755 +  HeapRegion* head = _g1->young_list()->first_region();
   1.756 +  return
   1.757 +    verify_young_ages(head, _short_lived_surv_rate_group);
   1.758 +  // also call verify_young_ages on any additional surv rate groups
   1.759 +}
   1.760 +
   1.761 +bool
   1.762 +G1CollectorPolicy::verify_young_ages(HeapRegion* head,
   1.763 +                                     SurvRateGroup *surv_rate_group) {
   1.764 +  guarantee( surv_rate_group != NULL, "pre-condition" );
   1.765 +
   1.766 +  const char* name = surv_rate_group->name();
   1.767 +  bool ret = true;
   1.768 +  int prev_age = -1;
   1.769 +
   1.770 +  for (HeapRegion* curr = head;
   1.771 +       curr != NULL;
   1.772 +       curr = curr->get_next_young_region()) {
   1.773 +    SurvRateGroup* group = curr->surv_rate_group();
   1.774 +    if (group == NULL && !curr->is_survivor()) {
   1.775 +      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
   1.776 +      ret = false;
   1.777 +    }
   1.778 +
   1.779 +    if (surv_rate_group == group) {
   1.780 +      int age = curr->age_in_surv_rate_group();
   1.781 +
   1.782 +      if (age < 0) {
   1.783 +        gclog_or_tty->print_cr("## %s: encountered negative age", name);
   1.784 +        ret = false;
   1.785 +      }
   1.786 +
   1.787 +      if (age <= prev_age) {
   1.788 +        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
   1.789 +                               "(%d, %d)", name, age, prev_age);
   1.790 +        ret = false;
   1.791 +      }
   1.792 +      prev_age = age;
   1.793 +    }
   1.794 +  }
   1.795 +
   1.796 +  return ret;
   1.797 +}
   1.798 +#endif // PRODUCT
   1.799 +
   1.800 +void G1CollectorPolicy::record_full_collection_start() {
   1.801 +  _full_collection_start_sec = os::elapsedTime();
   1.802 +  record_heap_size_info_at_start(true /* full */);
   1.803 +  // Release the future to-space so that it is available for compaction into.
   1.804 +  _g1->set_full_collection();
   1.805 +}
   1.806 +
   1.807 +void G1CollectorPolicy::record_full_collection_end() {
   1.808 +  // Consider this like a collection pause for the purposes of allocation
   1.809 +  // since last pause.
   1.810 +  double end_sec = os::elapsedTime();
   1.811 +  double full_gc_time_sec = end_sec - _full_collection_start_sec;
   1.812 +  double full_gc_time_ms = full_gc_time_sec * 1000.0;
   1.813 +
   1.814 +  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);
   1.815 +
   1.816 +  update_recent_gc_times(end_sec, full_gc_time_ms);
   1.817 +
   1.818 +  _g1->clear_full_collection();
   1.819 +
   1.820 +  // "Nuke" the heuristics that control the young/mixed GC
   1.821 +  // transitions and make sure we start with young GCs after the Full GC.
   1.822 +  set_gcs_are_young(true);
   1.823 +  _last_young_gc = false;
   1.824 +  clear_initiate_conc_mark_if_possible();
   1.825 +  clear_during_initial_mark_pause();
   1.826 +  _in_marking_window = false;
   1.827 +  _in_marking_window_im = false;
   1.828 +
   1.829 +  _short_lived_surv_rate_group->start_adding_regions();
   1.830 +  // also call this on any additional surv rate groups
   1.831 +
   1.832 +  record_survivor_regions(0, NULL, NULL);
   1.833 +
   1.834 +  _free_regions_at_end_of_collection = _g1->free_regions();
   1.835 +  // Reset survivors SurvRateGroup.
   1.836 +  _survivor_surv_rate_group->reset();
   1.837 +  update_young_list_target_length();
   1.838 +  _collectionSetChooser->clear();
   1.839 +}
   1.840 +
   1.841 +void G1CollectorPolicy::record_stop_world_start() {
   1.842 +  _stop_world_start = os::elapsedTime();
   1.843 +}
   1.844 +
   1.845 +void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
   1.846 +  // We only need to do this here as the policy will only be applied
    1.847 +  // to the GC we're about to start. So there is no point in calculating
    1.848 +  // this every time we calculate / recalculate the target young length.
   1.849 +  update_survivors_policy();
   1.850 +
   1.851 +  assert(_g1->used() == _g1->recalculate_used(),
   1.852 +         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
   1.853 +                 _g1->used(), _g1->recalculate_used()));
   1.854 +
   1.855 +  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
   1.856 +  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
   1.857 +  _stop_world_start = 0.0;
   1.858 +
   1.859 +  record_heap_size_info_at_start(false /* full */);
   1.860 +
   1.861 +  phase_times()->record_cur_collection_start_sec(start_time_sec);
   1.862 +  _pending_cards = _g1->pending_card_num();
   1.863 +
   1.864 +  _collection_set_bytes_used_before = 0;
   1.865 +  _bytes_copied_during_gc = 0;
   1.866 +
   1.867 +  _last_gc_was_young = false;
   1.868 +
   1.869 +  // do that for any other surv rate groups
   1.870 +  _short_lived_surv_rate_group->stop_adding_regions();
   1.871 +  _survivors_age_table.clear();
   1.872 +
   1.873 +  assert( verify_young_ages(), "region age verification" );
   1.874 +}
   1.875 +
   1.876 +void G1CollectorPolicy::record_concurrent_mark_init_end(double
   1.877 +                                                   mark_init_elapsed_time_ms) {
   1.878 +  _during_marking = true;
   1.879 +  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
   1.880 +  clear_during_initial_mark_pause();
   1.881 +  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
   1.882 +}
   1.883 +
   1.884 +void G1CollectorPolicy::record_concurrent_mark_remark_start() {
   1.885 +  _mark_remark_start_sec = os::elapsedTime();
   1.886 +  _during_marking = false;
   1.887 +}
   1.888 +
   1.889 +void G1CollectorPolicy::record_concurrent_mark_remark_end() {
   1.890 +  double end_time_sec = os::elapsedTime();
   1.891 +  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
   1.892 +  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
   1.893 +  _cur_mark_stop_world_time_ms += elapsed_time_ms;
   1.894 +  _prev_collection_pause_end_ms += elapsed_time_ms;
   1.895 +
   1.896 +  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
   1.897 +}
   1.898 +
   1.899 +void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
   1.900 +  _mark_cleanup_start_sec = os::elapsedTime();
   1.901 +}
   1.902 +
   1.903 +void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
   1.904 +  _last_young_gc = true;
   1.905 +  _in_marking_window = false;
   1.906 +}
   1.907 +
   1.908 +void G1CollectorPolicy::record_concurrent_pause() {
   1.909 +  if (_stop_world_start > 0.0) {
   1.910 +    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
   1.911 +    _trace_gen0_time_data.record_yield_time(yield_ms);
   1.912 +  }
   1.913 +}
   1.914 +
   1.915 +bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
   1.916 +  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
   1.917 +    return false;
   1.918 +  }
   1.919 +
   1.920 +  size_t marking_initiating_used_threshold =
   1.921 +    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
   1.922 +  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
   1.923 +  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
   1.924 +
   1.925 +  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
   1.926 +    if (gcs_are_young() && !_last_young_gc) {
   1.927 +      ergo_verbose5(ErgoConcCycles,
   1.928 +        "request concurrent cycle initiation",
   1.929 +        ergo_format_reason("occupancy higher than threshold")
   1.930 +        ergo_format_byte("occupancy")
   1.931 +        ergo_format_byte("allocation request")
   1.932 +        ergo_format_byte_perc("threshold")
   1.933 +        ergo_format_str("source"),
   1.934 +        cur_used_bytes,
   1.935 +        alloc_byte_size,
   1.936 +        marking_initiating_used_threshold,
   1.937 +        (double) InitiatingHeapOccupancyPercent,
   1.938 +        source);
   1.939 +      return true;
   1.940 +    } else {
   1.941 +      ergo_verbose5(ErgoConcCycles,
   1.942 +        "do not request concurrent cycle initiation",
   1.943 +        ergo_format_reason("still doing mixed collections")
   1.944 +        ergo_format_byte("occupancy")
   1.945 +        ergo_format_byte("allocation request")
   1.946 +        ergo_format_byte_perc("threshold")
   1.947 +        ergo_format_str("source"),
   1.948 +        cur_used_bytes,
   1.949 +        alloc_byte_size,
   1.950 +        marking_initiating_used_threshold,
   1.951 +        (double) InitiatingHeapOccupancyPercent,
   1.952 +        source);
   1.953 +    }
   1.954 +  }
   1.955 +
   1.956 +  return false;
   1.957 +}
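// Worked sketch of the initiation test above (assumed sizes): with a 1 GB
// heap and InitiatingHeapOccupancyPercent=45, marking is requested once
// non-young occupancy plus the pending allocation crosses ~460 MB.
#include <cstddef>
#include <cstdio>

int main() {
  size_t capacity  = 1024u * 1024 * 1024;      // 1 GB heap
  size_t ihop      = 45;                       // InitiatingHeapOccupancyPercent
  size_t threshold = (capacity / 100) * ihop;
  size_t used      = 450u * 1024 * 1024;       // non-young occupancy
  size_t alloc     = 16u * 1024 * 1024;        // pending allocation
  std::printf("threshold=%zuMB start=%d\n", threshold >> 20,
              (used + alloc) > threshold);     // threshold=460MB start=1
  return 0;
}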
   1.958 +
   1.959 +// Anything below that is considered to be zero
   1.960 +#define MIN_TIMER_GRANULARITY 0.0000001
   1.961 +
   1.962 +void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
   1.963 +  double end_time_sec = os::elapsedTime();
   1.964 +  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
   1.965 +         "otherwise, the subtraction below does not make sense");
   1.966 +  size_t rs_size =
   1.967 +            _cur_collection_pause_used_regions_at_start - cset_region_length();
   1.968 +  size_t cur_used_bytes = _g1->used();
   1.969 +  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
   1.970 +  bool last_pause_included_initial_mark = false;
   1.971 +  bool update_stats = !_g1->evacuation_failed();
   1.972 +
   1.973 +#ifndef PRODUCT
   1.974 +  if (G1YoungSurvRateVerbose) {
   1.975 +    gclog_or_tty->cr();
   1.976 +    _short_lived_surv_rate_group->print();
   1.977 +    // do that for any other surv rate groups too
   1.978 +  }
   1.979 +#endif // PRODUCT
   1.980 +
   1.981 +  last_pause_included_initial_mark = during_initial_mark_pause();
   1.982 +  if (last_pause_included_initial_mark) {
   1.983 +    record_concurrent_mark_init_end(0.0);
   1.984 +  } else if (need_to_start_conc_mark("end of GC")) {
   1.985 +    // Note: this might have already been set, if during the last
   1.986 +    // pause we decided to start a cycle but at the beginning of
   1.987 +    // this pause we decided to postpone it. That's OK.
   1.988 +    set_initiate_conc_mark_if_possible();
   1.989 +  }
   1.990 +
   1.991 +  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
   1.992 +                          end_time_sec, false);
   1.993 +
   1.994 +  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
   1.995 +  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
   1.996 +
   1.997 +  if (update_stats) {
   1.998 +    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
   1.999 +    // this is where we update the allocation rate of the application
  1.1000 +    double app_time_ms =
  1.1001 +      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
  1.1002 +    if (app_time_ms < MIN_TIMER_GRANULARITY) {
  1.1003 +      // This usually happens due to the timer not having the required
  1.1004 +      // granularity. Some Linuxes are the usual culprits.
  1.1005 +      // We'll just set it to something (arbitrarily) small.
  1.1006 +      app_time_ms = 1.0;
  1.1007 +    }
  1.1008 +    // We maintain the invariant that all objects allocated by mutator
  1.1009 +    // threads will be allocated out of eden regions. So, we can use
  1.1010 +    // the eden region number allocated since the previous GC to
  1.1011 +    // calculate the application's allocate rate. The only exception
  1.1012 +    // to that is humongous objects that are allocated separately. But
  1.1013 +    // given that humongous object allocations do not really affect
   1.1014 +    // either the pause's duration or when the next pause will take
  1.1015 +    // place we can safely ignore them here.
  1.1016 +    uint regions_allocated = eden_cset_region_length();
  1.1017 +    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
  1.1018 +    _alloc_rate_ms_seq->add(alloc_rate_ms);
  1.1019 +
  1.1020 +    double interval_ms =
  1.1021 +      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
  1.1022 +    update_recent_gc_times(end_time_sec, pause_time_ms);
  1.1023 +    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
  1.1024 +    if (recent_avg_pause_time_ratio() < 0.0 ||
  1.1025 +        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
  1.1026 +#ifndef PRODUCT
  1.1027 +      // Dump info to allow post-facto debugging
  1.1028 +      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
  1.1029 +      gclog_or_tty->print_cr("-------------------------------------------");
  1.1030 +      gclog_or_tty->print_cr("Recent GC Times (ms):");
  1.1031 +      _recent_gc_times_ms->dump();
  1.1032 +      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
  1.1033 +      _recent_prev_end_times_for_all_gcs_sec->dump();
  1.1034 +      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
  1.1035 +                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
  1.1036 +      // In debug mode, terminate the JVM if the user wants to debug at this point.
  1.1037 +      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
  1.1038 +#endif  // !PRODUCT
  1.1039 +      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
  1.1040 +      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
  1.1041 +      if (_recent_avg_pause_time_ratio < 0.0) {
  1.1042 +        _recent_avg_pause_time_ratio = 0.0;
  1.1043 +      } else {
  1.1044 +        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
  1.1045 +        _recent_avg_pause_time_ratio = 1.0;
  1.1046 +      }
  1.1047 +    }
  1.1048 +  }
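// Minimal sketch of the ratio computed above: the sum of the recent GC
// times divided by the wall-clock interval they span (assumed numbers):
#include <cstdio>

int main() {
  double recent_gc_times_ms[] = {25.0, 30.0, 28.0};
  double oldest_end_sec = 100.0, newest_end_sec = 101.0;
  double sum_ms = 0.0;
  for (double t : recent_gc_times_ms) sum_ms += t;
  double interval_ms = (newest_end_sec - oldest_end_sec) * 1000.0;
  double ratio = sum_ms / interval_ms;  // 83 ms of GC in 1000 ms -> 0.083
  std::printf("pause time ratio = %.3f\n", ratio);
  return 0;
}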
  1.1049 +
  1.1050 +  bool new_in_marking_window = _in_marking_window;
  1.1051 +  bool new_in_marking_window_im = false;
  1.1052 +  if (during_initial_mark_pause()) {
  1.1053 +    new_in_marking_window = true;
  1.1054 +    new_in_marking_window_im = true;
  1.1055 +  }
  1.1056 +
  1.1057 +  if (_last_young_gc) {
   1.1058 +    // This is supposed to be the "last young GC" before we start
  1.1059 +    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
  1.1060 +
  1.1061 +    if (!last_pause_included_initial_mark) {
  1.1062 +      if (next_gc_should_be_mixed("start mixed GCs",
  1.1063 +                                  "do not start mixed GCs")) {
  1.1064 +        set_gcs_are_young(false);
  1.1065 +      }
  1.1066 +    } else {
  1.1067 +      ergo_verbose0(ErgoMixedGCs,
  1.1068 +                    "do not start mixed GCs",
  1.1069 +                    ergo_format_reason("concurrent cycle is about to start"));
  1.1070 +    }
  1.1071 +    _last_young_gc = false;
  1.1072 +  }
  1.1073 +
  1.1074 +  if (!_last_gc_was_young) {
  1.1075 +    // This is a mixed GC. Here we decide whether to continue doing
  1.1076 +    // mixed GCs or not.
  1.1077 +
  1.1078 +    if (!next_gc_should_be_mixed("continue mixed GCs",
  1.1079 +                                 "do not continue mixed GCs")) {
  1.1080 +      set_gcs_are_young(true);
  1.1081 +    }
  1.1082 +  }
  1.1083 +
  1.1084 +  _short_lived_surv_rate_group->start_adding_regions();
   1.1085 +  // do that for any other surv rate groups
  1.1086 +
  1.1087 +  if (update_stats) {
  1.1088 +    double cost_per_card_ms = 0.0;
  1.1089 +    if (_pending_cards > 0) {
  1.1090 +      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
  1.1091 +      _cost_per_card_ms_seq->add(cost_per_card_ms);
  1.1092 +    }
  1.1093 +
  1.1094 +    size_t cards_scanned = _g1->cards_scanned();
  1.1095 +
  1.1096 +    double cost_per_entry_ms = 0.0;
  1.1097 +    if (cards_scanned > 10) {
  1.1098 +      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
  1.1099 +      if (_last_gc_was_young) {
  1.1100 +        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1.1101 +      } else {
  1.1102 +        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
  1.1103 +      }
  1.1104 +    }
  1.1105 +
  1.1106 +    if (_max_rs_lengths > 0) {
  1.1107 +      double cards_per_entry_ratio =
  1.1108 +        (double) cards_scanned / (double) _max_rs_lengths;
  1.1109 +      if (_last_gc_was_young) {
  1.1110 +        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1.1111 +      } else {
  1.1112 +        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
  1.1113 +      }
  1.1114 +    }
  1.1115 +
  1.1116 +    // This is defensive. For a while _max_rs_lengths could get
  1.1117 +    // smaller than _recorded_rs_lengths which was causing
  1.1118 +    // rs_length_diff to get very large and mess up the RSet length
  1.1119 +    // predictions. The reason was unsafe concurrent updates to the
  1.1120 +    // _inc_cset_recorded_rs_lengths field which the code below guards
  1.1121 +    // against (see CR 7118202). This bug has now been fixed (see CR
  1.1122 +    // 7119027). However, I'm still worried that
  1.1123 +    // _inc_cset_recorded_rs_lengths might still end up somewhat
  1.1124 +    // inaccurate. The concurrent refinement thread calculates an
  1.1125 +    // RSet's length concurrently with other CR threads updating it
  1.1126 +    // which might cause it to calculate the length incorrectly (if,
  1.1127 +    // say, it's in mid-coarsening). So I'll leave in the defensive
  1.1128 +    // conditional below just in case.
  1.1129 +    size_t rs_length_diff = 0;
  1.1130 +    if (_max_rs_lengths > _recorded_rs_lengths) {
  1.1131 +      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
  1.1132 +    }
  1.1133 +    _rs_length_diff_seq->add((double) rs_length_diff);
  1.1134 +
  1.1135 +    size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
  1.1136 +    size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
  1.1137 +    double cost_per_byte_ms = 0.0;
  1.1138 +
  1.1139 +    if (copied_bytes > 0) {
  1.1140 +      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
  1.1141 +      if (_in_marking_window) {
  1.1142 +        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
  1.1143 +      } else {
  1.1144 +        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
  1.1145 +      }
  1.1146 +    }
  1.1147 +
  1.1148 +    double all_other_time_ms = pause_time_ms -
  1.1149 +      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
  1.1150 +      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
  1.1151 +
  1.1152 +    double young_other_time_ms = 0.0;
  1.1153 +    if (young_cset_region_length() > 0) {
  1.1154 +      young_other_time_ms =
  1.1155 +        phase_times()->young_cset_choice_time_ms() +
  1.1156 +        phase_times()->young_free_cset_time_ms();
  1.1157 +      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
  1.1158 +                                          (double) young_cset_region_length());
  1.1159 +    }
  1.1160 +    double non_young_other_time_ms = 0.0;
  1.1161 +    if (old_cset_region_length() > 0) {
  1.1162 +      non_young_other_time_ms =
  1.1163 +        phase_times()->non_young_cset_choice_time_ms() +
  1.1164 +        phase_times()->non_young_free_cset_time_ms();
  1.1165 +
  1.1166 +      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
  1.1167 +                                            (double) old_cset_region_length());
  1.1168 +    }
  1.1169 +
  1.1170 +    double constant_other_time_ms = all_other_time_ms -
  1.1171 +      (young_other_time_ms + non_young_other_time_ms);
  1.1172 +    _constant_other_time_ms_seq->add(constant_other_time_ms);
  1.1173 +
  1.1174 +    double survival_ratio = 0.0;
  1.1175 +    if (_collection_set_bytes_used_before > 0) {
  1.1176 +      survival_ratio = (double) _bytes_copied_during_gc /
  1.1177 +                                   (double) _collection_set_bytes_used_before;
  1.1178 +    }
  1.1179 +
  1.1180 +    _pending_cards_seq->add((double) _pending_cards);
  1.1181 +    _rs_lengths_seq->add((double) _max_rs_lengths);
  1.1182 +  }
  1.1183 +
  1.1184 +  _in_marking_window = new_in_marking_window;
  1.1185 +  _in_marking_window_im = new_in_marking_window_im;
  1.1186 +  _free_regions_at_end_of_collection = _g1->free_regions();
  1.1187 +  update_young_list_target_length();
  1.1188 +
  1.1189 +  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
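          +  // For example, a 200ms pause goal with the default
          +  // G1RSetUpdatingPauseTimePercent of 10 yields a 20ms RS update goal.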
  1.1190 +  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  1.1191 +  adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
  1.1192 +                               phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
  1.1193 +
  1.1194 +  _collectionSetChooser->verify();
  1.1195 +}
  1.1196 +
  1.1197 +#define EXT_SIZE_FORMAT "%.1f%s"
  1.1198 +#define EXT_SIZE_PARAMS(bytes)                                  \
  1.1199 +  byte_size_in_proper_unit((double)(bytes)),                    \
  1.1200 +  proper_unit_for_byte_size((bytes))
  1.1201 +
  1.1202 +void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
  1.1203 +  YoungList* young_list = _g1->young_list();
  1.1204 +  _eden_used_bytes_before_gc = young_list->eden_used_bytes();
  1.1205 +  _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
  1.1206 +  _heap_capacity_bytes_before_gc = _g1->capacity();
  1.1207 +  _heap_used_bytes_before_gc = _g1->used();
  1.1208 +  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  1.1209 +
  1.1210 +  _eden_capacity_bytes_before_gc =
  1.1211 +         (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
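          +  // e.g., a 50-region young target with 1MB regions and 4MB of
          +  // survivors leaves 46MB of eden capacity.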
  1.1212 +
  1.1213 +  if (full) {
  1.1214 +    _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
  1.1215 +  }
  1.1216 +}
  1.1217 +
  1.1218 +void G1CollectorPolicy::print_heap_transition() {
  1.1219 +  _g1->print_size_transition(gclog_or_tty,
  1.1220 +                             _heap_used_bytes_before_gc,
  1.1221 +                             _g1->used(),
  1.1222 +                             _g1->capacity());
  1.1223 +}
  1.1224 +
  1.1225 +void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
  1.1226 +  YoungList* young_list = _g1->young_list();
  1.1227 +
  1.1228 +  size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
  1.1229 +  size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
  1.1230 +  size_t heap_used_bytes_after_gc = _g1->used();
  1.1231 +
  1.1232 +  size_t heap_capacity_bytes_after_gc = _g1->capacity();
  1.1233 +  size_t eden_capacity_bytes_after_gc =
  1.1234 +    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
  1.1235 +
  1.1236 +  gclog_or_tty->print(
  1.1237 +    "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
  1.1238 +    "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
  1.1239 +    "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
  1.1240 +    EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
  1.1241 +    EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
  1.1242 +    EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
  1.1243 +    EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
  1.1244 +    EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
  1.1245 +    EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
  1.1246 +    EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
  1.1247 +    EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
  1.1248 +    EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
  1.1249 +    EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
  1.1250 +    EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
  1.1251 +
  1.1252 +  if (full) {
  1.1253 +    MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
  1.1254 +  }
  1.1255 +
  1.1256 +  gclog_or_tty->cr();
  1.1257 +}
  1.1258 +
  1.1259 +void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
  1.1260 +                                                     double update_rs_processed_buffers,
  1.1261 +                                                     double goal_ms) {
  1.1262 +  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  1.1263 +  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
  1.1264 +
  1.1265 +  if (G1UseAdaptiveConcRefinement) {
  1.1266 +    const int k_gy = 3, k_gr = 6;
  1.1267 +    const double inc_k = 1.1, dec_k = 0.9;
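          +    // For instance, a green zone of 100 shrinks to 90 when the
          +    // update time overshoots the goal, or grows to MAX2(110, 101) = 110
          +    // when it undershoots with a buffer backlog; the yellow and red
          +    // zones then track at 3x and 6x the green zone.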
  1.1268 +
  1.1269 +    int g = cg1r->green_zone();
  1.1270 +    if (update_rs_time > goal_ms) {
  1.1271 +      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
  1.1272 +    } else {
  1.1273 +      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
  1.1274 +        g = (int)MAX2(g * inc_k, g + 1.0);
  1.1275 +      }
  1.1276 +    }
  1.1277 +    // Change the refinement threads params
  1.1278 +    cg1r->set_green_zone(g);
  1.1279 +    cg1r->set_yellow_zone(g * k_gy);
  1.1280 +    cg1r->set_red_zone(g * k_gr);
  1.1281 +    cg1r->reinitialize_threads();
  1.1282 +
  1.1283 +    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
  1.1284 +    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
  1.1285 +                                    cg1r->yellow_zone());
  1.1286 +    // Change the barrier params
  1.1287 +    dcqs.set_process_completed_threshold(processing_threshold);
  1.1288 +    dcqs.set_max_completed_queue(cg1r->red_zone());
  1.1289 +  }
  1.1290 +
  1.1291 +  int curr_queue_size = dcqs.completed_buffers_num();
  1.1292 +  if (curr_queue_size >= cg1r->yellow_zone()) {
  1.1293 +    dcqs.set_completed_queue_padding(curr_queue_size);
  1.1294 +  } else {
  1.1295 +    dcqs.set_completed_queue_padding(0);
  1.1296 +  }
  1.1297 +  dcqs.notify_if_necessary();
  1.1298 +}
  1.1299 +
  1.1300 +double
  1.1301 +G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
  1.1302 +                                                size_t scanned_cards) {
  1.1303 +  return
  1.1304 +    predict_rs_update_time_ms(pending_cards) +
  1.1305 +    predict_rs_scan_time_ms(scanned_cards) +
  1.1306 +    predict_constant_other_time_ms();
  1.1307 +}
  1.1308 +
  1.1309 +double
  1.1310 +G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  1.1311 +  size_t rs_length = predict_rs_length_diff();
  1.1312 +  size_t card_num;
  1.1313 +  if (gcs_are_young()) {
  1.1314 +    card_num = predict_young_card_num(rs_length);
  1.1315 +  } else {
  1.1316 +    card_num = predict_non_young_card_num(rs_length);
  1.1317 +  }
  1.1318 +  return predict_base_elapsed_time_ms(pending_cards, card_num);
  1.1319 +}
  1.1320 +
  1.1321 +size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  1.1322 +  size_t bytes_to_copy;
   1.1323 +  if (hr->is_marked()) {
   1.1324 +    bytes_to_copy = hr->max_live_bytes();
   1.1325 +  } else {
  1.1326 +    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
  1.1327 +    int age = hr->age_in_surv_rate_group();
  1.1328 +    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
  1.1329 +    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  1.1330 +  }
  1.1331 +  return bytes_to_copy;
  1.1332 +}
  1.1333 +
  1.1334 +double
  1.1335 +G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
  1.1336 +                                                  bool for_young_gc) {
  1.1337 +  size_t rs_length = hr->rem_set()->occupied();
  1.1338 +  size_t card_num;
  1.1339 +
  1.1340 +  // Predicting the number of cards is based on which type of GC
  1.1341 +  // we're predicting for.
  1.1342 +  if (for_young_gc) {
  1.1343 +    card_num = predict_young_card_num(rs_length);
  1.1344 +  } else {
  1.1345 +    card_num = predict_non_young_card_num(rs_length);
  1.1346 +  }
  1.1347 +  size_t bytes_to_copy = predict_bytes_to_copy(hr);
  1.1348 +
  1.1349 +  double region_elapsed_time_ms =
  1.1350 +    predict_rs_scan_time_ms(card_num) +
  1.1351 +    predict_object_copy_time_ms(bytes_to_copy);
  1.1352 +
  1.1353 +  // The prediction of the "other" time for this region is based
  1.1354 +  // upon the region type and NOT the GC type.
  1.1355 +  if (hr->is_young()) {
  1.1356 +    region_elapsed_time_ms += predict_young_other_time_ms(1);
  1.1357 +  } else {
  1.1358 +    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  1.1359 +  }
  1.1360 +  return region_elapsed_time_ms;
  1.1361 +}
  1.1362 +
  1.1363 +void
  1.1364 +G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
  1.1365 +                                            uint survivor_cset_region_length) {
  1.1366 +  _eden_cset_region_length     = eden_cset_region_length;
  1.1367 +  _survivor_cset_region_length = survivor_cset_region_length;
  1.1368 +  _old_cset_region_length      = 0;
  1.1369 +}
  1.1370 +
  1.1371 +void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  1.1372 +  _recorded_rs_lengths = rs_lengths;
  1.1373 +}
  1.1374 +
  1.1375 +void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
  1.1376 +                                               double elapsed_ms) {
  1.1377 +  _recent_gc_times_ms->add(elapsed_ms);
  1.1378 +  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  1.1379 +  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
  1.1380 +}
  1.1381 +
  1.1382 +size_t G1CollectorPolicy::expansion_amount() {
  1.1383 +  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  1.1384 +  double threshold = _gc_overhead_perc;
  1.1385 +  if (recent_gc_overhead > threshold) {
  1.1386 +    // We will double the existing space, or take
  1.1387 +    // G1ExpandByPercentOfAvailable % of the available expansion
  1.1388 +    // space, whichever is smaller, bounded below by a minimum
   1.1389 +    // expansion (unless that's all that's left).
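          +    // For example, with 1GB committed out of 4GB reserved, the
          +    // default G1ExpandByPercentOfAvailable of 20 takes 20% of the
          +    // 3GB uncommitted space, i.e. an expansion of roughly 614MB.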
  1.1390 +    const size_t min_expand_bytes = 1*M;
  1.1391 +    size_t reserved_bytes = _g1->max_capacity();
  1.1392 +    size_t committed_bytes = _g1->capacity();
  1.1393 +    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
  1.1394 +    size_t expand_bytes;
  1.1395 +    size_t expand_bytes_via_pct =
  1.1396 +      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
  1.1397 +    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
  1.1398 +    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
  1.1399 +    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
  1.1400 +
  1.1401 +    ergo_verbose5(ErgoHeapSizing,
  1.1402 +                  "attempt heap expansion",
  1.1403 +                  ergo_format_reason("recent GC overhead higher than "
  1.1404 +                                     "threshold after GC")
  1.1405 +                  ergo_format_perc("recent GC overhead")
  1.1406 +                  ergo_format_perc("threshold")
  1.1407 +                  ergo_format_byte("uncommitted")
  1.1408 +                  ergo_format_byte_perc("calculated expansion amount"),
  1.1409 +                  recent_gc_overhead, threshold,
  1.1410 +                  uncommitted_bytes,
  1.1411 +                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
  1.1412 +
  1.1413 +    return expand_bytes;
  1.1414 +  } else {
  1.1415 +    return 0;
  1.1416 +  }
  1.1417 +}
  1.1418 +
  1.1419 +void G1CollectorPolicy::print_tracing_info() const {
  1.1420 +  _trace_gen0_time_data.print();
  1.1421 +  _trace_gen1_time_data.print();
  1.1422 +}
  1.1423 +
  1.1424 +void G1CollectorPolicy::print_yg_surv_rate_info() const {
  1.1425 +#ifndef PRODUCT
  1.1426 +  _short_lived_surv_rate_group->print_surv_rate_summary();
  1.1427 +  // add this call for any other surv rate groups
  1.1428 +#endif // PRODUCT
  1.1429 +}
  1.1430 +
  1.1431 +uint G1CollectorPolicy::max_regions(int purpose) {
  1.1432 +  switch (purpose) {
  1.1433 +    case GCAllocForSurvived:
  1.1434 +      return _max_survivor_regions;
  1.1435 +    case GCAllocForTenured:
  1.1436 +      return REGIONS_UNLIMITED;
  1.1437 +    default:
  1.1438 +      ShouldNotReachHere();
  1.1439 +      return REGIONS_UNLIMITED;
   1.1440 +  }
  1.1441 +}
  1.1442 +
  1.1443 +void G1CollectorPolicy::update_max_gc_locker_expansion() {
  1.1444 +  uint expansion_region_num = 0;
  1.1445 +  if (GCLockerEdenExpansionPercent > 0) {
  1.1446 +    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
  1.1447 +    double expansion_region_num_d = perc * (double) _young_list_target_length;
  1.1448 +    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
  1.1449 +    // less than 1.0) we'll get 1.
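          +    // e.g., the default GCLockerEdenExpansionPercent of 5 with a
          +    // 100-region target allows 5 extra eden regions.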
  1.1450 +    expansion_region_num = (uint) ceil(expansion_region_num_d);
  1.1451 +  } else {
  1.1452 +    assert(expansion_region_num == 0, "sanity");
  1.1453 +  }
  1.1454 +  _young_list_max_length = _young_list_target_length + expansion_region_num;
  1.1455 +  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
  1.1456 +}
  1.1457 +
  1.1458 +// Calculates survivor space parameters.
  1.1459 +void G1CollectorPolicy::update_survivors_policy() {
  1.1460 +  double max_survivor_regions_d =
  1.1461 +                 (double) _young_list_target_length / (double) SurvivorRatio;
  1.1462 +  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  1.1463 +  // smaller than 1.0) we'll get 1.
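          +  // e.g., a 60-region young target with the default SurvivorRatio
          +  // of 8 caps survivor space at ceil(60 / 8) = ceil(7.5) = 8 regions.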
  1.1464 +  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
  1.1465 +
  1.1466 +  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
  1.1467 +        HeapRegion::GrainWords * _max_survivor_regions);
  1.1468 +}
  1.1469 +
  1.1470 +bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
  1.1471 +                                                     GCCause::Cause gc_cause) {
  1.1472 +  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  1.1473 +  if (!during_cycle) {
  1.1474 +    ergo_verbose1(ErgoConcCycles,
  1.1475 +                  "request concurrent cycle initiation",
  1.1476 +                  ergo_format_reason("requested by GC cause")
  1.1477 +                  ergo_format_str("GC cause"),
  1.1478 +                  GCCause::to_string(gc_cause));
  1.1479 +    set_initiate_conc_mark_if_possible();
  1.1480 +    return true;
  1.1481 +  } else {
  1.1482 +    ergo_verbose1(ErgoConcCycles,
  1.1483 +                  "do not request concurrent cycle initiation",
  1.1484 +                  ergo_format_reason("concurrent cycle already in progress")
  1.1485 +                  ergo_format_str("GC cause"),
  1.1486 +                  GCCause::to_string(gc_cause));
  1.1487 +    return false;
  1.1488 +  }
  1.1489 +}
  1.1490 +
  1.1491 +void
  1.1492 +G1CollectorPolicy::decide_on_conc_mark_initiation() {
  1.1493 +  // We are about to decide on whether this pause will be an
  1.1494 +  // initial-mark pause.
  1.1495 +
  1.1496 +  // First, during_initial_mark_pause() should not be already set. We
  1.1497 +  // will set it here if we have to. However, it should be cleared by
  1.1498 +  // the end of the pause (it's only set for the duration of an
  1.1499 +  // initial-mark pause).
  1.1500 +  assert(!during_initial_mark_pause(), "pre-condition");
  1.1501 +
  1.1502 +  if (initiate_conc_mark_if_possible()) {
  1.1503 +    // We had noticed on a previous pause that the heap occupancy has
  1.1504 +    // gone over the initiating threshold and we should start a
  1.1505 +    // concurrent marking cycle. So we might initiate one.
  1.1506 +
  1.1507 +    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  1.1508 +    if (!during_cycle) {
  1.1509 +      // The concurrent marking thread is not "during a cycle", i.e.,
  1.1510 +      // it has completed the last one. So we can go ahead and
  1.1511 +      // initiate a new cycle.
  1.1512 +
  1.1513 +      set_during_initial_mark_pause();
  1.1514 +      // We do not allow mixed GCs during marking.
  1.1515 +      if (!gcs_are_young()) {
  1.1516 +        set_gcs_are_young(true);
  1.1517 +        ergo_verbose0(ErgoMixedGCs,
  1.1518 +                      "end mixed GCs",
  1.1519 +                      ergo_format_reason("concurrent cycle is about to start"));
  1.1520 +      }
  1.1521 +
  1.1522 +      // And we can now clear initiate_conc_mark_if_possible() as
  1.1523 +      // we've already acted on it.
  1.1524 +      clear_initiate_conc_mark_if_possible();
  1.1525 +
  1.1526 +      ergo_verbose0(ErgoConcCycles,
  1.1527 +                  "initiate concurrent cycle",
  1.1528 +                  ergo_format_reason("concurrent cycle initiation requested"));
  1.1529 +    } else {
  1.1530 +      // The concurrent marking thread is still finishing up the
  1.1531 +      // previous cycle. If we start one right now the two cycles
  1.1532 +      // overlap. In particular, the concurrent marking thread might
  1.1533 +      // be in the process of clearing the next marking bitmap (which
  1.1534 +      // we will use for the next cycle if we start one). Starting a
  1.1535 +      // cycle now will be bad given that parts of the marking
  1.1536 +      // information might get cleared by the marking thread. And we
  1.1537 +      // cannot wait for the marking thread to finish the cycle as it
  1.1538 +      // periodically yields while clearing the next marking bitmap
  1.1539 +      // and, if it's in a yield point, it's waiting for us to
  1.1540 +      // finish. So, at this point we will not start a cycle and we'll
  1.1541 +      // let the concurrent marking thread complete the last one.
  1.1542 +      ergo_verbose0(ErgoConcCycles,
  1.1543 +                    "do not initiate concurrent cycle",
  1.1544 +                    ergo_format_reason("concurrent cycle already in progress"));
  1.1545 +    }
  1.1546 +  }
  1.1547 +}
  1.1548 +
  1.1549 +class KnownGarbageClosure: public HeapRegionClosure {
  1.1550 +  G1CollectedHeap* _g1h;
  1.1551 +  CollectionSetChooser* _hrSorted;
  1.1552 +
  1.1553 +public:
  1.1554 +  KnownGarbageClosure(CollectionSetChooser* hrSorted) :
  1.1555 +    _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
  1.1556 +
  1.1557 +  bool doHeapRegion(HeapRegion* r) {
  1.1558 +    // We only include humongous regions in collection
  1.1559 +    // sets when concurrent mark shows that their contained object is
  1.1560 +    // unreachable.
  1.1561 +
  1.1562 +    // Do we have any marking information for this region?
  1.1563 +    if (r->is_marked()) {
  1.1564 +      // We will skip any region that's currently used as an old GC
  1.1565 +      // alloc region (we should not consider those for collection
  1.1566 +      // before we fill them up).
  1.1567 +      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
  1.1568 +        _hrSorted->add_region(r);
  1.1569 +      }
  1.1570 +    }
  1.1571 +    return false;
  1.1572 +  }
  1.1573 +};
  1.1574 +
  1.1575 +class ParKnownGarbageHRClosure: public HeapRegionClosure {
  1.1576 +  G1CollectedHeap* _g1h;
  1.1577 +  CSetChooserParUpdater _cset_updater;
  1.1578 +
  1.1579 +public:
  1.1580 +  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
  1.1581 +                           uint chunk_size) :
  1.1582 +    _g1h(G1CollectedHeap::heap()),
  1.1583 +    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
  1.1584 +
  1.1585 +  bool doHeapRegion(HeapRegion* r) {
  1.1586 +    // Do we have any marking information for this region?
  1.1587 +    if (r->is_marked()) {
  1.1588 +      // We will skip any region that's currently used as an old GC
  1.1589 +      // alloc region (we should not consider those for collection
  1.1590 +      // before we fill them up).
  1.1591 +      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
  1.1592 +        _cset_updater.add_region(r);
  1.1593 +      }
  1.1594 +    }
  1.1595 +    return false;
  1.1596 +  }
  1.1597 +};
  1.1598 +
  1.1599 +class ParKnownGarbageTask: public AbstractGangTask {
  1.1600 +  CollectionSetChooser* _hrSorted;
  1.1601 +  uint _chunk_size;
  1.1602 +  G1CollectedHeap* _g1;
  1.1603 +public:
  1.1604 +  ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
  1.1605 +    AbstractGangTask("ParKnownGarbageTask"),
  1.1606 +    _hrSorted(hrSorted), _chunk_size(chunk_size),
  1.1607 +    _g1(G1CollectedHeap::heap()) { }
  1.1608 +
  1.1609 +  void work(uint worker_id) {
  1.1610 +    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
  1.1611 +
   1.1612 +    // Claim regions starting from the initial (zero) claim value.
  1.1613 +    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
  1.1614 +                                         _g1->workers()->active_workers(),
  1.1615 +                                         HeapRegion::InitialClaimValue);
  1.1616 +  }
  1.1617 +};
  1.1618 +
  1.1619 +void
  1.1620 +G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  1.1621 +  _collectionSetChooser->clear();
  1.1622 +
  1.1623 +  uint region_num = _g1->n_regions();
  1.1624 +  if (G1CollectedHeap::use_parallel_gc_threads()) {
  1.1625 +    const uint OverpartitionFactor = 4;
  1.1626 +    uint WorkUnit;
  1.1627 +    // The use of MinChunkSize = 8 in the original code
  1.1628 +    // causes some assertion failures when the total number of
   1.1629 +    // regions is less than 8.  The code here tries to fix that.
  1.1630 +    // Should the original code also be fixed?
  1.1631 +    if (no_of_gc_threads > 0) {
  1.1632 +      const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
  1.1633 +      WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
  1.1634 +                      MinWorkUnit);
  1.1635 +    } else {
  1.1636 +      assert(no_of_gc_threads > 0,
  1.1637 +        "The active gc workers should be greater than 0");
  1.1638 +      // In a product build do something reasonable to avoid a crash.
  1.1639 +      const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
  1.1640 +      WorkUnit =
  1.1641 +        MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
  1.1642 +             MinWorkUnit);
  1.1643 +    }
  1.1644 +    _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
  1.1645 +                                                           WorkUnit);
  1.1646 +    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
  1.1647 +                                            (int) WorkUnit);
  1.1648 +    _g1->workers()->run_task(&parKnownGarbageTask);
  1.1649 +
  1.1650 +    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
  1.1651 +           "sanity check");
  1.1652 +  } else {
  1.1653 +    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
  1.1654 +    _g1->heap_region_iterate(&knownGarbagecl);
  1.1655 +  }
  1.1656 +
  1.1657 +  _collectionSetChooser->sort_regions();
  1.1658 +
  1.1659 +  double end_sec = os::elapsedTime();
  1.1660 +  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  1.1661 +  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  1.1662 +  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  1.1663 +  _prev_collection_pause_end_ms += elapsed_time_ms;
  1.1664 +  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
  1.1665 +}
  1.1666 +
  1.1667 +// Add the heap region at the head of the non-incremental collection set
  1.1668 +void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
  1.1669 +  assert(_inc_cset_build_state == Active, "Precondition");
  1.1670 +  assert(!hr->is_young(), "non-incremental add of young region");
  1.1671 +
  1.1672 +  assert(!hr->in_collection_set(), "should not already be in the CSet");
  1.1673 +  hr->set_in_collection_set(true);
  1.1674 +  hr->set_next_in_collection_set(_collection_set);
  1.1675 +  _collection_set = hr;
  1.1676 +  _collection_set_bytes_used_before += hr->used();
  1.1677 +  _g1->register_region_with_in_cset_fast_test(hr);
  1.1678 +  size_t rs_length = hr->rem_set()->occupied();
  1.1679 +  _recorded_rs_lengths += rs_length;
  1.1680 +  _old_cset_region_length += 1;
  1.1681 +}
  1.1682 +
  1.1683 +// Initialize the per-collection-set information
  1.1684 +void G1CollectorPolicy::start_incremental_cset_building() {
  1.1685 +  assert(_inc_cset_build_state == Inactive, "Precondition");
  1.1686 +
  1.1687 +  _inc_cset_head = NULL;
  1.1688 +  _inc_cset_tail = NULL;
  1.1689 +  _inc_cset_bytes_used_before = 0;
  1.1690 +
  1.1691 +  _inc_cset_max_finger = 0;
  1.1692 +  _inc_cset_recorded_rs_lengths = 0;
  1.1693 +  _inc_cset_recorded_rs_lengths_diffs = 0;
  1.1694 +  _inc_cset_predicted_elapsed_time_ms = 0.0;
  1.1695 +  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  1.1696 +  _inc_cset_build_state = Active;
  1.1697 +}
  1.1698 +
  1.1699 +void G1CollectorPolicy::finalize_incremental_cset_building() {
  1.1700 +  assert(_inc_cset_build_state == Active, "Precondition");
  1.1701 +  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
  1.1702 +
  1.1703 +  // The two "main" fields, _inc_cset_recorded_rs_lengths and
  1.1704 +  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  1.1705 +  // that adds a new region to the CSet. Further updates by the
  1.1706 +  // concurrent refinement thread that samples the young RSet lengths
  1.1707 +  // are accumulated in the *_diffs fields. Here we add the diffs to
  1.1708 +  // the "main" fields.
  1.1709 +
  1.1710 +  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
  1.1711 +    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  1.1712 +  } else {
  1.1713 +    // This is defensive. The diff should in theory be always positive
  1.1714 +    // as RSets can only grow between GCs. However, given that we
  1.1715 +    // sample their size concurrently with other threads updating them
  1.1716 +    // it's possible that we might get the wrong size back, which
  1.1717 +    // could make the calculations somewhat inaccurate.
  1.1718 +    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
  1.1719 +    if (_inc_cset_recorded_rs_lengths >= diffs) {
  1.1720 +      _inc_cset_recorded_rs_lengths -= diffs;
  1.1721 +    } else {
  1.1722 +      _inc_cset_recorded_rs_lengths = 0;
  1.1723 +    }
  1.1724 +  }
  1.1725 +  _inc_cset_predicted_elapsed_time_ms +=
  1.1726 +                                     _inc_cset_predicted_elapsed_time_ms_diffs;
  1.1727 +
  1.1728 +  _inc_cset_recorded_rs_lengths_diffs = 0;
  1.1729 +  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
  1.1730 +}
  1.1731 +
  1.1732 +void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  1.1733 +  // This routine is used when:
  1.1734 +  // * adding survivor regions to the incremental cset at the end of an
  1.1735 +  //   evacuation pause,
  1.1736 +  // * adding the current allocation region to the incremental cset
  1.1737 +  //   when it is retired, and
  1.1738 +  // * updating existing policy information for a region in the
  1.1739 +  //   incremental cset via young list RSet sampling.
  1.1740 +  // Therefore this routine may be called at a safepoint by the
  1.1741 +  // VM thread, or in-between safepoints by mutator threads (when
  1.1742 +  // retiring the current allocation region) or a concurrent
  1.1743 +  // refine thread (RSet sampling).
  1.1744 +
  1.1745 +  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  1.1746 +  size_t used_bytes = hr->used();
  1.1747 +  _inc_cset_recorded_rs_lengths += rs_length;
  1.1748 +  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  1.1749 +  _inc_cset_bytes_used_before += used_bytes;
  1.1750 +
   1.1751 +  // Cache the values we have added to the aggregated information
  1.1752 +  // in the heap region in case we have to remove this region from
  1.1753 +  // the incremental collection set, or it is updated by the
  1.1754 +  // rset sampling code
  1.1755 +  hr->set_recorded_rs_length(rs_length);
  1.1756 +  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
  1.1757 +}
  1.1758 +
  1.1759 +void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
  1.1760 +                                                     size_t new_rs_length) {
  1.1761 +  // Update the CSet information that is dependent on the new RS length
  1.1762 +  assert(hr->is_young(), "Precondition");
  1.1763 +  assert(!SafepointSynchronize::is_at_safepoint(),
  1.1764 +                                               "should not be at a safepoint");
  1.1765 +
  1.1766 +  // We could have updated _inc_cset_recorded_rs_lengths and
  1.1767 +  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  1.1768 +  // that atomically, as this code is executed by a concurrent
  1.1769 +  // refinement thread, potentially concurrently with a mutator thread
  1.1770 +  // allocating a new region and also updating the same fields. To
  1.1771 +  // avoid the atomic operations we accumulate these updates on two
  1.1772 +  // separate fields (*_diffs) and we'll just add them to the "main"
  1.1773 +  // fields at the start of a GC.
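          +  // For example, if a refinement sample sees a region's RSet length
          +  // grow from 10 to 14, a diff of +4 is accumulated here and folded
          +  // into the main fields at the start of the next GC.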
  1.1774 +
  1.1775 +  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  1.1776 +  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  1.1777 +  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
  1.1778 +
  1.1779 +  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  1.1780 +  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  1.1781 +  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  1.1782 +  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
  1.1783 +
  1.1784 +  hr->set_recorded_rs_length(new_rs_length);
  1.1785 +  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
  1.1786 +}
  1.1787 +
  1.1788 +void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  1.1789 +  assert(hr->is_young(), "invariant");
  1.1790 +  assert(hr->young_index_in_cset() > -1, "should have already been set");
  1.1791 +  assert(_inc_cset_build_state == Active, "Precondition");
  1.1792 +
  1.1793 +  // We need to clear and set the cached recorded/cached collection set
  1.1794 +  // information in the heap region here (before the region gets added
  1.1795 +  // to the collection set). An individual heap region's cached values
  1.1796 +  // are calculated, aggregated with the policy collection set info,
  1.1797 +  // and cached in the heap region here (initially) and (subsequently)
  1.1798 +  // by the Young List sampling code.
  1.1799 +
  1.1800 +  size_t rs_length = hr->rem_set()->occupied();
  1.1801 +  add_to_incremental_cset_info(hr, rs_length);
  1.1802 +
  1.1803 +  HeapWord* hr_end = hr->end();
  1.1804 +  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
  1.1805 +
  1.1806 +  assert(!hr->in_collection_set(), "invariant");
  1.1807 +  hr->set_in_collection_set(true);
  1.1808 +  assert( hr->next_in_collection_set() == NULL, "invariant");
  1.1809 +
  1.1810 +  _g1->register_region_with_in_cset_fast_test(hr);
  1.1811 +}
  1.1812 +
  1.1813 +// Add the region at the RHS of the incremental cset
  1.1814 +void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
  1.1815 +  // We should only ever be appending survivors at the end of a pause
  1.1816 +  assert( hr->is_survivor(), "Logic");
  1.1817 +
  1.1818 +  // Do the 'common' stuff
  1.1819 +  add_region_to_incremental_cset_common(hr);
  1.1820 +
  1.1821 +  // Now add the region at the right hand side
  1.1822 +  if (_inc_cset_tail == NULL) {
  1.1823 +    assert(_inc_cset_head == NULL, "invariant");
  1.1824 +    _inc_cset_head = hr;
  1.1825 +  } else {
  1.1826 +    _inc_cset_tail->set_next_in_collection_set(hr);
  1.1827 +  }
  1.1828 +  _inc_cset_tail = hr;
  1.1829 +}
  1.1830 +
  1.1831 +// Add the region to the LHS of the incremental cset
  1.1832 +void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
  1.1833 +  // Survivors should be added to the RHS at the end of a pause
  1.1834 +  assert(!hr->is_survivor(), "Logic");
  1.1835 +
  1.1836 +  // Do the 'common' stuff
  1.1837 +  add_region_to_incremental_cset_common(hr);
  1.1838 +
  1.1839 +  // Add the region at the left hand side
  1.1840 +  hr->set_next_in_collection_set(_inc_cset_head);
  1.1841 +  if (_inc_cset_head == NULL) {
  1.1842 +    assert(_inc_cset_tail == NULL, "Invariant");
  1.1843 +    _inc_cset_tail = hr;
  1.1844 +  }
  1.1845 +  _inc_cset_head = hr;
  1.1846 +}
  1.1847 +
  1.1848 +#ifndef PRODUCT
  1.1849 +void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  1.1850 +  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
  1.1851 +
  1.1852 +  st->print_cr("\nCollection_set:");
  1.1853 +  HeapRegion* csr = list_head;
  1.1854 +  while (csr != NULL) {
  1.1855 +    HeapRegion* next = csr->next_in_collection_set();
  1.1856 +    assert(csr->in_collection_set(), "bad CS");
   1.1857 +    st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
  1.1858 +                 HR_FORMAT_PARAMS(csr),
  1.1859 +                 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
  1.1860 +                 csr->age_in_surv_rate_group_cond());
  1.1861 +    csr = next;
  1.1862 +  }
  1.1863 +}
  1.1864 +#endif // !PRODUCT
  1.1865 +
  1.1866 +double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
  1.1867 +  // Returns the given amount of reclaimable bytes (that represents
  1.1868 +  // the amount of reclaimable space still to be collected) as a
  1.1869 +  // percentage of the current heap capacity.
  1.1870 +  size_t capacity_bytes = _g1->capacity();
  1.1871 +  return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  1.1872 +}
  1.1873 +
  1.1874 +bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
  1.1875 +                                                const char* false_action_str) {
  1.1876 +  CollectionSetChooser* cset_chooser = _collectionSetChooser;
  1.1877 +  if (cset_chooser->is_empty()) {
  1.1878 +    ergo_verbose0(ErgoMixedGCs,
  1.1879 +                  false_action_str,
  1.1880 +                  ergo_format_reason("candidate old regions not available"));
  1.1881 +    return false;
  1.1882 +  }
  1.1883 +
  1.1884 +  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  1.1885 +  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  1.1886 +  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  1.1887 +  double threshold = (double) G1HeapWastePercent;
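          +  // e.g., 100MB reclaimable in a 4GB heap is about 2.4%, below the
          +  // default G1HeapWastePercent of 5, so mixed GCs would stop.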
  1.1888 +  if (reclaimable_perc <= threshold) {
  1.1889 +    ergo_verbose4(ErgoMixedGCs,
  1.1890 +              false_action_str,
  1.1891 +              ergo_format_reason("reclaimable percentage not over threshold")
  1.1892 +              ergo_format_region("candidate old regions")
  1.1893 +              ergo_format_byte_perc("reclaimable")
  1.1894 +              ergo_format_perc("threshold"),
  1.1895 +              cset_chooser->remaining_regions(),
  1.1896 +              reclaimable_bytes,
  1.1897 +              reclaimable_perc, threshold);
  1.1898 +    return false;
  1.1899 +  }
  1.1900 +
  1.1901 +  ergo_verbose4(ErgoMixedGCs,
  1.1902 +                true_action_str,
  1.1903 +                ergo_format_reason("candidate old regions available")
  1.1904 +                ergo_format_region("candidate old regions")
  1.1905 +                ergo_format_byte_perc("reclaimable")
  1.1906 +                ergo_format_perc("threshold"),
  1.1907 +                cset_chooser->remaining_regions(),
  1.1908 +                reclaimable_bytes,
  1.1909 +                reclaimable_perc, threshold);
  1.1910 +  return true;
  1.1911 +}
  1.1912 +
  1.1913 +uint G1CollectorPolicy::calc_min_old_cset_length() {
  1.1914 +  // The min old CSet region bound is based on the maximum desired
  1.1915 +  // number of mixed GCs after a cycle. I.e., even if some old regions
  1.1916 +  // look expensive, we should add them to the CSet anyway to make
  1.1917 +  // sure we go through the available old regions in no more than the
  1.1918 +  // maximum desired number of mixed GCs.
  1.1919 +  //
  1.1920 +  // The calculation is based on the number of marked regions we added
  1.1921 +  // to the CSet chooser in the first place, not how many remain, so
  1.1922 +  // that the result is the same during all mixed GCs that follow a cycle.
  1.1923 +
  1.1924 +  const size_t region_num = (size_t) _collectionSetChooser->length();
  1.1925 +  const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
  1.1926 +  size_t result = region_num / gc_num;
  1.1927 +  // emulate ceiling
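          +  // e.g., 100 candidate regions with the default G1MixedGCCountTarget
          +  // of 8 give 100 / 8 = 12, and 12 * 8 = 96 < 100, so the min is 13.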
  1.1928 +  if (result * gc_num < region_num) {
  1.1929 +    result += 1;
  1.1930 +  }
  1.1931 +  return (uint) result;
  1.1932 +}
  1.1933 +
  1.1934 +uint G1CollectorPolicy::calc_max_old_cset_length() {
  1.1935 +  // The max old CSet region bound is based on the threshold expressed
  1.1936 +  // as a percentage of the heap size. I.e., it should bound the
  1.1937 +  // number of old regions added to the CSet irrespective of how many
  1.1938 +  // of them are available.
  1.1939 +
  1.1940 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1.1941 +  const size_t region_num = g1h->n_regions();
  1.1942 +  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
  1.1943 +  size_t result = region_num * perc / 100;
  1.1944 +  // emulate ceiling
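          +  // e.g., 2048 regions with the default G1OldCSetRegionThresholdPercent
          +  // of 10 give 204, and 100 * 204 < 2048 * 10, so the max is 205.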
  1.1945 +  if (100 * result < region_num * perc) {
  1.1946 +    result += 1;
  1.1947 +  }
  1.1948 +  return (uint) result;
  1.1949 +}
  1.1950 +
  1.1951 +
  1.1952 +void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
  1.1953 +  double young_start_time_sec = os::elapsedTime();
  1.1954 +
  1.1955 +  YoungList* young_list = _g1->young_list();
  1.1956 +  finalize_incremental_cset_building();
  1.1957 +
  1.1958 +  guarantee(target_pause_time_ms > 0.0,
  1.1959 +            err_msg("target_pause_time_ms = %1.6lf should be positive",
  1.1960 +                    target_pause_time_ms));
  1.1961 +  guarantee(_collection_set == NULL, "Precondition");
  1.1962 +
  1.1963 +  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  1.1964 +  double predicted_pause_time_ms = base_time_ms;
  1.1965 +  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
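          +  // e.g., a predicted base time of 30ms against a 200ms target
          +  // leaves 170ms of budget for adding regions to the CSet.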
  1.1966 +
  1.1967 +  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
  1.1968 +                "start choosing CSet",
  1.1969 +                ergo_format_size("_pending_cards")
  1.1970 +                ergo_format_ms("predicted base time")
  1.1971 +                ergo_format_ms("remaining time")
  1.1972 +                ergo_format_ms("target pause time"),
  1.1973 +                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
  1.1974 +
   1.1975 +  _last_gc_was_young = gcs_are_young();
  1.1976 +
  1.1977 +  if (_last_gc_was_young) {
  1.1978 +    _trace_gen0_time_data.increment_young_collection_count();
  1.1979 +  } else {
  1.1980 +    _trace_gen0_time_data.increment_mixed_collection_count();
  1.1981 +  }
  1.1982 +
   1.1983 +  // The young list is laid out with the survivor regions from the
   1.1984 +  // previous pause appended to its RHS, i.e.
   1.1985 +  //   [Newly Young Regions ++ Survivors from last pause].
  1.1986 +
  1.1987 +  uint survivor_region_length = young_list->survivor_length();
  1.1988 +  uint eden_region_length = young_list->length() - survivor_region_length;
  1.1989 +  init_cset_region_lengths(eden_region_length, survivor_region_length);
  1.1990 +
  1.1991 +  HeapRegion* hr = young_list->first_survivor_region();
  1.1992 +  while (hr != NULL) {
  1.1993 +    assert(hr->is_survivor(), "badly formed young list");
  1.1994 +    hr->set_young();
  1.1995 +    hr = hr->get_next_young_region();
  1.1996 +  }
  1.1997 +
  1.1998 +  // Clear the fields that point to the survivor list - they are all young now.
  1.1999 +  young_list->clear_survivors();
  1.2000 +
  1.2001 +  _collection_set = _inc_cset_head;
  1.2002 +  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  1.2003 +  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
  1.2004 +  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
  1.2005 +
  1.2006 +  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
  1.2007 +                "add young regions to CSet",
  1.2008 +                ergo_format_region("eden")
  1.2009 +                ergo_format_region("survivors")
  1.2010 +                ergo_format_ms("predicted young region time"),
  1.2011 +                eden_region_length, survivor_region_length,
  1.2012 +                _inc_cset_predicted_elapsed_time_ms);
  1.2013 +
  1.2014 +  // The number of recorded young regions is the incremental
  1.2015 +  // collection set's current size
  1.2016 +  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  1.2017 +
  1.2018 +  double young_end_time_sec = os::elapsedTime();
  1.2019 +  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
  1.2020 +
  1.2021 +  // Set the start of the non-young choice time.
  1.2022 +  double non_young_start_time_sec = young_end_time_sec;
  1.2023 +
  1.2024 +  if (!gcs_are_young()) {
  1.2025 +    CollectionSetChooser* cset_chooser = _collectionSetChooser;
  1.2026 +    cset_chooser->verify();
  1.2027 +    const uint min_old_cset_length = calc_min_old_cset_length();
  1.2028 +    const uint max_old_cset_length = calc_max_old_cset_length();
  1.2029 +
  1.2030 +    uint expensive_region_num = 0;
  1.2031 +    bool check_time_remaining = adaptive_young_list_length();
  1.2032 +
  1.2033 +    HeapRegion* hr = cset_chooser->peek();
  1.2034 +    while (hr != NULL) {
  1.2035 +      if (old_cset_region_length() >= max_old_cset_length) {
  1.2036 +        // Added maximum number of old regions to the CSet.
  1.2037 +        ergo_verbose2(ErgoCSetConstruction,
  1.2038 +                      "finish adding old regions to CSet",
  1.2039 +                      ergo_format_reason("old CSet region num reached max")
  1.2040 +                      ergo_format_region("old")
  1.2041 +                      ergo_format_region("max"),
  1.2042 +                      old_cset_region_length(), max_old_cset_length);
  1.2043 +        break;
  1.2044 +      }
   1.2045 +
  1.2047 +      // Stop adding regions if the remaining reclaimable space is
  1.2048 +      // not above G1HeapWastePercent.
  1.2049 +      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  1.2050 +      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  1.2051 +      double threshold = (double) G1HeapWastePercent;
  1.2052 +      if (reclaimable_perc <= threshold) {
  1.2053 +        // We've added enough old regions that the amount of uncollected
  1.2054 +        // reclaimable space is at or below the waste threshold. Stop
  1.2055 +        // adding old regions to the CSet.
  1.2056 +        ergo_verbose5(ErgoCSetConstruction,
  1.2057 +                      "finish adding old regions to CSet",
  1.2058 +                      ergo_format_reason("reclaimable percentage not over threshold")
  1.2059 +                      ergo_format_region("old")
  1.2060 +                      ergo_format_region("max")
  1.2061 +                      ergo_format_byte_perc("reclaimable")
  1.2062 +                      ergo_format_perc("threshold"),
  1.2063 +                      old_cset_region_length(),
  1.2064 +                      max_old_cset_length,
  1.2065 +                      reclaimable_bytes,
  1.2066 +                      reclaimable_perc, threshold);
  1.2067 +        break;
  1.2068 +      }
  1.2069 +
  1.2070 +      double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
  1.2071 +      if (check_time_remaining) {
  1.2072 +        if (predicted_time_ms > time_remaining_ms) {
  1.2073 +          // Too expensive for the current CSet.
  1.2074 +
  1.2075 +          if (old_cset_region_length() >= min_old_cset_length) {
  1.2076 +            // We have added the minimum number of old regions to the CSet,
  1.2077 +            // we are done with this CSet.
  1.2078 +            ergo_verbose4(ErgoCSetConstruction,
  1.2079 +                          "finish adding old regions to CSet",
  1.2080 +                          ergo_format_reason("predicted time is too high")
  1.2081 +                          ergo_format_ms("predicted time")
  1.2082 +                          ergo_format_ms("remaining time")
  1.2083 +                          ergo_format_region("old")
  1.2084 +                          ergo_format_region("min"),
  1.2085 +                          predicted_time_ms, time_remaining_ms,
  1.2086 +                          old_cset_region_length(), min_old_cset_length);
  1.2087 +            break;
  1.2088 +          }
  1.2089 +
  1.2090 +          // We'll add it anyway given that we haven't reached the
  1.2091 +          // minimum number of old regions.
  1.2092 +          expensive_region_num += 1;
  1.2093 +        }
  1.2094 +      } else {
  1.2095 +        if (old_cset_region_length() >= min_old_cset_length) {
  1.2096 +          // In the non-auto-tuning case, we'll finish adding regions
  1.2097 +          // to the CSet if we reach the minimum.
  1.2098 +          ergo_verbose2(ErgoCSetConstruction,
  1.2099 +                        "finish adding old regions to CSet",
  1.2100 +                        ergo_format_reason("old CSet region num reached min")
  1.2101 +                        ergo_format_region("old")
  1.2102 +                        ergo_format_region("min"),
  1.2103 +                        old_cset_region_length(), min_old_cset_length);
  1.2104 +          break;
  1.2105 +        }
  1.2106 +      }
  1.2107 +
  1.2108 +      // We will add this region to the CSet.
  1.2109 +      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
  1.2110 +      predicted_pause_time_ms += predicted_time_ms;
  1.2111 +      cset_chooser->remove_and_move_to_next(hr);
  1.2112 +      _g1->old_set_remove(hr);
  1.2113 +      add_old_region_to_cset(hr);
  1.2114 +
  1.2115 +      hr = cset_chooser->peek();
  1.2116 +    }
  1.2117 +    if (hr == NULL) {
  1.2118 +      ergo_verbose0(ErgoCSetConstruction,
  1.2119 +                    "finish adding old regions to CSet",
  1.2120 +                    ergo_format_reason("candidate old regions not available"));
  1.2121 +    }
  1.2122 +
  1.2123 +    if (expensive_region_num > 0) {
  1.2124 +      // We print the information once here at the end, predicated on
  1.2125 +      // whether we added any apparently expensive regions or not, to
  1.2126 +      // avoid generating output per region.
  1.2127 +      ergo_verbose4(ErgoCSetConstruction,
  1.2128 +                    "added expensive regions to CSet",
  1.2129 +                    ergo_format_reason("old CSet region num not reached min")
  1.2130 +                    ergo_format_region("old")
  1.2131 +                    ergo_format_region("expensive")
  1.2132 +                    ergo_format_region("min")
  1.2133 +                    ergo_format_ms("remaining time"),
  1.2134 +                    old_cset_region_length(),
  1.2135 +                    expensive_region_num,
  1.2136 +                    min_old_cset_length,
  1.2137 +                    time_remaining_ms);
  1.2138 +    }
  1.2139 +
  1.2140 +    cset_chooser->verify();
  1.2141 +  }
  1.2142 +
  1.2143 +  stop_incremental_cset_building();
  1.2144 +
  1.2145 +  ergo_verbose5(ErgoCSetConstruction,
  1.2146 +                "finish choosing CSet",
  1.2147 +                ergo_format_region("eden")
  1.2148 +                ergo_format_region("survivors")
  1.2149 +                ergo_format_region("old")
  1.2150 +                ergo_format_ms("predicted pause time")
  1.2151 +                ergo_format_ms("target pause time"),
  1.2152 +                eden_region_length, survivor_region_length,
  1.2153 +                old_cset_region_length(),
  1.2154 +                predicted_pause_time_ms, target_pause_time_ms);
  1.2155 +
  1.2156 +  double non_young_end_time_sec = os::elapsedTime();
  1.2157 +  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
  1.2158 +  evacuation_info.set_collectionset_regions(cset_region_length());
  1.2159 +}
  1.2160 +
  1.2161 +void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
   1.2162 +  if (TraceGen0Time) {
  1.2163 +    _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
  1.2164 +  }
  1.2165 +}
  1.2166 +
  1.2167 +void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
   1.2168 +  if (TraceGen0Time) {
  1.2169 +    _all_yield_times_ms.add(yield_time_ms);
  1.2170 +  }
  1.2171 +}
  1.2172 +
  1.2173 +void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
   1.2174 +  if (TraceGen0Time) {
  1.2175 +    _total.add(pause_time_ms);
  1.2176 +    _other.add(pause_time_ms - phase_times->accounted_time_ms());
  1.2177 +    _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
  1.2178 +    _parallel.add(phase_times->cur_collection_par_time_ms());
  1.2179 +    _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
  1.2180 +    _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
  1.2181 +    _update_rs.add(phase_times->average_last_update_rs_time());
  1.2182 +    _scan_rs.add(phase_times->average_last_scan_rs_time());
  1.2183 +    _obj_copy.add(phase_times->average_last_obj_copy_time());
  1.2184 +    _termination.add(phase_times->average_last_termination_time());
  1.2185 +
  1.2186 +    double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
  1.2187 +      phase_times->average_last_satb_filtering_times_ms() +
  1.2188 +      phase_times->average_last_update_rs_time() +
  1.2189 +      phase_times->average_last_scan_rs_time() +
  1.2190 +      phase_times->average_last_obj_copy_time() +
   1.2191 +      phase_times->average_last_termination_time();
  1.2192 +
  1.2193 +    double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
  1.2194 +    _parallel_other.add(parallel_other_time);
  1.2195 +    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  1.2196 +  }
  1.2197 +}
  1.2198 +
  1.2199 +void TraceGen0TimeData::increment_young_collection_count() {
   1.2200 +  if (TraceGen0Time) {
  1.2201 +    ++_young_pause_num;
  1.2202 +  }
  1.2203 +}
  1.2204 +
  1.2205 +void TraceGen0TimeData::increment_mixed_collection_count() {
   1.2206 +  if (TraceGen0Time) {
  1.2207 +    ++_mixed_pause_num;
  1.2208 +  }
  1.2209 +}
  1.2210 +
  1.2211 +void TraceGen0TimeData::print_summary(const char* str,
  1.2212 +                                      const NumberSeq* seq) const {
  1.2213 +  double sum = seq->sum();
  1.2214 +  gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
  1.2215 +                str, sum / 1000.0, seq->avg());
  1.2216 +}
  1.2217 +
  1.2218 +void TraceGen0TimeData::print_summary_sd(const char* str,
  1.2219 +                                         const NumberSeq* seq) const {
  1.2220 +  print_summary(str, seq);
  1.2221 +  gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
  1.2222 +                "(num", seq->num(), seq->sd(), seq->maximum());
  1.2223 +}
  1.2224 +
  1.2225 +void TraceGen0TimeData::print() const {
  1.2226 +  if (!TraceGen0Time) {
  1.2227 +    return;
  1.2228 +  }
  1.2229 +
  1.2230 +  gclog_or_tty->print_cr("ALL PAUSES");
  1.2231 +  print_summary_sd("   Total", &_total);
  1.2232 +  gclog_or_tty->cr();
  1.2233 +  gclog_or_tty->cr();
  1.2234 +  gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  1.2235 +  gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  1.2236 +  gclog_or_tty->cr();
  1.2237 +
  1.2238 +  gclog_or_tty->print_cr("EVACUATION PAUSES");
  1.2239 +
  1.2240 +  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
  1.2241 +    gclog_or_tty->print_cr("none");
  1.2242 +  } else {
  1.2243 +    print_summary_sd("   Evacuation Pauses", &_total);
  1.2244 +    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
  1.2245 +    print_summary("      Parallel Time", &_parallel);
  1.2246 +    print_summary("         Ext Root Scanning", &_ext_root_scan);
  1.2247 +    print_summary("         SATB Filtering", &_satb_filtering);
  1.2248 +    print_summary("         Update RS", &_update_rs);
  1.2249 +    print_summary("         Scan RS", &_scan_rs);
  1.2250 +    print_summary("         Object Copy", &_obj_copy);
  1.2251 +    print_summary("         Termination", &_termination);
  1.2252 +    print_summary("         Parallel Other", &_parallel_other);
  1.2253 +    print_summary("      Clear CT", &_clear_ct);
  1.2254 +    print_summary("      Other", &_other);
  1.2255 +  }
  1.2256 +  gclog_or_tty->cr();
  1.2257 +
  1.2258 +  gclog_or_tty->print_cr("MISC");
  1.2259 +  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  1.2260 +  print_summary_sd("   Yields", &_all_yield_times_ms);
  1.2261 +}
  1.2262 +
  1.2263 +void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
  1.2264 +  if (TraceGen1Time) {
  1.2265 +    _all_full_gc_times.add(full_gc_time_ms);
  1.2266 +  }
  1.2267 +}
  1.2268 +
  1.2269 +void TraceGen1TimeData::print() const {
  1.2270 +  if (!TraceGen1Time) {
  1.2271 +    return;
  1.2272 +  }
  1.2273 +
  1.2274 +  if (_all_full_gc_times.num() > 0) {
  1.2275 +    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
  1.2276 +      _all_full_gc_times.num(),
  1.2277 +      _all_full_gc_times.sum() / 1000.0);
  1.2278 +    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
  1.2279 +    gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
  1.2280 +      _all_full_gc_times.sd(),
  1.2281 +      _all_full_gc_times.maximum());
  1.2282 +  }
  1.2283 +}
