src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Fri, 10 Oct 2014 15:51:58 +0200

author
tschatzl
date
Fri, 10 Oct 2014 15:51:58 +0200
changeset 7257
e7d0505c8a30
parent 7195
c02ec279b062
child 7369
b840813adfcc
permissions
-rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks. This is because they do not touch that memory at all, so the operating system does not actually commit these pages. The fix is to, if the initialization value of the data structures matches the default value of just committed memory (=0), do not do anything.
Reviewed-by: jwilhelm, brutisso

ysr@777 1 /*
drchase@6680 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
drchase@6680 25 #ifndef __clang_major__
drchase@6680 26 #define ATTRIBUTE_PRINTF(x,y) // FIXME, formats are a mess.
drchase@6680 27 #endif
drchase@6680 28
stefank@2314 29 #include "precompiled.hpp"
stefank@2314 30 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 31 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 32 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 33 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 34 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 35 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
brutisso@3923 36 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
brutisso@3710 37 #include "gc_implementation/g1/g1Log.hpp"
stefank@2314 38 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 39 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 40 #include "runtime/arguments.hpp"
stefank@2314 41 #include "runtime/java.hpp"
stefank@2314 42 #include "runtime/mutexLocker.hpp"
stefank@2314 43 #include "utilities/debug.hpp"
ysr@777 44
ysr@777 45 // Different defaults for different number of GC threads
ysr@777 46 // They were chosen by running GCOld and SPECjbb on debris with different
ysr@777 47 // numbers of GC threads and choosing them based on the results
ysr@777 48
ysr@777 49 // all the same
// Seed values for the prediction sequences, one entry per parallel GC
// thread count. They are indexed by MIN2(parallel_gc_threads - 1, 7)
// in the G1CollectorPolicy constructor below.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
ysr@777 84
ysr@777 85 G1CollectorPolicy::G1CollectorPolicy() :
jmasa@2188 86 _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
johnc@3021 87 ? ParallelGCThreads : 1),
jmasa@2188 88
ysr@777 89 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 90 _stop_world_start(0.0),
ysr@777 91
ysr@777 92 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 93 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 94
ysr@777 95 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 96 _prev_collection_pause_end_ms(0.0),
ysr@777 97 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 98 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
tonyp@3337 99 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
tonyp@3337 100 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 101 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
tonyp@3337 102 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 103 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 104 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 105 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 106 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 107 _non_young_other_cost_per_region_ms_seq(
ysr@777 108 new TruncatedSeq(TruncatedSeqLength)),
ysr@777 109
ysr@777 110 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 111 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 112
johnc@1186 113 _pause_time_target_ms((double) MaxGCPauseMillis),
ysr@777 114
tonyp@3337 115 _gcs_are_young(true),
ysr@777 116
ysr@777 117 _during_marking(false),
ysr@777 118 _in_marking_window(false),
ysr@777 119 _in_marking_window_im(false),
ysr@777 120
tonyp@3337 121 _recent_prev_end_times_for_all_gcs_sec(
tonyp@3337 122 new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 123
ysr@777 124 _recent_avg_pause_time_ratio(0.0),
ysr@777 125
tonyp@1794 126 _initiate_conc_mark_if_possible(false),
tonyp@1794 127 _during_initial_mark_pause(false),
tonyp@3337 128 _last_young_gc(false),
tonyp@3337 129 _last_gc_was_young(false),
ysr@777 130
johnc@5123 131 _eden_used_bytes_before_gc(0),
johnc@5123 132 _survivor_used_bytes_before_gc(0),
johnc@5123 133 _heap_used_bytes_before_gc(0),
johnc@5123 134 _metaspace_used_bytes_before_gc(0),
johnc@5123 135 _eden_capacity_bytes_before_gc(0),
johnc@5123 136 _heap_capacity_bytes_before_gc(0),
tonyp@2961 137
tonyp@3289 138 _eden_cset_region_length(0),
tonyp@3289 139 _survivor_cset_region_length(0),
tonyp@3289 140 _old_cset_region_length(0),
tonyp@3289 141
ysr@777 142 _collection_set(NULL),
johnc@1829 143 _collection_set_bytes_used_before(0),
johnc@1829 144
johnc@1829 145 // Incremental CSet attributes
johnc@1829 146 _inc_cset_build_state(Inactive),
johnc@1829 147 _inc_cset_head(NULL),
johnc@1829 148 _inc_cset_tail(NULL),
johnc@1829 149 _inc_cset_bytes_used_before(0),
johnc@1829 150 _inc_cset_max_finger(NULL),
johnc@1829 151 _inc_cset_recorded_rs_lengths(0),
tonyp@3356 152 _inc_cset_recorded_rs_lengths_diffs(0),
johnc@1829 153 _inc_cset_predicted_elapsed_time_ms(0.0),
tonyp@3356 154 _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
johnc@1829 155
ysr@777 156 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 157 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 158 #endif // _MSC_VER
ysr@777 159
ysr@777 160 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
ysr@777 161 G1YoungSurvRateNumRegionsSummary)),
ysr@777 162 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
apetrusenko@980 163 G1YoungSurvRateNumRegionsSummary)),
ysr@777 164 // add here any more surv rate groups
apetrusenko@980 165 _recorded_survivor_regions(0),
apetrusenko@980 166 _recorded_survivor_head(NULL),
apetrusenko@980 167 _recorded_survivor_tail(NULL),
tonyp@1791 168 _survivors_age_table(true),
tonyp@1791 169
tonyp@3114 170 _gc_overhead_perc(0.0) {
tonyp@3114 171
tonyp@1377 172 // Set up the region size and associated fields. Given that the
tonyp@1377 173 // policy is created before the heap, we have to set this up here,
tonyp@1377 174 // so it's done as soon as possible.
brutisso@5646 175
brutisso@5646 176 // It would have been natural to pass initial_heap_byte_size() and
brutisso@5646 177 // max_heap_byte_size() to setup_heap_region_size() but those have
brutisso@5646 178 // not been set up at this point since they should be aligned with
brutisso@5646 179 // the region size. So, there is a circular dependency here. We base
brutisso@5646 180 // the region size on the heap size, but the heap size should be
brutisso@5646 181 // aligned with the region size. To get around this we use the
brutisso@5646 182 // unaligned values for the heap.
brutisso@5646 183 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
iveresov@1696 184 HeapRegionRemSet::setup_remset_size();
tonyp@1377 185
tonyp@3114 186 G1ErgoVerbose::initialize();
tonyp@3114 187 if (PrintAdaptiveSizePolicy) {
tonyp@3114 188 // Currently, we only use a single switch for all the heuristics.
tonyp@3114 189 G1ErgoVerbose::set_enabled(true);
tonyp@3114 190 // Given that we don't currently have a verboseness level
tonyp@3114 191 // parameter, we'll hardcode this to high. This can be easily
tonyp@3114 192 // changed in the future.
tonyp@3114 193 G1ErgoVerbose::set_level(ErgoHigh);
tonyp@3114 194 } else {
tonyp@3114 195 G1ErgoVerbose::set_enabled(false);
tonyp@3114 196 }
tonyp@3114 197
apetrusenko@1826 198 // Verify PLAB sizes
johnc@3182 199 const size_t region_size = HeapRegion::GrainWords;
apetrusenko@1826 200 if (YoungPLABSize > region_size || OldPLABSize > region_size) {
apetrusenko@1826 201 char buffer[128];
johnc@3182 202 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
apetrusenko@1826 203 OldPLABSize > region_size ? "Old" : "Young", region_size);
apetrusenko@1826 204 vm_exit_during_initialization(buffer);
apetrusenko@1826 205 }
apetrusenko@1826 206
ysr@777 207 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
ysr@777 208 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
ysr@777 209
brutisso@3923 210 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
ysr@777 211
brutisso@3923 212 int index = MIN2(_parallel_gc_threads - 1, 7);
ysr@777 213
ysr@777 214 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
ysr@777 215 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
tonyp@3337 216 _young_cards_per_entry_ratio_seq->add(
tonyp@3337 217 young_cards_per_entry_ratio_defaults[index]);
ysr@777 218 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
ysr@777 219 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
ysr@777 220 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
ysr@777 221 _young_other_cost_per_region_ms_seq->add(
ysr@777 222 young_other_cost_per_region_ms_defaults[index]);
ysr@777 223 _non_young_other_cost_per_region_ms_seq->add(
ysr@777 224 non_young_other_cost_per_region_ms_defaults[index]);
ysr@777 225
tonyp@1965 226 // Below, we might need to calculate the pause time target based on
tonyp@1965 227 // the pause interval. When we do so we are going to give G1 maximum
tonyp@1965 228 // flexibility and allow it to do pauses when it needs to. So, we'll
tonyp@1965 229 // arrange that the pause interval to be pause time target + 1 to
tonyp@1965 230 // ensure that a) the pause time target is maximized with respect to
tonyp@1965 231 // the pause interval and b) we maintain the invariant that pause
tonyp@1965 232 // time target < pause interval. If the user does not want this
tonyp@1965 233 // maximum flexibility, they will have to set the pause interval
tonyp@1965 234 // explicitly.
tonyp@1965 235
tonyp@1965 236 // First make sure that, if either parameter is set, its value is
tonyp@1965 237 // reasonable.
tonyp@1965 238 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
tonyp@1965 239 if (MaxGCPauseMillis < 1) {
tonyp@1965 240 vm_exit_during_initialization("MaxGCPauseMillis should be "
tonyp@1965 241 "greater than 0");
tonyp@1965 242 }
tonyp@1965 243 }
tonyp@1965 244 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 245 if (GCPauseIntervalMillis < 1) {
tonyp@1965 246 vm_exit_during_initialization("GCPauseIntervalMillis should be "
tonyp@1965 247 "greater than 0");
tonyp@1965 248 }
tonyp@1965 249 }
tonyp@1965 250
tonyp@1965 251 // Then, if the pause time target parameter was not set, set it to
tonyp@1965 252 // the default value.
tonyp@1965 253 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
tonyp@1965 254 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 255 // The default pause time target in G1 is 200ms
tonyp@1965 256 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
tonyp@1965 257 } else {
tonyp@1965 258 // We do not allow the pause interval to be set without the
tonyp@1965 259 // pause time target
tonyp@1965 260 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
tonyp@1965 261 "without setting MaxGCPauseMillis");
tonyp@1965 262 }
tonyp@1965 263 }
tonyp@1965 264
tonyp@1965 265 // Then, if the interval parameter was not set, set it according to
tonyp@1965 266 // the pause time target (this will also deal with the case when the
tonyp@1965 267 // pause time target is the default value).
tonyp@1965 268 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 269 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
tonyp@1965 270 }
tonyp@1965 271
tonyp@1965 272 // Finally, make sure that the two parameters are consistent.
tonyp@1965 273 if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
tonyp@1965 274 char buffer[256];
tonyp@1965 275 jio_snprintf(buffer, 256,
tonyp@1965 276 "MaxGCPauseMillis (%u) should be less than "
tonyp@1965 277 "GCPauseIntervalMillis (%u)",
tonyp@1965 278 MaxGCPauseMillis, GCPauseIntervalMillis);
tonyp@1965 279 vm_exit_during_initialization(buffer);
tonyp@1965 280 }
tonyp@1965 281
tonyp@1965 282 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
johnc@1186 283 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
ysr@777 284 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
poonam@4650 285
poonam@4650 286 uintx confidence_perc = G1ConfidencePercent;
poonam@4650 287 // Put an artificial ceiling on this so that it's not set to a silly value.
poonam@4650 288 if (confidence_perc > 100) {
poonam@4650 289 confidence_perc = 100;
poonam@4650 290 warning("G1ConfidencePercent is set to a value that is too large, "
poonam@4650 291 "it's been updated to %u", confidence_perc);
poonam@4650 292 }
poonam@4650 293 _sigma = (double) confidence_perc / 100.0;
ysr@777 294
ysr@777 295 // start conservatively (around 50ms is about right)
ysr@777 296 _concurrent_mark_remark_times_ms->add(0.05);
ysr@777 297 _concurrent_mark_cleanup_times_ms->add(0.20);
ysr@777 298 _tenuring_threshold = MaxTenuringThreshold;
tonyp@3066 299 // _max_survivor_regions will be calculated by
tonyp@3119 300 // update_young_list_target_length() during initialization.
tonyp@3066 301 _max_survivor_regions = 0;
apetrusenko@980 302
tonyp@1791 303 assert(GCTimeRatio > 0,
tonyp@1791 304 "we should have set it to a default value set_g1_gc_flags() "
tonyp@1791 305 "if a user set it to 0");
tonyp@1791 306 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
tonyp@1791 307
tonyp@3119 308 uintx reserve_perc = G1ReservePercent;
tonyp@3119 309 // Put an artificial ceiling on this so that it's not set to a silly value.
tonyp@3119 310 if (reserve_perc > 50) {
tonyp@3119 311 reserve_perc = 50;
tonyp@3119 312 warning("G1ReservePercent is set to a value that is too large, "
tonyp@3119 313 "it's been updated to %u", reserve_perc);
tonyp@3119 314 }
tonyp@3119 315 _reserve_factor = (double) reserve_perc / 100.0;
brutisso@3120 316 // This will be set when the heap is expanded
tonyp@3119 317 // for the first time during initialization.
tonyp@3119 318 _reserve_regions = 0;
tonyp@3119 319
tonyp@3209 320 _collectionSetChooser = new CollectionSetChooser();
jwilhelm@6085 321 }
jwilhelm@6085 322
jwilhelm@6085 323 void G1CollectorPolicy::initialize_alignments() {
jwilhelm@6085 324 _space_alignment = HeapRegion::GrainBytes;
jwilhelm@6085 325 size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
jwilhelm@6085 326 size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
jwilhelm@6085 327 _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
ysr@777 328 }
ysr@777 329
// Validate and align command line flags before the heap is created.
void G1CollectorPolicy::initialize_flags() {
  // Record the ergonomically chosen region size back into the flag so
  // the two never disagree.
  if (G1HeapRegionSize != HeapRegion::GrainBytes) {
    FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes);
  }

  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
ysr@777 341
jwilhelm@6085 342 void G1CollectorPolicy::post_heap_initialize() {
jwilhelm@6085 343 uintx max_regions = G1CollectedHeap::heap()->max_regions();
jwilhelm@6085 344 size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes;
jwilhelm@6085 345 if (max_young_size != MaxNewSize) {
jwilhelm@6085 346 FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size);
jwilhelm@6085 347 }
jwilhelm@6085 348 }
brutisso@3120 349
// Decide the sizing policy (_sizer_kind) from which of NewRatio,
// NewSize and MaxNewSize were given on the command line, and derive
// the min/max desired young lengths (in regions) where possible.
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
        _min_desired_young_length(0), _max_desired_young_length(0) {
  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      // Explicit sizes win over the ratio; fall through to the size
      // handling below.
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      // Ratio-based sizing is fixed per heap size, so the adaptive
      // resizing machinery is disabled.
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (NewSize > MaxNewSize) {
    // An inconsistent pair: raise the max to the (larger) min. Only
    // warn if the user set MaxNewSize explicitly.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). "
              "A new max generation size of " SIZE_FORMAT "k will be used.",
              NewSize/K, MaxNewSize/K, NewSize/K);
    }
    MaxNewSize = NewSize;
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    // Convert byte sizes to region counts, never below one region.
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      // Only adaptive when min and max coincide (a fixed young size).
      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}
brutisso@3358 390
tonyp@3713 391 uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
johnc@4385 392 uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
tonyp@3713 393 return MAX2(1U, default_value);
brutisso@3358 394 }
brutisso@3358 395
tonyp@3713 396 uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
johnc@4385 397 uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
tonyp@3713 398 return MAX2(1U, default_value);
brutisso@3358 399 }
brutisso@3358 400
// Recompute *min_young_length / *max_young_length for the given heap
// size according to the sizing policy. Callers must pre-seed both
// outputs with the current desired values, because some cases update
// only one of them (or neither, for SizerMaxAndNewSize).
void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) {
  assert(number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      // Neither bound fixed by the user: both come from the defaults.
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      // Min is fixed (NewSize); recompute max but keep it >= min.
      *max_young_length = calculate_default_max_length(number_of_heap_regions);
      *max_young_length = MAX2(*min_young_length, *max_young_length);
      break;
    case SizerMaxNewSizeOnly:
      // Max is fixed (MaxNewSize); recompute min but keep it <= max.
      *min_young_length = calculate_default_min_length(number_of_heap_regions);
      *min_young_length = MIN2(*min_young_length, *max_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      // Fixed fraction of the heap: min and max coincide.
      *min_young_length = number_of_heap_regions / (NewRatio + 1);
      *max_young_length = *min_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values");
}
jwilhelm@6085 430
jwilhelm@6085 431 uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) {
jwilhelm@6085 432 // We need to pass the desired values because recalculation may not update these
jwilhelm@6085 433 // values in some cases.
jwilhelm@6085 434 uint temp = _min_desired_young_length;
jwilhelm@6085 435 uint result = _max_desired_young_length;
jwilhelm@6085 436 recalculate_min_max_young_length(number_of_heap_regions, &temp, &result);
jwilhelm@6085 437 return result;
jwilhelm@6085 438 }
jwilhelm@6085 439
// The heap grew or shrank: refresh the cached desired min/max young
// lengths in place for the new region count.
void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length,
          &_max_desired_young_length);
}
brutisso@3358 444
// Late initialization, run once the heap exists (requires Heap_lock).
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  // A fixed young length is only meaningful when the length is not
  // adapted at runtime; otherwise it stays 0 and is ignored.
  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}
ysr@777 465
apetrusenko@980 466 // Create the jstat counters for the policy.
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  // "GarbageFirst" policy with 1 young collector and 3 generation slots.
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
apetrusenko@980 470
// Predict whether a young list of young_length regions fits: it must
// leave free regions, keep the predicted pause under the target, and
// leave enough free space for the predicted survivor bytes.
bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  // Predicted bytes surviving a collection of young_length regions,
  // from the accumulated per-age survival rate predictions.
  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
  // Scale the copy estimate by 2*sigma to be conservative about the
  // prediction's confidence interval.
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}
ysr@777 501
tonyp@3713 502 void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
brutisso@3120 503 // re-calculate the necessary reserve
brutisso@3120 504 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
tonyp@3119 505 // We use ceiling so that if reserve_regions_d is > 0.0 (but
tonyp@3119 506 // smaller than 1.0) we'll get 1.
tonyp@3713 507 _reserve_regions = (uint) ceil(reserve_regions_d);
brutisso@3120 508
brutisso@3358 509 _young_gen_sizer->heap_size_changed(new_number_of_regions);
tonyp@3119 510 }
tonyp@3119 511
// Desired minimum young length: the regions we already have
// (base_min_length) plus, when adapting, enough eden to absorb the
// predicted allocation until the next allowed GC — bounded below by
// any user-defined minimum.
uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     uint base_min_length) {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    // Only predict once we have a few allocation rate samples.
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      // Time (ms) until the MMU tracker next permits a GC.
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}
ysr@777 529
// Desired maximum young length: currently just the sizer's max bound.
uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}
tonyp@3119 536
// Recompute _young_list_target_length. rs_lengths is the (predicted or
// measured) remembered set length; (size_t)-1 means "predict it".
// The result is bounded into [desired_min, desired_max], with the min
// winning on conflict, and always leaves room for one eden region.
void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  uint absolute_min_length = base_min_length + 1;
  uint desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}
ysr@777 606
tonyp@3713 607 uint
tonyp@3119 608 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
tonyp@3713 609 uint base_min_length,
tonyp@3713 610 uint desired_min_length,
tonyp@3713 611 uint desired_max_length) {
tonyp@3119 612 assert(adaptive_young_list_length(), "pre-condition");
tonyp@3337 613 assert(gcs_are_young(), "only call this for young GCs");
tonyp@3119 614
tonyp@3119 615 // In case some edge-condition makes the desired max length too small...
tonyp@3119 616 if (desired_max_length <= desired_min_length) {
tonyp@3119 617 return desired_min_length;
tonyp@3119 618 }
tonyp@3119 619
tonyp@3119 620 // We'll adjust min_young_length and max_young_length not to include
tonyp@3119 621 // the already allocated young regions (i.e., so they reflect the
tonyp@3119 622 // min and max eden regions we'll allocate). The base_min_length
tonyp@3119 623 // will be reflected in the predictions by the
tonyp@3119 624 // survivor_regions_evac_time prediction.
tonyp@3119 625 assert(desired_min_length > base_min_length, "invariant");
tonyp@3713 626 uint min_young_length = desired_min_length - base_min_length;
tonyp@3119 627 assert(desired_max_length > base_min_length, "invariant");
tonyp@3713 628 uint max_young_length = desired_max_length - base_min_length;
tonyp@3119 629
tonyp@3119 630 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
tonyp@3119 631 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
tonyp@3119 632 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
tonyp@3119 633 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
tonyp@3119 634 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
tonyp@3119 635 double base_time_ms =
tonyp@3119 636 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
tonyp@3119 637 survivor_regions_evac_time;
tonyp@3713 638 uint available_free_regions = _free_regions_at_end_of_collection;
tonyp@3713 639 uint base_free_regions = 0;
tonyp@3119 640 if (available_free_regions > _reserve_regions) {
tonyp@3119 641 base_free_regions = available_free_regions - _reserve_regions;
tonyp@3119 642 }
tonyp@3119 643
tonyp@3119 644 // Here, we will make sure that the shortest young length that
tonyp@3119 645 // makes sense fits within the target pause time.
tonyp@3119 646
tonyp@3119 647 if (predict_will_fit(min_young_length, base_time_ms,
tonyp@3119 648 base_free_regions, target_pause_time_ms)) {
tonyp@3119 649 // The shortest young length will fit into the target pause time;
tonyp@3119 650 // we'll now check whether the absolute maximum number of young
tonyp@3119 651 // regions will fit in the target pause time. If not, we'll do
tonyp@3119 652 // a binary search between min_young_length and max_young_length.
tonyp@3119 653 if (predict_will_fit(max_young_length, base_time_ms,
tonyp@3119 654 base_free_regions, target_pause_time_ms)) {
tonyp@3119 655 // The maximum young length will fit into the target pause time.
tonyp@3119 656 // We are done so set min young length to the maximum length (as
tonyp@3119 657 // the result is assumed to be returned in min_young_length).
tonyp@3119 658 min_young_length = max_young_length;
tonyp@3119 659 } else {
tonyp@3119 660 // The maximum possible number of young regions will not fit within
tonyp@3119 661 // the target pause time so we'll search for the optimal
tonyp@3119 662 // length. The loop invariants are:
tonyp@3119 663 //
tonyp@3119 664 // min_young_length < max_young_length
tonyp@3119 665 // min_young_length is known to fit into the target pause time
tonyp@3119 666 // max_young_length is known not to fit into the target pause time
tonyp@3119 667 //
tonyp@3119 668 // Going into the loop we know the above hold as we've just
tonyp@3119 669 // checked them. Every time around the loop we check whether
tonyp@3119 670 // the middle value between min_young_length and
tonyp@3119 671 // max_young_length fits into the target pause time. If it
tonyp@3119 672 // does, it becomes the new min. If it doesn't, it becomes
tonyp@3119 673 // the new max. This way we maintain the loop invariants.
tonyp@3119 674
tonyp@3119 675 assert(min_young_length < max_young_length, "invariant");
tonyp@3713 676 uint diff = (max_young_length - min_young_length) / 2;
tonyp@3119 677 while (diff > 0) {
tonyp@3713 678 uint young_length = min_young_length + diff;
tonyp@3119 679 if (predict_will_fit(young_length, base_time_ms,
tonyp@3119 680 base_free_regions, target_pause_time_ms)) {
tonyp@3119 681 min_young_length = young_length;
tonyp@3119 682 } else {
tonyp@3119 683 max_young_length = young_length;
tonyp@3119 684 }
tonyp@3119 685 assert(min_young_length < max_young_length, "invariant");
tonyp@3119 686 diff = (max_young_length - min_young_length) / 2;
tonyp@3119 687 }
tonyp@3119 688 // The results is min_young_length which, according to the
tonyp@3119 689 // loop invariants, should fit within the target pause time.
tonyp@3119 690
tonyp@3119 691 // These are the post-conditions of the binary search above:
tonyp@3119 692 assert(min_young_length < max_young_length,
tonyp@3119 693 "otherwise we should have discovered that max_young_length "
tonyp@3119 694 "fits into the pause target and not done the binary search");
tonyp@3119 695 assert(predict_will_fit(min_young_length, base_time_ms,
tonyp@3119 696 base_free_regions, target_pause_time_ms),
tonyp@3119 697 "min_young_length, the result of the binary search, should "
tonyp@3119 698 "fit into the pause target");
tonyp@3119 699 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
tonyp@3119 700 base_free_regions, target_pause_time_ms),
tonyp@3119 701 "min_young_length, the result of the binary search, should be "
tonyp@3119 702 "optimal, so no larger length should fit into the pause target");
tonyp@3119 703 }
tonyp@3119 704 } else {
tonyp@3119 705 // Even the minimum length doesn't fit into the pause time
tonyp@3119 706 // target, return it as the result nevertheless.
tonyp@3119 707 }
tonyp@3119 708 return base_min_length + min_young_length;
ysr@777 709 }
ysr@777 710
apetrusenko@980 711 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 712 double survivor_regions_evac_time = 0.0;
apetrusenko@980 713 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 714 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 715 r = r->get_next_young_region()) {
johnc@3998 716 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
apetrusenko@980 717 }
apetrusenko@980 718 return survivor_regions_evac_time;
apetrusenko@980 719 }
apetrusenko@980 720
tonyp@3119 721 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
ysr@777 722 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 723
johnc@1829 724 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 725 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 726 // add 10% to avoid having to recalculate often
ysr@777 727 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
tonyp@3119 728 update_young_list_target_length(rs_lengths_prediction);
ysr@777 729 }
ysr@777 730 }
ysr@777 731
tonyp@3119 732
tonyp@3119 733
ysr@777 734 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
ysr@777 735 bool is_tlab,
ysr@777 736 bool* gc_overhead_limit_was_exceeded) {
ysr@777 737 guarantee(false, "Not using this policy feature yet.");
ysr@777 738 return NULL;
ysr@777 739 }
ysr@777 740
ysr@777 741 // This method controls how a collector handles one or more
ysr@777 742 // of its generations being fully allocated.
ysr@777 743 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
ysr@777 744 bool is_tlab) {
ysr@777 745 guarantee(false, "Not using this policy feature yet.");
ysr@777 746 return NULL;
ysr@777 747 }
ysr@777 748
ysr@777 749
ysr@777 750 #ifndef PRODUCT
ysr@777 751 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 752 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 753 return
ysr@777 754 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 755 // also call verify_young_ages on any additional surv rate groups
ysr@777 756 }
ysr@777 757
ysr@777 758 bool
ysr@777 759 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 760 SurvRateGroup *surv_rate_group) {
ysr@777 761 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 762
ysr@777 763 const char* name = surv_rate_group->name();
ysr@777 764 bool ret = true;
ysr@777 765 int prev_age = -1;
ysr@777 766
ysr@777 767 for (HeapRegion* curr = head;
ysr@777 768 curr != NULL;
ysr@777 769 curr = curr->get_next_young_region()) {
ysr@777 770 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 771 if (group == NULL && !curr->is_survivor()) {
ysr@777 772 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 773 ret = false;
ysr@777 774 }
ysr@777 775
ysr@777 776 if (surv_rate_group == group) {
ysr@777 777 int age = curr->age_in_surv_rate_group();
ysr@777 778
ysr@777 779 if (age < 0) {
ysr@777 780 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 781 ret = false;
ysr@777 782 }
ysr@777 783
ysr@777 784 if (age <= prev_age) {
ysr@777 785 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 786 "(%d, %d)", name, age, prev_age);
ysr@777 787 ret = false;
ysr@777 788 }
ysr@777 789 prev_age = age;
ysr@777 790 }
ysr@777 791 }
ysr@777 792
ysr@777 793 return ret;
ysr@777 794 }
ysr@777 795 #endif // PRODUCT
ysr@777 796
ysr@777 797 void G1CollectorPolicy::record_full_collection_start() {
brutisso@3923 798 _full_collection_start_sec = os::elapsedTime();
johnc@5123 799 record_heap_size_info_at_start(true /* full */);
ysr@777 800 // Release the future to-space so that it is available for compaction into.
ysr@777 801 _g1->set_full_collection();
ysr@777 802 }
ysr@777 803
ysr@777 804 void G1CollectorPolicy::record_full_collection_end() {
ysr@777 805 // Consider this like a collection pause for the purposes of allocation
ysr@777 806 // since last pause.
ysr@777 807 double end_sec = os::elapsedTime();
brutisso@3923 808 double full_gc_time_sec = end_sec - _full_collection_start_sec;
ysr@777 809 double full_gc_time_ms = full_gc_time_sec * 1000.0;
ysr@777 810
brutisso@3812 811 _trace_gen1_time_data.record_full_collection(full_gc_time_ms);
ysr@777 812
tonyp@1030 813 update_recent_gc_times(end_sec, full_gc_time_ms);
ysr@777 814
ysr@777 815 _g1->clear_full_collection();
ysr@777 816
tonyp@3337 817 // "Nuke" the heuristics that control the young/mixed GC
tonyp@3337 818 // transitions and make sure we start with young GCs after the Full GC.
tonyp@3337 819 set_gcs_are_young(true);
tonyp@3337 820 _last_young_gc = false;
tonyp@1794 821 clear_initiate_conc_mark_if_possible();
tonyp@1794 822 clear_during_initial_mark_pause();
ysr@777 823 _in_marking_window = false;
ysr@777 824 _in_marking_window_im = false;
ysr@777 825
ysr@777 826 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 827 // also call this on any additional surv rate groups
ysr@777 828
apetrusenko@980 829 record_survivor_regions(0, NULL, NULL);
apetrusenko@980 830
tschatzl@7050 831 _free_regions_at_end_of_collection = _g1->num_free_regions();
apetrusenko@980 832 // Reset survivors SurvRateGroup.
apetrusenko@980 833 _survivor_surv_rate_group->reset();
tonyp@3119 834 update_young_list_target_length();
tonyp@3714 835 _collectionSetChooser->clear();
tonyp@2315 836 }
ysr@777 837
ysr@777 838 void G1CollectorPolicy::record_stop_world_start() {
ysr@777 839 _stop_world_start = os::elapsedTime();
ysr@777 840 }
ysr@777 841
johnc@4929 842 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
tonyp@3464 843 // We only need to do this here as the policy will only be applied
tonyp@3464 844 // to the GC we're about to start. so, no point is calculating this
tonyp@3464 845 // every time we calculate / recalculate the target young length.
tonyp@3464 846 update_survivors_policy();
tonyp@3119 847
tonyp@2315 848 assert(_g1->used() == _g1->recalculate_used(),
tonyp@2315 849 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
tonyp@2315 850 _g1->used(), _g1->recalculate_used()));
ysr@777 851
ysr@777 852 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
brutisso@3812 853 _trace_gen0_time_data.record_start_collection(s_w_t_ms);
ysr@777 854 _stop_world_start = 0.0;
ysr@777 855
johnc@5123 856 record_heap_size_info_at_start(false /* full */);
johnc@4929 857
brutisso@4015 858 phase_times()->record_cur_collection_start_sec(start_time_sec);
ysr@777 859 _pending_cards = _g1->pending_card_num();
ysr@777 860
johnc@3998 861 _collection_set_bytes_used_before = 0;
tonyp@3028 862 _bytes_copied_during_gc = 0;
ysr@777 863
tonyp@3337 864 _last_gc_was_young = false;
ysr@777 865
ysr@777 866 // do that for any other surv rate groups
ysr@777 867 _short_lived_surv_rate_group->stop_adding_regions();
tonyp@1717 868 _survivors_age_table.clear();
apetrusenko@980 869
ysr@777 870 assert( verify_young_ages(), "region age verification" );
ysr@777 871 }
ysr@777 872
brutisso@3065 873 void G1CollectorPolicy::record_concurrent_mark_init_end(double
ysr@777 874 mark_init_elapsed_time_ms) {
ysr@777 875 _during_marking = true;
tonyp@1794 876 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
tonyp@1794 877 clear_during_initial_mark_pause();
ysr@777 878 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
ysr@777 879 }
ysr@777 880
ysr@777 881 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
ysr@777 882 _mark_remark_start_sec = os::elapsedTime();
ysr@777 883 _during_marking = false;
ysr@777 884 }
ysr@777 885
ysr@777 886 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 887 double end_time_sec = os::elapsedTime();
ysr@777 888 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 889 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 890 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 891 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 892
ysr@777 893 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 894 }
ysr@777 895
ysr@777 896 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
ysr@777 897 _mark_cleanup_start_sec = os::elapsedTime();
ysr@777 898 }
ysr@777 899
tonyp@3209 900 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
tonyp@3337 901 _last_young_gc = true;
brutisso@3065 902 _in_marking_window = false;
ysr@777 903 }
ysr@777 904
ysr@777 905 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 906 if (_stop_world_start > 0.0) {
ysr@777 907 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
brutisso@3812 908 _trace_gen0_time_data.record_yield_time(yield_ms);
ysr@777 909 }
ysr@777 910 }
ysr@777 911
brutisso@3461 912 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
brutisso@3461 913 if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
brutisso@3456 914 return false;
brutisso@3456 915 }
brutisso@3456 916
brutisso@3456 917 size_t marking_initiating_used_threshold =
brutisso@3456 918 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
brutisso@3456 919 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
brutisso@3461 920 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
brutisso@3461 921
brutisso@3461 922 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
brutisso@5398 923 if (gcs_are_young() && !_last_young_gc) {
brutisso@3461 924 ergo_verbose5(ErgoConcCycles,
brutisso@3456 925 "request concurrent cycle initiation",
brutisso@3456 926 ergo_format_reason("occupancy higher than threshold")
brutisso@3456 927 ergo_format_byte("occupancy")
brutisso@3461 928 ergo_format_byte("allocation request")
brutisso@3456 929 ergo_format_byte_perc("threshold")
brutisso@3456 930 ergo_format_str("source"),
brutisso@3456 931 cur_used_bytes,
brutisso@3461 932 alloc_byte_size,
brutisso@3456 933 marking_initiating_used_threshold,
brutisso@3456 934 (double) InitiatingHeapOccupancyPercent,
brutisso@3456 935 source);
brutisso@3456 936 return true;
brutisso@3456 937 } else {
brutisso@3461 938 ergo_verbose5(ErgoConcCycles,
brutisso@3456 939 "do not request concurrent cycle initiation",
brutisso@3456 940 ergo_format_reason("still doing mixed collections")
brutisso@3456 941 ergo_format_byte("occupancy")
brutisso@3461 942 ergo_format_byte("allocation request")
brutisso@3456 943 ergo_format_byte_perc("threshold")
brutisso@3456 944 ergo_format_str("source"),
brutisso@3456 945 cur_used_bytes,
brutisso@3461 946 alloc_byte_size,
brutisso@3456 947 marking_initiating_used_threshold,
brutisso@3456 948 (double) InitiatingHeapOccupancyPercent,
brutisso@3456 949 source);
brutisso@3456 950 }
brutisso@3456 951 }
brutisso@3456 952
brutisso@3456 953 return false;
brutisso@3456 954 }
brutisso@3456 955
ysr@777 956 // Anything below that is considered to be zero
ysr@777 957 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 958
sla@5237 959 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
ysr@777 960 double end_time_sec = os::elapsedTime();
tonyp@3289 961 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
tonyp@3289 962 "otherwise, the subtraction below does not make sense");
ysr@777 963 size_t rs_size =
tonyp@3289 964 _cur_collection_pause_used_regions_at_start - cset_region_length();
ysr@777 965 size_t cur_used_bytes = _g1->used();
ysr@777 966 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 967 bool last_pause_included_initial_mark = false;
tonyp@2062 968 bool update_stats = !_g1->evacuation_failed();
ysr@777 969
ysr@777 970 #ifndef PRODUCT
ysr@777 971 if (G1YoungSurvRateVerbose) {
drchase@6680 972 gclog_or_tty->cr();
ysr@777 973 _short_lived_surv_rate_group->print();
ysr@777 974 // do that for any other surv rate groups too
ysr@777 975 }
ysr@777 976 #endif // PRODUCT
ysr@777 977
brutisso@3065 978 last_pause_included_initial_mark = during_initial_mark_pause();
brutisso@3456 979 if (last_pause_included_initial_mark) {
brutisso@3065 980 record_concurrent_mark_init_end(0.0);
brutisso@5398 981 } else if (need_to_start_conc_mark("end of GC")) {
brutisso@3456 982 // Note: this might have already been set, if during the last
brutisso@3456 983 // pause we decided to start a cycle but at the beginning of
brutisso@3456 984 // this pause we decided to postpone it. That's OK.
brutisso@3456 985 set_initiate_conc_mark_if_possible();
brutisso@3456 986 }
brutisso@3065 987
brutisso@3923 988 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
ysr@777 989 end_time_sec, false);
ysr@777 990
sla@5237 991 evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
sla@5237 992 evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
sla@5237 993
tonyp@1030 994 if (update_stats) {
brutisso@3923 995 _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
ysr@777 996 // this is where we update the allocation rate of the application
ysr@777 997 double app_time_ms =
brutisso@4015 998 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 999 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1000 // This usually happens due to the timer not having the required
ysr@777 1001 // granularity. Some Linuxes are the usual culprits.
ysr@777 1002 // We'll just set it to something (arbitrarily) small.
ysr@777 1003 app_time_ms = 1.0;
ysr@777 1004 }
tonyp@3289 1005 // We maintain the invariant that all objects allocated by mutator
tonyp@3289 1006 // threads will be allocated out of eden regions. So, we can use
tonyp@3289 1007 // the eden region number allocated since the previous GC to
tonyp@3289 1008 // calculate the application's allocate rate. The only exception
tonyp@3289 1009 // to that is humongous objects that are allocated separately. But
tonyp@3289 1010 // given that humongous object allocations do not really affect
tonyp@3289 1011 // either the pause's duration nor when the next pause will take
tonyp@3289 1012 // place we can safely ignore them here.
tonyp@3713 1013 uint regions_allocated = eden_cset_region_length();
ysr@777 1014 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1015 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1016
ysr@777 1017 double interval_ms =
ysr@777 1018 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
brutisso@3923 1019 update_recent_gc_times(end_time_sec, pause_time_ms);
ysr@777 1020 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1021 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1022 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1023 #ifndef PRODUCT
ysr@1521 1024 // Dump info to allow post-facto debugging
ysr@1521 1025 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1026 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1027 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1028 _recent_gc_times_ms->dump();
ysr@1521 1029 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1030 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1031 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1032 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1033 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1034 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1035 #endif // !PRODUCT
ysr@1522 1036 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1037 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1038 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1039 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1040 } else {
ysr@1521 1041 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1042 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1043 }
ysr@1521 1044 }
ysr@777 1045 }
johnc@5123 1046
ysr@777 1047 bool new_in_marking_window = _in_marking_window;
ysr@777 1048 bool new_in_marking_window_im = false;
tschatzl@7006 1049 if (last_pause_included_initial_mark) {
ysr@777 1050 new_in_marking_window = true;
ysr@777 1051 new_in_marking_window_im = true;
ysr@777 1052 }
ysr@777 1053
tonyp@3337 1054 if (_last_young_gc) {
tonyp@3539 1055 // This is supposed to to be the "last young GC" before we start
tonyp@3539 1056 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
tonyp@3539 1057
johnc@3178 1058 if (!last_pause_included_initial_mark) {
tonyp@3539 1059 if (next_gc_should_be_mixed("start mixed GCs",
tonyp@3539 1060 "do not start mixed GCs")) {
tonyp@3539 1061 set_gcs_are_young(false);
tonyp@3539 1062 }
johnc@3178 1063 } else {
tonyp@3337 1064 ergo_verbose0(ErgoMixedGCs,
tonyp@3337 1065 "do not start mixed GCs",
johnc@3178 1066 ergo_format_reason("concurrent cycle is about to start"));
johnc@3178 1067 }
tonyp@3337 1068 _last_young_gc = false;
brutisso@3065 1069 }
brutisso@3065 1070
tonyp@3337 1071 if (!_last_gc_was_young) {
tonyp@3539 1072 // This is a mixed GC. Here we decide whether to continue doing
tonyp@3539 1073 // mixed GCs or not.
tonyp@3539 1074
tonyp@3539 1075 if (!next_gc_should_be_mixed("continue mixed GCs",
tonyp@3539 1076 "do not continue mixed GCs")) {
tonyp@3337 1077 set_gcs_are_young(true);
ysr@777 1078 }
brutisso@3065 1079 }
tonyp@3337 1080
ysr@777 1081 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1082 // do that for any other surv rate groupsx
ysr@777 1083
apetrusenko@1112 1084 if (update_stats) {
ysr@777 1085 double cost_per_card_ms = 0.0;
ysr@777 1086 if (_pending_cards > 0) {
brutisso@4015 1087 cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
ysr@777 1088 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1089 }
ysr@777 1090
ysr@777 1091 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1092
ysr@777 1093 double cost_per_entry_ms = 0.0;
ysr@777 1094 if (cards_scanned > 10) {
brutisso@4015 1095 cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
tonyp@3337 1096 if (_last_gc_was_young) {
ysr@777 1097 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1098 } else {
tonyp@3337 1099 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1100 }
ysr@777 1101 }
ysr@777 1102
ysr@777 1103 if (_max_rs_lengths > 0) {
ysr@777 1104 double cards_per_entry_ratio =
ysr@777 1105 (double) cards_scanned / (double) _max_rs_lengths;
tonyp@3337 1106 if (_last_gc_was_young) {
tonyp@3337 1107 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1108 } else {
tonyp@3337 1109 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1110 }
ysr@777 1111 }
ysr@777 1112
tonyp@3356 1113 // This is defensive. For a while _max_rs_lengths could get
tonyp@3356 1114 // smaller than _recorded_rs_lengths which was causing
tonyp@3356 1115 // rs_length_diff to get very large and mess up the RSet length
tonyp@3356 1116 // predictions. The reason was unsafe concurrent updates to the
tonyp@3356 1117 // _inc_cset_recorded_rs_lengths field which the code below guards
tonyp@3356 1118 // against (see CR 7118202). This bug has now been fixed (see CR
tonyp@3356 1119 // 7119027). However, I'm still worried that
tonyp@3356 1120 // _inc_cset_recorded_rs_lengths might still end up somewhat
tonyp@3356 1121 // inaccurate. The concurrent refinement thread calculates an
tonyp@3356 1122 // RSet's length concurrently with other CR threads updating it
tonyp@3356 1123 // which might cause it to calculate the length incorrectly (if,
tonyp@3356 1124 // say, it's in mid-coarsening). So I'll leave in the defensive
tonyp@3356 1125 // conditional below just in case.
tonyp@3326 1126 size_t rs_length_diff = 0;
tonyp@3326 1127 if (_max_rs_lengths > _recorded_rs_lengths) {
tonyp@3326 1128 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
tonyp@3326 1129 }
tonyp@3326 1130 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1131
johnc@5123 1132 size_t freed_bytes = _heap_used_bytes_before_gc - cur_used_bytes;
johnc@5123 1133 size_t copied_bytes = _collection_set_bytes_used_before - freed_bytes;
ysr@777 1134 double cost_per_byte_ms = 0.0;
johnc@5123 1135
ysr@777 1136 if (copied_bytes > 0) {
brutisso@4015 1137 cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
tonyp@3337 1138 if (_in_marking_window) {
ysr@777 1139 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
tonyp@3337 1140 } else {
ysr@777 1141 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
tonyp@3337 1142 }
ysr@777 1143 }
ysr@777 1144
ysr@777 1145 double all_other_time_ms = pause_time_ms -
brutisso@4015 1146 (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
brutisso@4015 1147 + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
ysr@777 1148
ysr@777 1149 double young_other_time_ms = 0.0;
tonyp@3289 1150 if (young_cset_region_length() > 0) {
ysr@777 1151 young_other_time_ms =
brutisso@4015 1152 phase_times()->young_cset_choice_time_ms() +
brutisso@4015 1153 phase_times()->young_free_cset_time_ms();
ysr@777 1154 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
tonyp@3289 1155 (double) young_cset_region_length());
ysr@777 1156 }
ysr@777 1157 double non_young_other_time_ms = 0.0;
tonyp@3289 1158 if (old_cset_region_length() > 0) {
ysr@777 1159 non_young_other_time_ms =
brutisso@4015 1160 phase_times()->non_young_cset_choice_time_ms() +
brutisso@4015 1161 phase_times()->non_young_free_cset_time_ms();
ysr@777 1162
ysr@777 1163 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
tonyp@3289 1164 (double) old_cset_region_length());
ysr@777 1165 }
ysr@777 1166
ysr@777 1167 double constant_other_time_ms = all_other_time_ms -
ysr@777 1168 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1169 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1170
ysr@777 1171 double survival_ratio = 0.0;
johnc@3998 1172 if (_collection_set_bytes_used_before > 0) {
tonyp@3028 1173 survival_ratio = (double) _bytes_copied_during_gc /
johnc@3998 1174 (double) _collection_set_bytes_used_before;
ysr@777 1175 }
ysr@777 1176
ysr@777 1177 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1178 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1179 }
ysr@777 1180
ysr@777 1181 _in_marking_window = new_in_marking_window;
ysr@777 1182 _in_marking_window_im = new_in_marking_window_im;
tschatzl@7050 1183 _free_regions_at_end_of_collection = _g1->num_free_regions();
tonyp@3119 1184 update_young_list_target_length();
ysr@777 1185
iveresov@1546 1186 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1187 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
brutisso@4015 1188 adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
brutisso@4015 1189 phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
tonyp@3209 1190
tonyp@3714 1191 _collectionSetChooser->verify();
ysr@777 1192 }
ysr@777 1193
brutisso@3762 1194 #define EXT_SIZE_FORMAT "%.1f%s"
tonyp@2961 1195 #define EXT_SIZE_PARAMS(bytes) \
brutisso@3762 1196 byte_size_in_proper_unit((double)(bytes)), \
tonyp@2961 1197 proper_unit_for_byte_size((bytes))
tonyp@2961 1198
johnc@5123 1199 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
johnc@4929 1200 YoungList* young_list = _g1->young_list();
johnc@5123 1201 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
johnc@5123 1202 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
johnc@5123 1203 _heap_capacity_bytes_before_gc = _g1->capacity();
johnc@5123 1204 _heap_used_bytes_before_gc = _g1->used();
tschatzl@7050 1205 _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
johnc@4929 1206
johnc@5123 1207 _eden_capacity_bytes_before_gc =
johnc@5123 1208 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
johnc@4929 1209
johnc@5123 1210 if (full) {
ehelin@6609 1211 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
johnc@5123 1212 }
johnc@4929 1213 }
johnc@4929 1214
tonyp@2961 1215 void G1CollectorPolicy::print_heap_transition() {
brutisso@4015 1216 _g1->print_size_transition(gclog_or_tty,
johnc@5123 1217 _heap_used_bytes_before_gc,
johnc@5123 1218 _g1->used(),
johnc@5123 1219 _g1->capacity());
brutisso@4015 1220 }
brutisso@4015 1221
johnc@5123 1222 void G1CollectorPolicy::print_detailed_heap_transition(bool full) {
johnc@5123 1223 YoungList* young_list = _g1->young_list();
tonyp@2961 1224
johnc@5123 1225 size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
johnc@5123 1226 size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
johnc@5123 1227 size_t heap_used_bytes_after_gc = _g1->used();
johnc@5123 1228
johnc@5123 1229 size_t heap_capacity_bytes_after_gc = _g1->capacity();
johnc@5123 1230 size_t eden_capacity_bytes_after_gc =
johnc@5123 1231 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
johnc@5123 1232
johnc@5123 1233 gclog_or_tty->print(
johnc@5123 1234 " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
johnc@5123 1235 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
johnc@5123 1236 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
johnc@5123 1237 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
johnc@5123 1238 EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
johnc@5123 1239 EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
johnc@5123 1240 EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
johnc@5123 1241 EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
johnc@5123 1242 EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
johnc@5123 1243 EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
johnc@5123 1244 EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
johnc@5123 1245 EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
johnc@5123 1246 EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
johnc@5123 1247 EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
johnc@5123 1248
johnc@5123 1249 if (full) {
johnc@5123 1250 MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
johnc@5123 1251 }
johnc@5123 1252
johnc@5123 1253 gclog_or_tty->cr();
tonyp@2961 1254 }
tonyp@2961 1255
iveresov@1546 1256 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1257 double update_rs_processed_buffers,
iveresov@1546 1258 double goal_ms) {
iveresov@1546 1259 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1260 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1261
tonyp@1717 1262 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1263 const int k_gy = 3, k_gr = 6;
iveresov@1546 1264 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1265
iveresov@1546 1266 int g = cg1r->green_zone();
iveresov@1546 1267 if (update_rs_time > goal_ms) {
iveresov@1546 1268 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
iveresov@1546 1269 } else {
iveresov@1546 1270 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1271 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1272 }
iveresov@1546 1273 }
iveresov@1546 1274 // Change the refinement threads params
iveresov@1546 1275 cg1r->set_green_zone(g);
iveresov@1546 1276 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1277 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1278 cg1r->reinitialize_threads();
iveresov@1546 1279
iveresov@1546 1280 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1281 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1282 cg1r->yellow_zone());
iveresov@1546 1283 // Change the barrier params
iveresov@1546 1284 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1285 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1286 }
iveresov@1546 1287
iveresov@1546 1288 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1289 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1290 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1291 } else {
iveresov@1546 1292 dcqs.set_completed_queue_padding(0);
iveresov@1546 1293 }
iveresov@1546 1294 dcqs.notify_if_necessary();
iveresov@1546 1295 }
iveresov@1546 1296
ysr@777 1297 double
johnc@3998 1298 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
johnc@3998 1299 size_t scanned_cards) {
johnc@3998 1300 return
johnc@3998 1301 predict_rs_update_time_ms(pending_cards) +
johnc@3998 1302 predict_rs_scan_time_ms(scanned_cards) +
johnc@3998 1303 predict_constant_other_time_ms();
johnc@3998 1304 }
johnc@3998 1305
johnc@3998 1306 double
ysr@777 1307 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1308 size_t rs_length = predict_rs_length_diff();
ysr@777 1309 size_t card_num;
tonyp@3337 1310 if (gcs_are_young()) {
ysr@777 1311 card_num = predict_young_card_num(rs_length);
tonyp@3337 1312 } else {
ysr@777 1313 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1314 }
ysr@777 1315 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1316 }
ysr@777 1317
tonyp@3713 1318 size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1319 size_t bytes_to_copy;
ysr@777 1320 if (hr->is_marked())
ysr@777 1321 bytes_to_copy = hr->max_live_bytes();
ysr@777 1322 else {
tonyp@3539 1323 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
ysr@777 1324 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1325 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1326 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1327 }
ysr@777 1328 return bytes_to_copy;
ysr@777 1329 }
ysr@777 1330
johnc@3998 1331 double
johnc@3998 1332 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
johnc@3998 1333 bool for_young_gc) {
johnc@3998 1334 size_t rs_length = hr->rem_set()->occupied();
johnc@3998 1335 size_t card_num;
johnc@3998 1336
johnc@3998 1337 // Predicting the number of cards is based on which type of GC
johnc@3998 1338 // we're predicting for.
johnc@3998 1339 if (for_young_gc) {
johnc@3998 1340 card_num = predict_young_card_num(rs_length);
johnc@3998 1341 } else {
johnc@3998 1342 card_num = predict_non_young_card_num(rs_length);
johnc@3998 1343 }
johnc@3998 1344 size_t bytes_to_copy = predict_bytes_to_copy(hr);
johnc@3998 1345
johnc@3998 1346 double region_elapsed_time_ms =
johnc@3998 1347 predict_rs_scan_time_ms(card_num) +
johnc@3998 1348 predict_object_copy_time_ms(bytes_to_copy);
johnc@3998 1349
johnc@3998 1350 // The prediction of the "other" time for this region is based
johnc@3998 1351 // upon the region type and NOT the GC type.
johnc@3998 1352 if (hr->is_young()) {
johnc@3998 1353 region_elapsed_time_ms += predict_young_other_time_ms(1);
johnc@3998 1354 } else {
johnc@3998 1355 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
johnc@3998 1356 }
johnc@3998 1357 return region_elapsed_time_ms;
johnc@3998 1358 }
johnc@3998 1359
ysr@777 1360 void
tonyp@3713 1361 G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
tonyp@3713 1362 uint survivor_cset_region_length) {
tonyp@3289 1363 _eden_cset_region_length = eden_cset_region_length;
tonyp@3289 1364 _survivor_cset_region_length = survivor_cset_region_length;
tonyp@3289 1365 _old_cset_region_length = 0;
johnc@1829 1366 }
johnc@1829 1367
johnc@1829 1368 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
johnc@1829 1369 _recorded_rs_lengths = rs_lengths;
johnc@1829 1370 }
johnc@1829 1371
ysr@777 1372 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1373 double elapsed_ms) {
ysr@777 1374 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1375 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1376 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1377 }
ysr@777 1378
ysr@777 1379 size_t G1CollectorPolicy::expansion_amount() {
tonyp@3114 1380 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
tonyp@3114 1381 double threshold = _gc_overhead_perc;
tonyp@3114 1382 if (recent_gc_overhead > threshold) {
johnc@1186 1383 // We will double the existing space, or take
johnc@1186 1384 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 1385 // space, whichever is smaller, bounded below by a minimum
johnc@1186 1386 // expansion (unless that's all that's left.)
ysr@777 1387 const size_t min_expand_bytes = 1*M;
johnc@2504 1388 size_t reserved_bytes = _g1->max_capacity();
ysr@777 1389 size_t committed_bytes = _g1->capacity();
ysr@777 1390 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 1391 size_t expand_bytes;
ysr@777 1392 size_t expand_bytes_via_pct =
johnc@1186 1393 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 1394 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 1395 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 1396 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
tonyp@3114 1397
tonyp@3114 1398 ergo_verbose5(ErgoHeapSizing,
tonyp@3114 1399 "attempt heap expansion",
tonyp@3114 1400 ergo_format_reason("recent GC overhead higher than "
tonyp@3114 1401 "threshold after GC")
tonyp@3114 1402 ergo_format_perc("recent GC overhead")
tonyp@3114 1403 ergo_format_perc("threshold")
tonyp@3114 1404 ergo_format_byte("uncommitted")
tonyp@3114 1405 ergo_format_byte_perc("calculated expansion amount"),
tonyp@3114 1406 recent_gc_overhead, threshold,
tonyp@3114 1407 uncommitted_bytes,
tonyp@3114 1408 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
tonyp@3114 1409
ysr@777 1410 return expand_bytes;
ysr@777 1411 } else {
ysr@777 1412 return 0;
ysr@777 1413 }
ysr@777 1414 }
ysr@777 1415
ysr@777 1416 void G1CollectorPolicy::print_tracing_info() const {
brutisso@3812 1417 _trace_gen0_time_data.print();
brutisso@3812 1418 _trace_gen1_time_data.print();
ysr@777 1419 }
ysr@777 1420
ysr@777 1421 void G1CollectorPolicy::print_yg_surv_rate_info() const {
ysr@777 1422 #ifndef PRODUCT
ysr@777 1423 _short_lived_surv_rate_group->print_surv_rate_summary();
ysr@777 1424 // add this call for any other surv rate groups
ysr@777 1425 #endif // PRODUCT
ysr@777 1426 }
ysr@777 1427
tonyp@3713 1428 uint G1CollectorPolicy::max_regions(int purpose) {
ysr@777 1429 switch (purpose) {
ysr@777 1430 case GCAllocForSurvived:
apetrusenko@980 1431 return _max_survivor_regions;
ysr@777 1432 case GCAllocForTenured:
apetrusenko@980 1433 return REGIONS_UNLIMITED;
ysr@777 1434 default:
apetrusenko@980 1435 ShouldNotReachHere();
apetrusenko@980 1436 return REGIONS_UNLIMITED;
ysr@777 1437 };
ysr@777 1438 }
ysr@777 1439
tonyp@3119 1440 void G1CollectorPolicy::update_max_gc_locker_expansion() {
tonyp@3713 1441 uint expansion_region_num = 0;
tonyp@2333 1442 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 1443 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 1444 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 1445 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 1446 // less than 1.0) we'll get 1.
tonyp@3713 1447 expansion_region_num = (uint) ceil(expansion_region_num_d);
tonyp@2333 1448 } else {
tonyp@2333 1449 assert(expansion_region_num == 0, "sanity");
tonyp@2333 1450 }
tonyp@2333 1451 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 1452 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 1453 }
tonyp@2333 1454
apetrusenko@980 1455 // Calculates survivor space parameters.
tonyp@3119 1456 void G1CollectorPolicy::update_survivors_policy() {
tonyp@3119 1457 double max_survivor_regions_d =
tonyp@3119 1458 (double) _young_list_target_length / (double) SurvivorRatio;
tonyp@3119 1459 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
tonyp@3119 1460 // smaller than 1.0) we'll get 1.
tonyp@3713 1461 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
tonyp@3119 1462
tonyp@3066 1463 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 1464 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 1465 }
apetrusenko@980 1466
tonyp@3114 1467 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
tonyp@3114 1468 GCCause::Cause gc_cause) {
tonyp@2011 1469 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 1470 if (!during_cycle) {
tonyp@3114 1471 ergo_verbose1(ErgoConcCycles,
tonyp@3114 1472 "request concurrent cycle initiation",
tonyp@3114 1473 ergo_format_reason("requested by GC cause")
tonyp@3114 1474 ergo_format_str("GC cause"),
tonyp@3114 1475 GCCause::to_string(gc_cause));
tonyp@2011 1476 set_initiate_conc_mark_if_possible();
tonyp@2011 1477 return true;
tonyp@2011 1478 } else {
tonyp@3114 1479 ergo_verbose1(ErgoConcCycles,
tonyp@3114 1480 "do not request concurrent cycle initiation",
tonyp@3114 1481 ergo_format_reason("concurrent cycle already in progress")
tonyp@3114 1482 ergo_format_str("GC cause"),
tonyp@3114 1483 GCCause::to_string(gc_cause));
tonyp@2011 1484 return false;
tonyp@2011 1485 }
tonyp@2011 1486 }
tonyp@2011 1487
ysr@777 1488 void
tonyp@1794 1489 G1CollectorPolicy::decide_on_conc_mark_initiation() {
tonyp@1794 1490 // We are about to decide on whether this pause will be an
tonyp@1794 1491 // initial-mark pause.
tonyp@1794 1492
tonyp@1794 1493 // First, during_initial_mark_pause() should not be already set. We
tonyp@1794 1494 // will set it here if we have to. However, it should be cleared by
tonyp@1794 1495 // the end of the pause (it's only set for the duration of an
tonyp@1794 1496 // initial-mark pause).
tonyp@1794 1497 assert(!during_initial_mark_pause(), "pre-condition");
tonyp@1794 1498
tonyp@1794 1499 if (initiate_conc_mark_if_possible()) {
tonyp@1794 1500 // We had noticed on a previous pause that the heap occupancy has
tonyp@1794 1501 // gone over the initiating threshold and we should start a
tonyp@1794 1502 // concurrent marking cycle. So we might initiate one.
tonyp@1794 1503
tonyp@1794 1504 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@1794 1505 if (!during_cycle) {
tonyp@1794 1506 // The concurrent marking thread is not "during a cycle", i.e.,
tonyp@1794 1507 // it has completed the last one. So we can go ahead and
tonyp@1794 1508 // initiate a new cycle.
tonyp@1794 1509
tonyp@1794 1510 set_during_initial_mark_pause();
tonyp@3337 1511 // We do not allow mixed GCs during marking.
tonyp@3337 1512 if (!gcs_are_young()) {
tonyp@3337 1513 set_gcs_are_young(true);
tonyp@3337 1514 ergo_verbose0(ErgoMixedGCs,
tonyp@3337 1515 "end mixed GCs",
johnc@3178 1516 ergo_format_reason("concurrent cycle is about to start"));
johnc@3178 1517 }
tonyp@1794 1518
tonyp@1794 1519 // And we can now clear initiate_conc_mark_if_possible() as
tonyp@1794 1520 // we've already acted on it.
tonyp@1794 1521 clear_initiate_conc_mark_if_possible();
tonyp@3114 1522
tonyp@3114 1523 ergo_verbose0(ErgoConcCycles,
tonyp@3114 1524 "initiate concurrent cycle",
tonyp@3114 1525 ergo_format_reason("concurrent cycle initiation requested"));
tonyp@1794 1526 } else {
tonyp@1794 1527 // The concurrent marking thread is still finishing up the
tonyp@1794 1528 // previous cycle. If we start one right now the two cycles
tonyp@1794 1529 // overlap. In particular, the concurrent marking thread might
tonyp@1794 1530 // be in the process of clearing the next marking bitmap (which
tonyp@1794 1531 // we will use for the next cycle if we start one). Starting a
tonyp@1794 1532 // cycle now will be bad given that parts of the marking
tonyp@1794 1533 // information might get cleared by the marking thread. And we
tonyp@1794 1534 // cannot wait for the marking thread to finish the cycle as it
tonyp@1794 1535 // periodically yields while clearing the next marking bitmap
tonyp@1794 1536 // and, if it's in a yield point, it's waiting for us to
tonyp@1794 1537 // finish. So, at this point we will not start a cycle and we'll
tonyp@1794 1538 // let the concurrent marking thread complete the last one.
tonyp@3114 1539 ergo_verbose0(ErgoConcCycles,
tonyp@3114 1540 "do not initiate concurrent cycle",
tonyp@3114 1541 ergo_format_reason("concurrent cycle already in progress"));
tonyp@1794 1542 }
tonyp@1794 1543 }
tonyp@1794 1544 }
tonyp@1794 1545
ysr@777 1546 class KnownGarbageClosure: public HeapRegionClosure {
tonyp@3539 1547 G1CollectedHeap* _g1h;
ysr@777 1548 CollectionSetChooser* _hrSorted;
ysr@777 1549
ysr@777 1550 public:
ysr@777 1551 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
tonyp@3539 1552 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
ysr@777 1553
ysr@777 1554 bool doHeapRegion(HeapRegion* r) {
ysr@777 1555 // We only include humongous regions in collection
ysr@777 1556 // sets when concurrent mark shows that their contained object is
ysr@777 1557 // unreachable.
ysr@777 1558
ysr@777 1559 // Do we have any marking information for this region?
ysr@777 1560 if (r->is_marked()) {
tonyp@3539 1561 // We will skip any region that's currently used as an old GC
tonyp@3539 1562 // alloc region (we should not consider those for collection
tonyp@3539 1563 // before we fill them up).
tonyp@3714 1564 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
tonyp@3714 1565 _hrSorted->add_region(r);
ysr@777 1566 }
ysr@777 1567 }
ysr@777 1568 return false;
ysr@777 1569 }
ysr@777 1570 };
ysr@777 1571
ysr@777 1572 class ParKnownGarbageHRClosure: public HeapRegionClosure {
tonyp@3539 1573 G1CollectedHeap* _g1h;
tonyp@3957 1574 CSetChooserParUpdater _cset_updater;
ysr@777 1575
ysr@777 1576 public:
ysr@777 1577 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
tonyp@3714 1578 uint chunk_size) :
tonyp@3957 1579 _g1h(G1CollectedHeap::heap()),
tonyp@3957 1580 _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
ysr@777 1581
ysr@777 1582 bool doHeapRegion(HeapRegion* r) {
ysr@777 1583 // Do we have any marking information for this region?
ysr@777 1584 if (r->is_marked()) {
tonyp@3539 1585 // We will skip any region that's currently used as an old GC
tonyp@3539 1586 // alloc region (we should not consider those for collection
tonyp@3539 1587 // before we fill them up).
tonyp@3957 1588 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
tonyp@3957 1589 _cset_updater.add_region(r);
ysr@777 1590 }
ysr@777 1591 }
ysr@777 1592 return false;
ysr@777 1593 }
ysr@777 1594 };
ysr@777 1595
ysr@777 1596 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 1597 CollectionSetChooser* _hrSorted;
tonyp@3714 1598 uint _chunk_size;
ysr@777 1599 G1CollectedHeap* _g1;
ysr@777 1600 public:
tonyp@3714 1601 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
ysr@777 1602 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 1603 _hrSorted(hrSorted), _chunk_size(chunk_size),
tonyp@3539 1604 _g1(G1CollectedHeap::heap()) { }
ysr@777 1605
jmasa@3357 1606 void work(uint worker_id) {
tonyp@3714 1607 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
tonyp@3714 1608
ysr@777 1609 // Back to zero for the claim value.
jmasa@3357 1610 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
jmasa@3294 1611 _g1->workers()->active_workers(),
tonyp@790 1612 HeapRegion::InitialClaimValue);
ysr@777 1613 }
ysr@777 1614 };
ysr@777 1615
ysr@777 1616 void
jmasa@3294 1617 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
tonyp@3714 1618 _collectionSetChooser->clear();
tonyp@3209 1619
tschatzl@7050 1620 uint region_num = _g1->num_regions();
jmasa@2188 1621 if (G1CollectedHeap::use_parallel_gc_threads()) {
tonyp@3713 1622 const uint OverpartitionFactor = 4;
tonyp@3713 1623 uint WorkUnit;
jmasa@3294 1624 // The use of MinChunkSize = 8 in the original code
jmasa@3294 1625 // causes some assertion failures when the total number of
jmasa@3294 1626 // region is less than 8. The code here tries to fix that.
jmasa@3294 1627 // Should the original code also be fixed?
jmasa@3294 1628 if (no_of_gc_threads > 0) {
tonyp@3713 1629 const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
tonyp@3713 1630 WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
tonyp@3713 1631 MinWorkUnit);
jmasa@3294 1632 } else {
jmasa@3294 1633 assert(no_of_gc_threads > 0,
jmasa@3294 1634 "The active gc workers should be greater than 0");
jmasa@3294 1635 // In a product build do something reasonable to avoid a crash.
tonyp@3713 1636 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
jmasa@3294 1637 WorkUnit =
tonyp@3713 1638 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
jmasa@3294 1639 MinWorkUnit);
jmasa@3294 1640 }
tschatzl@7050 1641 _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
tonyp@3714 1642 WorkUnit);
ysr@777 1643 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
kvn@1926 1644 (int) WorkUnit);
ysr@777 1645 _g1->workers()->run_task(&parKnownGarbageTask);
tonyp@790 1646
tonyp@790 1647 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 1648 "sanity check");
ysr@777 1649 } else {
ysr@777 1650 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
ysr@777 1651 _g1->heap_region_iterate(&knownGarbagecl);
ysr@777 1652 }
tonyp@3209 1653
tonyp@3714 1654 _collectionSetChooser->sort_regions();
tonyp@3714 1655
tonyp@3209 1656 double end_sec = os::elapsedTime();
tonyp@3209 1657 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
tonyp@3209 1658 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
tonyp@3209 1659 _cur_mark_stop_world_time_ms += elapsed_time_ms;
tonyp@3209 1660 _prev_collection_pause_end_ms += elapsed_time_ms;
tonyp@3209 1661 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
ysr@777 1662 }
ysr@777 1663
johnc@1829 1664 // Add the heap region at the head of the non-incremental collection set
tonyp@3289 1665 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
johnc@1829 1666 assert(_inc_cset_build_state == Active, "Precondition");
brutisso@7195 1667 assert(hr->is_old(), "the region should be old");
johnc@1829 1668
johnc@1829 1669 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 1670 hr->set_in_collection_set(true);
ysr@777 1671 hr->set_next_in_collection_set(_collection_set);
ysr@777 1672 _collection_set = hr;
ysr@777 1673 _collection_set_bytes_used_before += hr->used();
tonyp@961 1674 _g1->register_region_with_in_cset_fast_test(hr);
tonyp@3289 1675 size_t rs_length = hr->rem_set()->occupied();
tonyp@3289 1676 _recorded_rs_lengths += rs_length;
tonyp@3289 1677 _old_cset_region_length += 1;
ysr@777 1678 }
ysr@777 1679
johnc@1829 1680 // Initialize the per-collection-set information
johnc@1829 1681 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 1682 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 1683
johnc@1829 1684 _inc_cset_head = NULL;
johnc@1829 1685 _inc_cset_tail = NULL;
johnc@1829 1686 _inc_cset_bytes_used_before = 0;
johnc@1829 1687
johnc@1829 1688 _inc_cset_max_finger = 0;
johnc@1829 1689 _inc_cset_recorded_rs_lengths = 0;
tonyp@3356 1690 _inc_cset_recorded_rs_lengths_diffs = 0;
tonyp@3356 1691 _inc_cset_predicted_elapsed_time_ms = 0.0;
tonyp@3356 1692 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
johnc@1829 1693 _inc_cset_build_state = Active;
johnc@1829 1694 }
johnc@1829 1695
tonyp@3356 1696 void G1CollectorPolicy::finalize_incremental_cset_building() {
tonyp@3356 1697 assert(_inc_cset_build_state == Active, "Precondition");
tonyp@3356 1698 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
tonyp@3356 1699
tonyp@3356 1700 // The two "main" fields, _inc_cset_recorded_rs_lengths and
tonyp@3356 1701 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
tonyp@3356 1702 // that adds a new region to the CSet. Further updates by the
tonyp@3356 1703 // concurrent refinement thread that samples the young RSet lengths
tonyp@3356 1704 // are accumulated in the *_diffs fields. Here we add the diffs to
tonyp@3356 1705 // the "main" fields.
tonyp@3356 1706
tonyp@3356 1707 if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
tonyp@3356 1708 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
tonyp@3356 1709 } else {
tonyp@3356 1710 // This is defensive. The diff should in theory be always positive
tonyp@3356 1711 // as RSets can only grow between GCs. However, given that we
tonyp@3356 1712 // sample their size concurrently with other threads updating them
tonyp@3356 1713 // it's possible that we might get the wrong size back, which
tonyp@3356 1714 // could make the calculations somewhat inaccurate.
tonyp@3356 1715 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
tonyp@3356 1716 if (_inc_cset_recorded_rs_lengths >= diffs) {
tonyp@3356 1717 _inc_cset_recorded_rs_lengths -= diffs;
tonyp@3356 1718 } else {
tonyp@3356 1719 _inc_cset_recorded_rs_lengths = 0;
tonyp@3356 1720 }
tonyp@3356 1721 }
tonyp@3356 1722 _inc_cset_predicted_elapsed_time_ms +=
tonyp@3356 1723 _inc_cset_predicted_elapsed_time_ms_diffs;
tonyp@3356 1724
tonyp@3356 1725 _inc_cset_recorded_rs_lengths_diffs = 0;
tonyp@3356 1726 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
tonyp@3356 1727 }
tonyp@3356 1728
johnc@1829 1729 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 1730 // This routine is used when:
johnc@1829 1731 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 1732 // evacuation pause,
johnc@1829 1733 // * adding the current allocation region to the incremental cset
johnc@1829 1734 // when it is retired, and
johnc@1829 1735 // * updating existing policy information for a region in the
johnc@1829 1736 // incremental cset via young list RSet sampling.
johnc@1829 1737 // Therefore this routine may be called at a safepoint by the
johnc@1829 1738 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 1739 // retiring the current allocation region) or a concurrent
johnc@1829 1740 // refine thread (RSet sampling).
johnc@1829 1741
johnc@3998 1742 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
johnc@1829 1743 size_t used_bytes = hr->used();
johnc@1829 1744 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 1745 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 1746 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 1747
johnc@1829 1748 // Cache the values we have added to the aggregated informtion
johnc@1829 1749 // in the heap region in case we have to remove this region from
johnc@1829 1750 // the incremental collection set, or it is updated by the
johnc@1829 1751 // rset sampling code
johnc@1829 1752 hr->set_recorded_rs_length(rs_length);
johnc@1829 1753 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 1754 }
johnc@1829 1755
tonyp@3356 1756 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
tonyp@3356 1757 size_t new_rs_length) {
tonyp@3356 1758 // Update the CSet information that is dependent on the new RS length
tonyp@3356 1759 assert(hr->is_young(), "Precondition");
tonyp@3356 1760 assert(!SafepointSynchronize::is_at_safepoint(),
tonyp@3356 1761 "should not be at a safepoint");
tonyp@3356 1762
tonyp@3356 1763 // We could have updated _inc_cset_recorded_rs_lengths and
tonyp@3356 1764 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
tonyp@3356 1765 // that atomically, as this code is executed by a concurrent
tonyp@3356 1766 // refinement thread, potentially concurrently with a mutator thread
tonyp@3356 1767 // allocating a new region and also updating the same fields. To
tonyp@3356 1768 // avoid the atomic operations we accumulate these updates on two
tonyp@3356 1769 // separate fields (*_diffs) and we'll just add them to the "main"
tonyp@3356 1770 // fields at the start of a GC.
tonyp@3356 1771
tonyp@3356 1772 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
tonyp@3356 1773 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
tonyp@3356 1774 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
tonyp@3356 1775
johnc@1829 1776 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
johnc@3998 1777 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
tonyp@3356 1778 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
tonyp@3356 1779 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
tonyp@3356 1780
tonyp@3356 1781 hr->set_recorded_rs_length(new_rs_length);
tonyp@3356 1782 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
johnc@1829 1783 }
johnc@1829 1784
johnc@1829 1785 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
tonyp@3289 1786 assert(hr->is_young(), "invariant");
tonyp@3289 1787 assert(hr->young_index_in_cset() > -1, "should have already been set");
johnc@1829 1788 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 1789
johnc@1829 1790 // We need to clear and set the cached recorded/cached collection set
johnc@1829 1791 // information in the heap region here (before the region gets added
johnc@1829 1792 // to the collection set). An individual heap region's cached values
johnc@1829 1793 // are calculated, aggregated with the policy collection set info,
johnc@1829 1794 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 1795 // by the Young List sampling code.
johnc@1829 1796
johnc@1829 1797 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 1798 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 1799
johnc@1829 1800 HeapWord* hr_end = hr->end();
johnc@1829 1801 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 1802
johnc@1829 1803 assert(!hr->in_collection_set(), "invariant");
johnc@1829 1804 hr->set_in_collection_set(true);
johnc@1829 1805 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 1806
johnc@1829 1807 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 1808 }
johnc@1829 1809
johnc@1829 1810 // Add the region at the RHS of the incremental cset
johnc@1829 1811 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 1812 // We should only ever be appending survivors at the end of a pause
brutisso@7195 1813 assert(hr->is_survivor(), "Logic");
johnc@1829 1814
johnc@1829 1815 // Do the 'common' stuff
johnc@1829 1816 add_region_to_incremental_cset_common(hr);
johnc@1829 1817
johnc@1829 1818 // Now add the region at the right hand side
johnc@1829 1819 if (_inc_cset_tail == NULL) {
johnc@1829 1820 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 1821 _inc_cset_head = hr;
johnc@1829 1822 } else {
johnc@1829 1823 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 1824 }
johnc@1829 1825 _inc_cset_tail = hr;
johnc@1829 1826 }
johnc@1829 1827
johnc@1829 1828 // Add the region to the LHS of the incremental cset
johnc@1829 1829 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 1830 // Survivors should be added to the RHS at the end of a pause
brutisso@7195 1831 assert(hr->is_eden(), "Logic");
johnc@1829 1832
johnc@1829 1833 // Do the 'common' stuff
johnc@1829 1834 add_region_to_incremental_cset_common(hr);
johnc@1829 1835
johnc@1829 1836 // Add the region at the left hand side
johnc@1829 1837 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 1838 if (_inc_cset_head == NULL) {
johnc@1829 1839 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 1840 _inc_cset_tail = hr;
johnc@1829 1841 }
johnc@1829 1842 _inc_cset_head = hr;
johnc@1829 1843 }
johnc@1829 1844
johnc@1829 1845 #ifndef PRODUCT
johnc@1829 1846 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 1847 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 1848
johnc@1829 1849 st->print_cr("\nCollection_set:");
johnc@1829 1850 HeapRegion* csr = list_head;
johnc@1829 1851 while (csr != NULL) {
johnc@1829 1852 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 1853 assert(csr->in_collection_set(), "bad CS");
johnc@3731 1854 st->print_cr(" "HR_FORMAT", P: "PTR_FORMAT "N: "PTR_FORMAT", age: %4d",
johnc@3731 1855 HR_FORMAT_PARAMS(csr),
johnc@3731 1856 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
johnc@3731 1857 csr->age_in_surv_rate_group_cond());
johnc@1829 1858 csr = next;
johnc@1829 1859 }
johnc@1829 1860 }
johnc@1829 1861 #endif // !PRODUCT
johnc@1829 1862
johnc@4681 1863 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
johnc@4681 1864 // Returns the given amount of reclaimable bytes (that represents
johnc@4681 1865 // the amount of reclaimable space still to be collected) as a
johnc@4681 1866 // percentage of the current heap capacity.
johnc@4681 1867 size_t capacity_bytes = _g1->capacity();
johnc@4681 1868 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
johnc@4681 1869 }
johnc@4681 1870
tonyp@3539 1871 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
tonyp@3539 1872 const char* false_action_str) {
tonyp@3539 1873 CollectionSetChooser* cset_chooser = _collectionSetChooser;
tonyp@3714 1874 if (cset_chooser->is_empty()) {
tonyp@3539 1875 ergo_verbose0(ErgoMixedGCs,
tonyp@3539 1876 false_action_str,
tonyp@3539 1877 ergo_format_reason("candidate old regions not available"));
tonyp@3539 1878 return false;
tonyp@3539 1879 }
johnc@4681 1880
johnc@4681 1881 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
tonyp@3714 1882 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
johnc@4681 1883 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
tonyp@3667 1884 double threshold = (double) G1HeapWastePercent;
johnc@4681 1885 if (reclaimable_perc <= threshold) {
tonyp@3539 1886 ergo_verbose4(ErgoMixedGCs,
tonyp@3539 1887 false_action_str,
johnc@4681 1888 ergo_format_reason("reclaimable percentage not over threshold")
tonyp@3539 1889 ergo_format_region("candidate old regions")
tonyp@3539 1890 ergo_format_byte_perc("reclaimable")
tonyp@3539 1891 ergo_format_perc("threshold"),
tonyp@3714 1892 cset_chooser->remaining_regions(),
johnc@4681 1893 reclaimable_bytes,
johnc@4681 1894 reclaimable_perc, threshold);
tonyp@3539 1895 return false;
tonyp@3539 1896 }
tonyp@3539 1897
tonyp@3539 1898 ergo_verbose4(ErgoMixedGCs,
tonyp@3539 1899 true_action_str,
tonyp@3539 1900 ergo_format_reason("candidate old regions available")
tonyp@3539 1901 ergo_format_region("candidate old regions")
tonyp@3539 1902 ergo_format_byte_perc("reclaimable")
tonyp@3539 1903 ergo_format_perc("threshold"),
tonyp@3714 1904 cset_chooser->remaining_regions(),
johnc@4681 1905 reclaimable_bytes,
johnc@4681 1906 reclaimable_perc, threshold);
tonyp@3539 1907 return true;
tonyp@3539 1908 }
tonyp@3539 1909
johnc@4681 1910 uint G1CollectorPolicy::calc_min_old_cset_length() {
johnc@4681 1911 // The min old CSet region bound is based on the maximum desired
johnc@4681 1912 // number of mixed GCs after a cycle. I.e., even if some old regions
johnc@4681 1913 // look expensive, we should add them to the CSet anyway to make
johnc@4681 1914 // sure we go through the available old regions in no more than the
johnc@4681 1915 // maximum desired number of mixed GCs.
johnc@4681 1916 //
johnc@4681 1917 // The calculation is based on the number of marked regions we added
johnc@4681 1918 // to the CSet chooser in the first place, not how many remain, so
johnc@4681 1919 // that the result is the same during all mixed GCs that follow a cycle.
johnc@4681 1920
johnc@4681 1921 const size_t region_num = (size_t) _collectionSetChooser->length();
johnc@4681 1922 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
johnc@4681 1923 size_t result = region_num / gc_num;
johnc@4681 1924 // emulate ceiling
johnc@4681 1925 if (result * gc_num < region_num) {
johnc@4681 1926 result += 1;
johnc@4681 1927 }
johnc@4681 1928 return (uint) result;
johnc@4681 1929 }
johnc@4681 1930
johnc@4681 1931 uint G1CollectorPolicy::calc_max_old_cset_length() {
johnc@4681 1932 // The max old CSet region bound is based on the threshold expressed
johnc@4681 1933 // as a percentage of the heap size. I.e., it should bound the
johnc@4681 1934 // number of old regions added to the CSet irrespective of how many
johnc@4681 1935 // of them are available.
johnc@4681 1936
johnc@4681 1937 G1CollectedHeap* g1h = G1CollectedHeap::heap();
tschatzl@7050 1938 const size_t region_num = g1h->num_regions();
johnc@4681 1939 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
johnc@4681 1940 size_t result = region_num * perc / 100;
johnc@4681 1941 // emulate ceiling
johnc@4681 1942 if (100 * result < region_num * perc) {
johnc@4681 1943 result += 1;
johnc@4681 1944 }
johnc@4681 1945 return (uint) result;
johnc@4681 1946 }
johnc@4681 1947
johnc@4681 1948
// Build the final collection set for the upcoming evacuation pause.
// All current young (eden + survivor) regions are always included;
// during mixed GCs, candidate old regions are appended from the
// CollectionSetChooser until one of the stopping conditions triggers
// (max old region count, waste threshold reached, or predicted pause
// time exhausted). Timing of the young and non-young phases of the
// selection is recorded in phase_times(), and the resulting region
// count is published via evacuation_info.
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  // Start from the fixed per-pause overhead; old regions are only
  // added while the remaining time budget allows.
  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_size("_pending_cards")
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  _last_gc_was_young = gcs_are_young() ? true : false;

  if (_last_gc_was_young) {
    _trace_gen0_time_data.increment_young_collection_count();
  } else {
    _trace_gen0_time_data.increment_mixed_collection_count();
  }

  // The young list is laid out with the survivor regions from the
  // previous pause appended to the RHS of the young list, i.e.
  // [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->length() - survivor_region_length;
  init_cset_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    // There is a convention that all the young regions in the CSet
    // are tagged as "eden", so we do this for the survivors here. We
    // use the special set_eden_pre_gc() as it doesn't check that the
    // region is free (which is not the case here).
    hr->set_eden_pre_gc();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  // Adopt the incrementally-built young CSet and charge its predicted
  // cost against the pause-time budget.
  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  // Set the start of the non-young choice time.
  double non_young_start_time_sec = young_end_time_sec;

  // Only a mixed GC adds old regions to the collection set.
  if (!gcs_are_young()) {
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    cset_chooser->verify();
    const uint min_old_cset_length = calc_min_old_cset_length();
    const uint max_old_cset_length = calc_max_old_cset_length();

    uint expensive_region_num = 0;
    bool check_time_remaining = adaptive_young_list_length();

    HeapRegion* hr = cset_chooser->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        ergo_verbose2(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("old CSet region num reached max")
                      ergo_format_region("old")
                      ergo_format_region("max"),
                      old_cset_region_length(), max_old_cset_length);
        break;
      }


      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_perc <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        ergo_verbose5(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("reclaimable percentage not over threshold")
                      ergo_format_region("old")
                      ergo_format_region("max")
                      ergo_format_byte_perc("reclaimable")
                      ergo_format_perc("threshold"),
                      old_cset_region_length(),
                      max_old_cset_length,
                      reclaimable_bytes,
                      reclaimable_perc, threshold);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            ergo_verbose4(ErgoCSetConstruction,
                          "finish adding old regions to CSet",
                          ergo_format_reason("predicted time is too high")
                          ergo_format_ms("predicted time")
                          ergo_format_ms("remaining time")
                          ergo_format_region("old")
                          ergo_format_region("min"),
                          predicted_time_ms, time_remaining_ms,
                          old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          ergo_verbose2(ErgoCSetConstruction,
                        "finish adding old regions to CSet",
                        ergo_format_reason("old CSet region num reached min")
                        ergo_format_region("old")
                        ergo_format_region("min"),
                        old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_pause_time_ms += predicted_time_ms;
      cset_chooser->remove_and_move_to_next(hr);
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser->peek();
    }
    if (hr == NULL) {
      // We consumed every candidate old region the chooser had.
      ergo_verbose0(ErgoCSetConstruction,
                    "finish adding old regions to CSet",
                    ergo_format_reason("candidate old regions not available"));
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      ergo_verbose4(ErgoCSetConstruction,
                    "added expensive regions to CSet",
                    ergo_format_reason("old CSet region num not reached min")
                    ergo_format_region("old")
                    ergo_format_region("expensive")
                    ergo_format_region("min")
                    ergo_format_ms("remaining time"),
                    old_cset_region_length(),
                    expensive_region_num,
                    min_old_cset_length,
                    time_remaining_ms);
    }

    cset_chooser->verify();
  }

  stop_incremental_cset_building();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                old_cset_region_length(),
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
  evacuation_info.set_collectionset_regions(cset_region_length());
}
brutisso@3812 2161
brutisso@3812 2162 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
brutisso@3812 2163 if(TraceGen0Time) {
brutisso@3812 2164 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
brutisso@3812 2165 }
brutisso@3812 2166 }
brutisso@3812 2167
brutisso@3812 2168 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
brutisso@3812 2169 if(TraceGen0Time) {
brutisso@3812 2170 _all_yield_times_ms.add(yield_time_ms);
brutisso@3812 2171 }
brutisso@3812 2172 }
brutisso@3812 2173
brutisso@3923 2174 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
brutisso@3812 2175 if(TraceGen0Time) {
brutisso@3923 2176 _total.add(pause_time_ms);
brutisso@3923 2177 _other.add(pause_time_ms - phase_times->accounted_time_ms());
brutisso@4015 2178 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
brutisso@4015 2179 _parallel.add(phase_times->cur_collection_par_time_ms());
brutisso@4015 2180 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
brutisso@4015 2181 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
brutisso@4015 2182 _update_rs.add(phase_times->average_last_update_rs_time());
brutisso@4015 2183 _scan_rs.add(phase_times->average_last_scan_rs_time());
brutisso@4015 2184 _obj_copy.add(phase_times->average_last_obj_copy_time());
brutisso@4015 2185 _termination.add(phase_times->average_last_termination_time());
brutisso@3923 2186
brutisso@4015 2187 double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
brutisso@4015 2188 phase_times->average_last_satb_filtering_times_ms() +
brutisso@4015 2189 phase_times->average_last_update_rs_time() +
brutisso@4015 2190 phase_times->average_last_scan_rs_time() +
brutisso@4015 2191 phase_times->average_last_obj_copy_time() +
brutisso@4015 2192 + phase_times->average_last_termination_time();
brutisso@3923 2193
brutisso@4015 2194 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
brutisso@3923 2195 _parallel_other.add(parallel_other_time);
brutisso@4015 2196 _clear_ct.add(phase_times->cur_clear_ct_time_ms());
brutisso@3812 2197 }
brutisso@3812 2198 }
brutisso@3812 2199
brutisso@3812 2200 void TraceGen0TimeData::increment_young_collection_count() {
brutisso@3812 2201 if(TraceGen0Time) {
brutisso@3812 2202 ++_young_pause_num;
brutisso@3812 2203 }
brutisso@3812 2204 }
brutisso@3812 2205
brutisso@3812 2206 void TraceGen0TimeData::increment_mixed_collection_count() {
brutisso@3812 2207 if(TraceGen0Time) {
brutisso@3812 2208 ++_mixed_pause_num;
brutisso@3812 2209 }
brutisso@3812 2210 }
brutisso@3812 2211
brutisso@3923 2212 void TraceGen0TimeData::print_summary(const char* str,
brutisso@3812 2213 const NumberSeq* seq) const {
brutisso@3812 2214 double sum = seq->sum();
brutisso@3923 2215 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
brutisso@3812 2216 str, sum / 1000.0, seq->avg());
brutisso@3812 2217 }
brutisso@3812 2218
// As print_summary(), but additionally prints the sample count,
// standard deviation and maximum on a second line. Note the opening
// parenthesis comes from the "(num" argument, matching the trailing
// ')' in the format string.
void TraceGen0TimeData::print_summary_sd(const char* str,
                                         const NumberSeq* seq) const {
  print_summary(str, seq);
  gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                         "(num", seq->num(), seq->sd(), seq->maximum());
}
brutisso@3812 2225
// Print the accumulated young/mixed pause statistics gathered while
// TraceGen0Time was enabled: overall pause totals, per-phase
// evacuation breakdowns, and miscellaneous stop-world/yield times.
void TraceGen0TimeData::print() const {
  if (!TraceGen0Time) {
    return;
  }

  gclog_or_tty->print_cr("ALL PAUSES");
  print_summary_sd("   Total", &_total);
  gclog_or_tty->cr();
  gclog_or_tty->cr();
  gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  gclog_or_tty->cr();

  gclog_or_tty->print_cr("EVACUATION PAUSES");

  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    // No evacuation pauses happened at all, so there is nothing to break down.
    gclog_or_tty->print_cr("none");
  } else {
    print_summary_sd("   Evacuation Pauses", &_total);
    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
    print_summary("      Parallel Time", &_parallel);
    print_summary("         Ext Root Scanning", &_ext_root_scan);
    print_summary("         SATB Filtering", &_satb_filtering);
    print_summary("         Update RS", &_update_rs);
    print_summary("         Scan RS", &_scan_rs);
    print_summary("         Object Copy", &_obj_copy);
    print_summary("         Termination", &_termination);
    print_summary("         Parallel Other", &_parallel_other);
    print_summary("      Clear CT", &_clear_ct);
    print_summary("      Other", &_other);
  }
  gclog_or_tty->cr();

  gclog_or_tty->print_cr("MISC");
  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  print_summary_sd("   Yields", &_all_yield_times_ms);
}
brutisso@3812 2263
brutisso@3812 2264 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
brutisso@3812 2265 if (TraceGen1Time) {
brutisso@3812 2266 _all_full_gc_times.add(full_gc_time_ms);
brutisso@3812 2267 }
brutisso@3812 2268 }
brutisso@3812 2269
// Print full-GC statistics gathered while TraceGen1Time was enabled.
// Silent if tracing is off or no full GCs were recorded.
void TraceGen1TimeData::print() const {
  if (!TraceGen1Time) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
      _all_full_gc_times.num(),
      _all_full_gc_times.sum() / 1000.0);
    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
      _all_full_gc_times.sd(),
      _all_full_gc_times.maximum());
  }
}

mercurial