src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Wed, 15 Feb 2012 13:06:53 -0500

author
tonyp
date
Wed, 15 Feb 2012 13:06:53 -0500
changeset 3539
a9647476d1a4
parent 3464
eff609af17d7
child 3667
21595f05bc93
permissions
-rw-r--r--

7132029: G1: mixed GC phase lasts for longer than it should
Summary: Revamp of the mechanism that chooses old regions for inclusion in the CSet. It simplifies the code and introduces min and max bounds on the number of old regions added to the CSet at each mixed GC to avoid pathological cases. It also ensures that when we do a mixed GC we'll always find old regions to add to the CSet (i.e., it eliminates the case where a mixed GC will collect no old regions which can happen today).
Reviewed-by: johnc, brutisso

ysr@777 1 /*
tonyp@3416 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
stefank@2314 32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 33 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 34 #include "runtime/arguments.hpp"
stefank@2314 35 #include "runtime/java.hpp"
stefank@2314 36 #include "runtime/mutexLocker.hpp"
stefank@2314 37 #include "utilities/debug.hpp"
ysr@777 38
// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// Each table has 8 entries, indexed by (number of GC threads - 1),
// clamped to [0, 7] by the constructor.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
ysr@777 78
brutisso@2645 79 // Help class for avoiding interleaved logging
brutisso@2645 80 class LineBuffer: public StackObj {
brutisso@2645 81
brutisso@2645 82 private:
brutisso@2645 83 static const int BUFFER_LEN = 1024;
brutisso@2645 84 static const int INDENT_CHARS = 3;
brutisso@2645 85 char _buffer[BUFFER_LEN];
brutisso@2645 86 int _indent_level;
brutisso@2645 87 int _cur;
brutisso@2645 88
brutisso@2645 89 void vappend(const char* format, va_list ap) {
brutisso@2645 90 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
brutisso@2645 91 if (res != -1) {
brutisso@2645 92 _cur += res;
brutisso@2645 93 } else {
brutisso@2645 94 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
brutisso@2645 95 _buffer[BUFFER_LEN -1] = 0;
brutisso@2645 96 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
brutisso@2645 97 }
brutisso@2645 98 }
brutisso@2645 99
brutisso@2645 100 public:
brutisso@2645 101 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
brutisso@2645 102 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
brutisso@2645 103 _buffer[_cur] = ' ';
brutisso@2645 104 }
brutisso@2645 105 }
brutisso@2645 106
brutisso@2645 107 #ifndef PRODUCT
brutisso@2645 108 ~LineBuffer() {
brutisso@2645 109 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
brutisso@2645 110 }
brutisso@2645 111 #endif
brutisso@2645 112
brutisso@2645 113 void append(const char* format, ...) {
brutisso@2645 114 va_list ap;
brutisso@2645 115 va_start(ap, format);
brutisso@2645 116 vappend(format, ap);
brutisso@2645 117 va_end(ap);
brutisso@2645 118 }
brutisso@2645 119
brutisso@2645 120 void append_and_print_cr(const char* format, ...) {
brutisso@2645 121 va_list ap;
brutisso@2645 122 va_start(ap, format);
brutisso@2645 123 vappend(format, ap);
brutisso@2645 124 va_end(ap);
brutisso@2645 125 gclog_or_tty->print_cr("%s", _buffer);
brutisso@2645 126 _cur = _indent_level * INDENT_CHARS;
brutisso@2645 127 }
brutisso@2645 128 };
brutisso@2645 129
ysr@777 130 G1CollectorPolicy::G1CollectorPolicy() :
jmasa@2188 131 _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
johnc@3021 132 ? ParallelGCThreads : 1),
jmasa@2188 133
ysr@777 134 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 135 _all_pause_times_ms(new NumberSeq()),
ysr@777 136 _stop_world_start(0.0),
ysr@777 137 _all_stop_world_times_ms(new NumberSeq()),
ysr@777 138 _all_yield_times_ms(new NumberSeq()),
ysr@777 139
apetrusenko@1112 140 _summary(new Summary()),
ysr@777 141
johnc@3175 142 _cur_clear_ct_time_ms(0.0),
johnc@3296 143 _mark_closure_time_ms(0.0),
tonyp@3464 144 _root_region_scan_wait_time_ms(0.0),
johnc@3175 145
johnc@3175 146 _cur_ref_proc_time_ms(0.0),
johnc@3175 147 _cur_ref_enq_time_ms(0.0),
johnc@3175 148
johnc@1325 149 #ifndef PRODUCT
johnc@1325 150 _min_clear_cc_time_ms(-1.0),
johnc@1325 151 _max_clear_cc_time_ms(-1.0),
johnc@1325 152 _cur_clear_cc_time_ms(0.0),
johnc@1325 153 _cum_clear_cc_time_ms(0.0),
johnc@1325 154 _num_cc_clears(0L),
johnc@1325 155 #endif
ysr@777 156
ysr@777 157 _aux_num(10),
ysr@777 158 _all_aux_times_ms(new NumberSeq[_aux_num]),
ysr@777 159 _cur_aux_start_times_ms(new double[_aux_num]),
ysr@777 160 _cur_aux_times_ms(new double[_aux_num]),
ysr@777 161 _cur_aux_times_set(new bool[_aux_num]),
ysr@777 162
ysr@777 163 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 164 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 165
ysr@777 166 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 167 _prev_collection_pause_end_ms(0.0),
ysr@777 168 _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 169 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 170 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
tonyp@3337 171 _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
tonyp@3337 172 _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 173 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
tonyp@3337 174 _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 175 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 176 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 177 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 178 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 179 _non_young_other_cost_per_region_ms_seq(
ysr@777 180 new TruncatedSeq(TruncatedSeqLength)),
ysr@777 181
ysr@777 182 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 183 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 184
johnc@1186 185 _pause_time_target_ms((double) MaxGCPauseMillis),
ysr@777 186
tonyp@3337 187 _gcs_are_young(true),
tonyp@3337 188 _young_pause_num(0),
tonyp@3337 189 _mixed_pause_num(0),
ysr@777 190
ysr@777 191 _during_marking(false),
ysr@777 192 _in_marking_window(false),
ysr@777 193 _in_marking_window_im(false),
ysr@777 194
ysr@777 195 _known_garbage_ratio(0.0),
ysr@777 196 _known_garbage_bytes(0),
ysr@777 197
ysr@777 198 _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 199
tonyp@3337 200 _recent_prev_end_times_for_all_gcs_sec(
tonyp@3337 201 new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 202
ysr@777 203 _recent_avg_pause_time_ratio(0.0),
ysr@777 204
ysr@777 205 _all_full_gc_times_ms(new NumberSeq()),
ysr@777 206
tonyp@1794 207 _initiate_conc_mark_if_possible(false),
tonyp@1794 208 _during_initial_mark_pause(false),
tonyp@3337 209 _last_young_gc(false),
tonyp@3337 210 _last_gc_was_young(false),
ysr@777 211
tonyp@2961 212 _eden_bytes_before_gc(0),
tonyp@2961 213 _survivor_bytes_before_gc(0),
tonyp@2961 214 _capacity_before_gc(0),
tonyp@2961 215
tonyp@3289 216 _eden_cset_region_length(0),
tonyp@3289 217 _survivor_cset_region_length(0),
tonyp@3289 218 _old_cset_region_length(0),
tonyp@3289 219
ysr@777 220 _collection_set(NULL),
johnc@1829 221 _collection_set_bytes_used_before(0),
johnc@1829 222
johnc@1829 223 // Incremental CSet attributes
johnc@1829 224 _inc_cset_build_state(Inactive),
johnc@1829 225 _inc_cset_head(NULL),
johnc@1829 226 _inc_cset_tail(NULL),
johnc@1829 227 _inc_cset_bytes_used_before(0),
johnc@1829 228 _inc_cset_max_finger(NULL),
johnc@1829 229 _inc_cset_recorded_rs_lengths(0),
tonyp@3356 230 _inc_cset_recorded_rs_lengths_diffs(0),
johnc@1829 231 _inc_cset_predicted_elapsed_time_ms(0.0),
tonyp@3356 232 _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
johnc@1829 233
ysr@777 234 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 235 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 236 #endif // _MSC_VER
ysr@777 237
ysr@777 238 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
ysr@777 239 G1YoungSurvRateNumRegionsSummary)),
ysr@777 240 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
apetrusenko@980 241 G1YoungSurvRateNumRegionsSummary)),
ysr@777 242 // add here any more surv rate groups
apetrusenko@980 243 _recorded_survivor_regions(0),
apetrusenko@980 244 _recorded_survivor_head(NULL),
apetrusenko@980 245 _recorded_survivor_tail(NULL),
tonyp@1791 246 _survivors_age_table(true),
tonyp@1791 247
tonyp@3114 248 _gc_overhead_perc(0.0) {
tonyp@3114 249
tonyp@1377 250 // Set up the region size and associated fields. Given that the
tonyp@1377 251 // policy is created before the heap, we have to set this up here,
tonyp@1377 252 // so it's done as soon as possible.
tonyp@1377 253 HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
iveresov@1696 254 HeapRegionRemSet::setup_remset_size();
tonyp@1377 255
tonyp@3114 256 G1ErgoVerbose::initialize();
tonyp@3114 257 if (PrintAdaptiveSizePolicy) {
tonyp@3114 258 // Currently, we only use a single switch for all the heuristics.
tonyp@3114 259 G1ErgoVerbose::set_enabled(true);
tonyp@3114 260 // Given that we don't currently have a verboseness level
tonyp@3114 261 // parameter, we'll hardcode this to high. This can be easily
tonyp@3114 262 // changed in the future.
tonyp@3114 263 G1ErgoVerbose::set_level(ErgoHigh);
tonyp@3114 264 } else {
tonyp@3114 265 G1ErgoVerbose::set_enabled(false);
tonyp@3114 266 }
tonyp@3114 267
apetrusenko@1826 268 // Verify PLAB sizes
johnc@3182 269 const size_t region_size = HeapRegion::GrainWords;
apetrusenko@1826 270 if (YoungPLABSize > region_size || OldPLABSize > region_size) {
apetrusenko@1826 271 char buffer[128];
johnc@3182 272 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
apetrusenko@1826 273 OldPLABSize > region_size ? "Old" : "Young", region_size);
apetrusenko@1826 274 vm_exit_during_initialization(buffer);
apetrusenko@1826 275 }
apetrusenko@1826 276
ysr@777 277 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
ysr@777 278 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
ysr@777 279
tonyp@1966 280 _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
ysr@777 281 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
tonyp@3416 282 _par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];
ysr@777 283
ysr@777 284 _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
ysr@777 285 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
ysr@777 286
ysr@777 287 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
ysr@777 288
ysr@777 289 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
ysr@777 290
ysr@777 291 _par_last_termination_times_ms = new double[_parallel_gc_threads];
tonyp@1966 292 _par_last_termination_attempts = new double[_parallel_gc_threads];
tonyp@1966 293 _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
brutisso@2712 294 _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
johnc@3219 295 _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];
ysr@777 296
ysr@777 297 int index;
ysr@777 298 if (ParallelGCThreads == 0)
ysr@777 299 index = 0;
ysr@777 300 else if (ParallelGCThreads > 8)
ysr@777 301 index = 7;
ysr@777 302 else
ysr@777 303 index = ParallelGCThreads - 1;
ysr@777 304
ysr@777 305 _pending_card_diff_seq->add(0.0);
ysr@777 306 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
ysr@777 307 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
tonyp@3337 308 _young_cards_per_entry_ratio_seq->add(
tonyp@3337 309 young_cards_per_entry_ratio_defaults[index]);
ysr@777 310 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
ysr@777 311 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
ysr@777 312 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
ysr@777 313 _young_other_cost_per_region_ms_seq->add(
ysr@777 314 young_other_cost_per_region_ms_defaults[index]);
ysr@777 315 _non_young_other_cost_per_region_ms_seq->add(
ysr@777 316 non_young_other_cost_per_region_ms_defaults[index]);
ysr@777 317
tonyp@1965 318 // Below, we might need to calculate the pause time target based on
tonyp@1965 319 // the pause interval. When we do so we are going to give G1 maximum
tonyp@1965 320 // flexibility and allow it to do pauses when it needs to. So, we'll
tonyp@1965 321 // arrange that the pause interval to be pause time target + 1 to
tonyp@1965 322 // ensure that a) the pause time target is maximized with respect to
tonyp@1965 323 // the pause interval and b) we maintain the invariant that pause
tonyp@1965 324 // time target < pause interval. If the user does not want this
tonyp@1965 325 // maximum flexibility, they will have to set the pause interval
tonyp@1965 326 // explicitly.
tonyp@1965 327
tonyp@1965 328 // First make sure that, if either parameter is set, its value is
tonyp@1965 329 // reasonable.
tonyp@1965 330 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
tonyp@1965 331 if (MaxGCPauseMillis < 1) {
tonyp@1965 332 vm_exit_during_initialization("MaxGCPauseMillis should be "
tonyp@1965 333 "greater than 0");
tonyp@1965 334 }
tonyp@1965 335 }
tonyp@1965 336 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 337 if (GCPauseIntervalMillis < 1) {
tonyp@1965 338 vm_exit_during_initialization("GCPauseIntervalMillis should be "
tonyp@1965 339 "greater than 0");
tonyp@1965 340 }
tonyp@1965 341 }
tonyp@1965 342
tonyp@1965 343 // Then, if the pause time target parameter was not set, set it to
tonyp@1965 344 // the default value.
tonyp@1965 345 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
tonyp@1965 346 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 347 // The default pause time target in G1 is 200ms
tonyp@1965 348 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
tonyp@1965 349 } else {
tonyp@1965 350 // We do not allow the pause interval to be set without the
tonyp@1965 351 // pause time target
tonyp@1965 352 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
tonyp@1965 353 "without setting MaxGCPauseMillis");
tonyp@1965 354 }
tonyp@1965 355 }
tonyp@1965 356
tonyp@1965 357 // Then, if the interval parameter was not set, set it according to
tonyp@1965 358 // the pause time target (this will also deal with the case when the
tonyp@1965 359 // pause time target is the default value).
tonyp@1965 360 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 361 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
tonyp@1965 362 }
tonyp@1965 363
tonyp@1965 364 // Finally, make sure that the two parameters are consistent.
tonyp@1965 365 if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
tonyp@1965 366 char buffer[256];
tonyp@1965 367 jio_snprintf(buffer, 256,
tonyp@1965 368 "MaxGCPauseMillis (%u) should be less than "
tonyp@1965 369 "GCPauseIntervalMillis (%u)",
tonyp@1965 370 MaxGCPauseMillis, GCPauseIntervalMillis);
tonyp@1965 371 vm_exit_during_initialization(buffer);
tonyp@1965 372 }
tonyp@1965 373
tonyp@1965 374 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
johnc@1186 375 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
ysr@777 376 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
johnc@1186 377 _sigma = (double) G1ConfidencePercent / 100.0;
ysr@777 378
ysr@777 379 // start conservatively (around 50ms is about right)
ysr@777 380 _concurrent_mark_remark_times_ms->add(0.05);
ysr@777 381 _concurrent_mark_cleanup_times_ms->add(0.20);
ysr@777 382 _tenuring_threshold = MaxTenuringThreshold;
tonyp@3066 383 // _max_survivor_regions will be calculated by
tonyp@3119 384 // update_young_list_target_length() during initialization.
tonyp@3066 385 _max_survivor_regions = 0;
apetrusenko@980 386
tonyp@1791 387 assert(GCTimeRatio > 0,
tonyp@1791 388 "we should have set it to a default value set_g1_gc_flags() "
tonyp@1791 389 "if a user set it to 0");
tonyp@1791 390 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
tonyp@1791 391
tonyp@3119 392 uintx reserve_perc = G1ReservePercent;
tonyp@3119 393 // Put an artificial ceiling on this so that it's not set to a silly value.
tonyp@3119 394 if (reserve_perc > 50) {
tonyp@3119 395 reserve_perc = 50;
tonyp@3119 396 warning("G1ReservePercent is set to a value that is too large, "
tonyp@3119 397 "it's been updated to %u", reserve_perc);
tonyp@3119 398 }
tonyp@3119 399 _reserve_factor = (double) reserve_perc / 100.0;
brutisso@3120 400 // This will be set when the heap is expanded
tonyp@3119 401 // for the first time during initialization.
tonyp@3119 402 _reserve_regions = 0;
tonyp@3119 403
ysr@777 404 initialize_all();
tonyp@3209 405 _collectionSetChooser = new CollectionSetChooser();
brutisso@3358 406 _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
ysr@777 407 }
ysr@777 408
ysr@777 409 void G1CollectorPolicy::initialize_flags() {
ysr@777 410 set_min_alignment(HeapRegion::GrainBytes);
ysr@777 411 set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
apetrusenko@982 412 if (SurvivorRatio < 1) {
apetrusenko@982 413 vm_exit_during_initialization("Invalid survivor ratio specified");
apetrusenko@982 414 }
ysr@777 415 CollectorPolicy::initialize_flags();
ysr@777 416 }
ysr@777 417
brutisso@3358 418 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
brutisso@3358 419 assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
brutisso@3358 420 assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
brutisso@3358 421 assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");
brutisso@3120 422
brutisso@3120 423 if (FLAG_IS_CMDLINE(NewRatio)) {
brutisso@3120 424 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
tonyp@3172 425 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
brutisso@3120 426 } else {
brutisso@3358 427 _sizer_kind = SizerNewRatio;
brutisso@3358 428 _adaptive_size = false;
brutisso@3358 429 return;
brutisso@3120 430 }
brutisso@3120 431 }
brutisso@3120 432
brutisso@3358 433 if (FLAG_IS_CMDLINE(NewSize)) {
brutisso@3358 434 _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
brutisso@3358 435 if (FLAG_IS_CMDLINE(MaxNewSize)) {
brutisso@3358 436 _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
brutisso@3358 437 _sizer_kind = SizerMaxAndNewSize;
brutisso@3358 438 _adaptive_size = _min_desired_young_length == _max_desired_young_length;
brutisso@3358 439 } else {
brutisso@3358 440 _sizer_kind = SizerNewSizeOnly;
brutisso@3358 441 }
brutisso@3358 442 } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
brutisso@3358 443 _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
brutisso@3358 444 _sizer_kind = SizerMaxNewSizeOnly;
brutisso@3358 445 }
brutisso@3358 446 }
brutisso@3358 447
brutisso@3358 448 size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
brutisso@3358 449 size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
brutisso@3358 450 return MAX2((size_t)1, default_value);
brutisso@3358 451 }
brutisso@3358 452
brutisso@3358 453 size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
brutisso@3358 454 size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
brutisso@3358 455 return MAX2((size_t)1, default_value);
brutisso@3358 456 }
brutisso@3358 457
brutisso@3358 458 void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
brutisso@3358 459 assert(new_number_of_heap_regions > 0, "Heap must be initialized");
brutisso@3358 460
brutisso@3358 461 switch (_sizer_kind) {
brutisso@3358 462 case SizerDefaults:
brutisso@3358 463 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
brutisso@3358 464 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
brutisso@3358 465 break;
brutisso@3358 466 case SizerNewSizeOnly:
brutisso@3358 467 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
brutisso@3358 468 _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
brutisso@3358 469 break;
brutisso@3358 470 case SizerMaxNewSizeOnly:
brutisso@3358 471 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
brutisso@3358 472 _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
brutisso@3358 473 break;
brutisso@3358 474 case SizerMaxAndNewSize:
brutisso@3358 475 // Do nothing. Values set on the command line, don't update them at runtime.
brutisso@3358 476 break;
brutisso@3358 477 case SizerNewRatio:
brutisso@3358 478 _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
brutisso@3358 479 _max_desired_young_length = _min_desired_young_length;
brutisso@3358 480 break;
brutisso@3358 481 default:
brutisso@3358 482 ShouldNotReachHere();
brutisso@3358 483 }
brutisso@3358 484
brutisso@3120 485 assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
brutisso@3358 486 }
brutisso@3358 487
brutisso@3358 488 void G1CollectorPolicy::init() {
brutisso@3358 489 // Set aside an initial future to_space.
brutisso@3358 490 _g1 = G1CollectedHeap::heap();
brutisso@3358 491
brutisso@3358 492 assert(Heap_lock->owned_by_self(), "Locking discipline.");
brutisso@3358 493
brutisso@3358 494 initialize_gc_policy_counters();
brutisso@3358 495
brutisso@3120 496 if (adaptive_young_list_length()) {
brutisso@3065 497 _young_list_fixed_length = 0;
johnc@1829 498 } else {
brutisso@3358 499 _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
ysr@777 500 }
brutisso@3065 501 _free_regions_at_end_of_collection = _g1->free_regions();
tonyp@3119 502 update_young_list_target_length();
brutisso@3120 503 _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
johnc@1829 504
johnc@1829 505 // We may immediately start allocating regions and placing them on the
johnc@1829 506 // collection set list. Initialize the per-collection set info
johnc@1829 507 start_incremental_cset_building();
ysr@777 508 }
ysr@777 509
apetrusenko@980 510 // Create the jstat counters for the policy.
tonyp@3119 511 void G1CollectorPolicy::initialize_gc_policy_counters() {
brutisso@3065 512 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
apetrusenko@980 513 }
apetrusenko@980 514
tonyp@3119 515 bool G1CollectorPolicy::predict_will_fit(size_t young_length,
tonyp@3119 516 double base_time_ms,
tonyp@3119 517 size_t base_free_regions,
tonyp@3119 518 double target_pause_time_ms) {
tonyp@3119 519 if (young_length >= base_free_regions) {
tonyp@3119 520 // end condition 1: not enough space for the young regions
tonyp@3119 521 return false;
ysr@777 522 }
tonyp@3119 523
tonyp@3119 524 double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
tonyp@3119 525 size_t bytes_to_copy =
tonyp@3119 526 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
tonyp@3119 527 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
tonyp@3119 528 double young_other_time_ms = predict_young_other_time_ms(young_length);
tonyp@3119 529 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
tonyp@3119 530 if (pause_time_ms > target_pause_time_ms) {
tonyp@3119 531 // end condition 2: prediction is over the target pause time
tonyp@3119 532 return false;
tonyp@3119 533 }
tonyp@3119 534
tonyp@3119 535 size_t free_bytes =
tonyp@3119 536 (base_free_regions - young_length) * HeapRegion::GrainBytes;
tonyp@3119 537 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
tonyp@3119 538 // end condition 3: out-of-space (conservatively!)
tonyp@3119 539 return false;
tonyp@3119 540 }
tonyp@3119 541
tonyp@3119 542 // success!
tonyp@3119 543 return true;
ysr@777 544 }
ysr@777 545
brutisso@3120 546 void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
brutisso@3120 547 // re-calculate the necessary reserve
brutisso@3120 548 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
tonyp@3119 549 // We use ceiling so that if reserve_regions_d is > 0.0 (but
tonyp@3119 550 // smaller than 1.0) we'll get 1.
tonyp@3119 551 _reserve_regions = (size_t) ceil(reserve_regions_d);
brutisso@3120 552
brutisso@3358 553 _young_gen_sizer->heap_size_changed(new_number_of_regions);
tonyp@3119 554 }
tonyp@3119 555
tonyp@3119 556 size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
tonyp@3119 557 size_t base_min_length) {
tonyp@3119 558 size_t desired_min_length = 0;
ysr@777 559 if (adaptive_young_list_length()) {
tonyp@3119 560 if (_alloc_rate_ms_seq->num() > 3) {
tonyp@3119 561 double now_sec = os::elapsedTime();
tonyp@3119 562 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
tonyp@3119 563 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@3119 564 desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@3119 565 } else {
tonyp@3119 566 // otherwise we don't have enough info to make the prediction
tonyp@3119 567 }
ysr@777 568 }
brutisso@3120 569 desired_min_length += base_min_length;
brutisso@3120 570 // make sure we don't go below any user-defined minimum bound
brutisso@3358 571 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
ysr@777 572 }
ysr@777 573
tonyp@3119 574 size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
tonyp@3119 575 // Here, we might want to also take into account any additional
tonyp@3119 576 // constraints (i.e., user-defined minimum bound). Currently, we
tonyp@3119 577 // effectively don't set this bound.
brutisso@3358 578 return _young_gen_sizer->max_desired_young_length();
tonyp@3119 579 }
tonyp@3119 580
tonyp@3119 581 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
tonyp@3119 582 if (rs_lengths == (size_t) -1) {
tonyp@3119 583 // if it's set to the default value (-1), we should predict it;
tonyp@3119 584 // otherwise, use the given value.
tonyp@3119 585 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
tonyp@3119 586 }
tonyp@3119 587
tonyp@3119 588 // Calculate the absolute and desired min bounds.
tonyp@3119 589
tonyp@3119 590 // This is how many young regions we already have (currently: the survivors).
tonyp@3119 591 size_t base_min_length = recorded_survivor_regions();
tonyp@3119 592 // This is the absolute minimum young length, which ensures that we
tonyp@3119 593 // can allocate one eden region in the worst-case.
tonyp@3119 594 size_t absolute_min_length = base_min_length + 1;
tonyp@3119 595 size_t desired_min_length =
tonyp@3119 596 calculate_young_list_desired_min_length(base_min_length);
tonyp@3119 597 if (desired_min_length < absolute_min_length) {
tonyp@3119 598 desired_min_length = absolute_min_length;
tonyp@3119 599 }
tonyp@3119 600
tonyp@3119 601 // Calculate the absolute and desired max bounds.
tonyp@3119 602
tonyp@3119 603 // We will try our best not to "eat" into the reserve.
tonyp@3119 604 size_t absolute_max_length = 0;
tonyp@3119 605 if (_free_regions_at_end_of_collection > _reserve_regions) {
tonyp@3119 606 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
tonyp@3119 607 }
tonyp@3119 608 size_t desired_max_length = calculate_young_list_desired_max_length();
tonyp@3119 609 if (desired_max_length > absolute_max_length) {
tonyp@3119 610 desired_max_length = absolute_max_length;
tonyp@3119 611 }
tonyp@3119 612
tonyp@3119 613 size_t young_list_target_length = 0;
tonyp@3119 614 if (adaptive_young_list_length()) {
tonyp@3337 615 if (gcs_are_young()) {
tonyp@3119 616 young_list_target_length =
tonyp@3119 617 calculate_young_list_target_length(rs_lengths,
tonyp@3119 618 base_min_length,
tonyp@3119 619 desired_min_length,
tonyp@3119 620 desired_max_length);
tonyp@3119 621 _rs_lengths_prediction = rs_lengths;
tonyp@3119 622 } else {
tonyp@3119 623 // Don't calculate anything and let the code below bound it to
tonyp@3119 624 // the desired_min_length, i.e., do the next GC as soon as
tonyp@3119 625 // possible to maximize how many old regions we can add to it.
ysr@777 626 }
ysr@777 627 } else {
tonyp@3539 628 // The user asked for a fixed young gen so we'll fix the young gen
tonyp@3539 629 // whether the next GC is young or mixed.
tonyp@3539 630 young_list_target_length = _young_list_fixed_length;
ysr@777 631 }
ysr@777 632
tonyp@3119 633 // Make sure we don't go over the desired max length, nor under the
tonyp@3119 634 // desired min length. In case they clash, desired_min_length wins
tonyp@3119 635 // which is why that test is second.
tonyp@3119 636 if (young_list_target_length > desired_max_length) {
tonyp@3119 637 young_list_target_length = desired_max_length;
tonyp@3119 638 }
tonyp@3119 639 if (young_list_target_length < desired_min_length) {
tonyp@3119 640 young_list_target_length = desired_min_length;
tonyp@3119 641 }
tonyp@3119 642
tonyp@3119 643 assert(young_list_target_length > recorded_survivor_regions(),
tonyp@3119 644 "we should be able to allocate at least one eden region");
tonyp@3119 645 assert(young_list_target_length >= absolute_min_length, "post-condition");
tonyp@3119 646 _young_list_target_length = young_list_target_length;
tonyp@3119 647
tonyp@3119 648 update_max_gc_locker_expansion();
ysr@777 649 }
ysr@777 650
// Given the sampled remembered-set lengths and the min/max bounds the
// caller has already computed, find the largest young list length (in
// regions) whose predicted pause time still fits within the pause-time
// target, via a binary search over eden lengths. base_min_length (the
// already-committed part of the young gen, e.g. survivors) is excluded
// from the search and added back into the returned total. Only valid
// when the young gen is adaptively sized and the next GC is young.
size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      size_t base_min_length,
                                                      size_t desired_min_length,
                                                      size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  // Inputs to the per-length pause prediction that do not depend on
  // the candidate eden length, computed once up front.
  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  // Free regions usable for eden, after setting aside the reserve
  // (clamped at zero if the reserve already exceeds what's free).
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The results is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  // Add back the committed part of the young gen that was excluded
  // from the search.
  return base_min_length + min_young_length;
}
ysr@777 754
apetrusenko@980 755 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 756 double survivor_regions_evac_time = 0.0;
apetrusenko@980 757 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 758 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 759 r = r->get_next_young_region()) {
apetrusenko@980 760 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 761 }
apetrusenko@980 762 return survivor_regions_evac_time;
apetrusenko@980 763 }
apetrusenko@980 764
tonyp@3119 765 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
ysr@777 766 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 767
johnc@1829 768 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 769 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 770 // add 10% to avoid having to recalculate often
ysr@777 771 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
tonyp@3119 772 update_young_list_target_length(rs_lengths_prediction);
ysr@777 773 }
ysr@777 774 }
ysr@777 775
tonyp@3119 776
tonyp@3119 777
// CollectorPolicy hook for servicing an allocation request. G1 does
// not use this policy feature yet, so reaching it is a bug (the
// guarantee always fails).
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 784
// This method controls how a collector handles one or more
// of its generations being fully allocated. G1 does not use this
// policy feature yet, so reaching it is a bug (the guarantee always
// fails).
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 792
ysr@777 793
ysr@777 794 #ifndef PRODUCT
// Debug-only entry point: verifies the surv-rate-group ages of the
// whole young list against the short-lived group. Returns false if
// any inconsistency was found (and logged).
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}
ysr@777 801
ysr@777 802 bool
ysr@777 803 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 804 SurvRateGroup *surv_rate_group) {
ysr@777 805 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 806
ysr@777 807 const char* name = surv_rate_group->name();
ysr@777 808 bool ret = true;
ysr@777 809 int prev_age = -1;
ysr@777 810
ysr@777 811 for (HeapRegion* curr = head;
ysr@777 812 curr != NULL;
ysr@777 813 curr = curr->get_next_young_region()) {
ysr@777 814 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 815 if (group == NULL && !curr->is_survivor()) {
ysr@777 816 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 817 ret = false;
ysr@777 818 }
ysr@777 819
ysr@777 820 if (surv_rate_group == group) {
ysr@777 821 int age = curr->age_in_surv_rate_group();
ysr@777 822
ysr@777 823 if (age < 0) {
ysr@777 824 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 825 ret = false;
ysr@777 826 }
ysr@777 827
ysr@777 828 if (age <= prev_age) {
ysr@777 829 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 830 "(%d, %d)", name, age, prev_age);
ysr@777 831 ret = false;
ysr@777 832 }
ysr@777 833 prev_age = age;
ysr@777 834 }
ysr@777 835 }
ysr@777 836
ysr@777 837 return ret;
ysr@777 838 }
ysr@777 839 #endif // PRODUCT
ysr@777 840
// Marks the beginning of a Full GC: stamps the start time and tells
// the heap a full collection is in progress.
void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}
ysr@777 846
// Marks the end of a Full GC: records its duration and resets the
// incremental-collection heuristics, since a Full GC invalidates the
// state they were based on.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // No survivors after a Full GC.
  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  // Recompute the young target now that the free-region count changed.
  update_young_list_target_length();
  // Discard any old regions previously chosen as mixed-GC candidates;
  // their liveness info is stale after the Full GC.
  _collectionSetChooser->clearMarkedHeapRegions();
}
ysr@777 882
// Stamps the moment a stop-the-world operation was requested; used
// later to compute the stop-world latency / concurrent yield time.
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
ysr@777 886
// Called at the start of an evacuation pause: prints the pause header,
// refreshes per-pause policy state, and resets the per-pause counters
// and timing slots that will be filled in during the pause.
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. So, no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Stop-world latency since record_stop_world_start(), for the
  // TraceGen0Time summary.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  // Snapshot heap state at pause start; consumed at pause end.
  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

// NOTE(review): HotSpot debug builds conventionally key off ASSERT /
// !PRODUCT rather than DEBUG — confirm DEBUG is actually defined in
// the intended builds, otherwise this poisoning never runs.
#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_satb_filtering_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
    _par_last_gc_worker_other_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  // This is initialized to zero here and is set during
  // the evacuation pause if marking is in progress.
  _cur_satb_drain_time_ms = 0.0;
  // This is initialized to zero here and is set during the evacuation
  // pause if we actually waited for the root region scanning to finish.
  _root_region_scan_wait_time_ms = 0.0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
ysr@777 962
// Called at the end of the initial-mark phase: flags that marking is
// now in progress and records the initial-mark stop-world time.
void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                           mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
ysr@777 970
// Stamps the start of the remark pause and clears the during-marking
// flag (concurrent marking proper has finished at this point).
void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}
ysr@777 975
// Records the duration of the remark pause in the timing sequences,
// folds it into the cumulative mark stop-world time, and reports the
// pause to the MMU tracker.
void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  // Shift the "previous pause end" forward so mutator allocation-rate
  // accounting does not count the remark pause as mutator time.
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}
ysr@777 985
// Stamps the start of the concurrent-mark cleanup pause.
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
ysr@777 989
// Called once concurrent-mark cleanup has fully completed: closes the
// marking window and sets _last_young_gc, which steers the upcoming
// young/mixed transition (see the flag's users elsewhere in the file).
void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}
ysr@777 994
ysr@777 995 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 996 if (_stop_world_start > 0.0) {
ysr@777 997 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 998 _all_yield_times_ms->add(yield_ms);
ysr@777 999 }
ysr@777 1000 }
ysr@777 1001
// Hook for the end of a concurrent-phase yield; intentionally empty —
// the yield time itself is captured by record_concurrent_pause().
void G1CollectorPolicy::record_concurrent_pause_end() {
}
ysr@777 1004
// Sums n consecutive entries of the circular buffer sum_arr (capacity
// N), beginning at index start and wrapping around modulo N.
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T result = (T)0;
  int idx = start;
  for (int count = 0; count < n; count++) {
    result += sum_arr[idx % N];
    idx++;
  }
  return result;
}
ysr@777 1014
tonyp@1966 1015 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1016 const char* str,
brutisso@2712 1017 double* data) {
ysr@777 1018 double min = data[0], max = data[0];
ysr@777 1019 double total = 0.0;
brutisso@2645 1020 LineBuffer buf(level);
brutisso@2645 1021 buf.append("[%s (ms):", str);
jmasa@3294 1022 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1023 double val = data[i];
ysr@777 1024 if (val < min)
ysr@777 1025 min = val;
ysr@777 1026 if (val > max)
ysr@777 1027 max = val;
ysr@777 1028 total += val;
brutisso@2645 1029 buf.append(" %3.1lf", val);
ysr@777 1030 }
brutisso@2712 1031 buf.append_and_print_cr("");
jmasa@3294 1032 double avg = total / (double) no_of_gc_threads();
brutisso@2712 1033 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
brutisso@2712 1034 avg, min, max, max - min);
ysr@777 1035 }
ysr@777 1036
tonyp@1966 1037 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1038 const char* str,
brutisso@2712 1039 double* data) {
ysr@777 1040 double min = data[0], max = data[0];
ysr@777 1041 double total = 0.0;
brutisso@2645 1042 LineBuffer buf(level);
brutisso@2645 1043 buf.append("[%s :", str);
jmasa@3294 1044 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1045 double val = data[i];
ysr@777 1046 if (val < min)
ysr@777 1047 min = val;
ysr@777 1048 if (val > max)
ysr@777 1049 max = val;
ysr@777 1050 total += val;
brutisso@2645 1051 buf.append(" %d", (int) val);
ysr@777 1052 }
brutisso@2712 1053 buf.append_and_print_cr("");
jmasa@3294 1054 double avg = total / (double) no_of_gc_threads();
brutisso@2712 1055 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
brutisso@2712 1056 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
ysr@777 1057 }
ysr@777 1058
// Prints a single labelled millisecond value, indented by level.
void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}
ysr@777 1064
// Prints a single labelled integer value, indented by level.
void G1CollectorPolicy::print_stats(int level,
                                    const char* str,
                                    int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}
ysr@777 1070
johnc@3219 1071 double G1CollectorPolicy::avg_value(double* data) {
jmasa@2188 1072 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1073 double ret = 0.0;
jmasa@3294 1074 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1075 ret += data[i];
johnc@3219 1076 }
jmasa@3294 1077 return ret / (double) no_of_gc_threads();
ysr@777 1078 } else {
ysr@777 1079 return data[0];
ysr@777 1080 }
ysr@777 1081 }
ysr@777 1082
johnc@3219 1083 double G1CollectorPolicy::max_value(double* data) {
jmasa@2188 1084 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1085 double ret = data[0];
jmasa@3294 1086 for (uint i = 1; i < no_of_gc_threads(); ++i) {
johnc@3219 1087 if (data[i] > ret) {
ysr@777 1088 ret = data[i];
johnc@3219 1089 }
johnc@3219 1090 }
ysr@777 1091 return ret;
ysr@777 1092 } else {
ysr@777 1093 return data[0];
ysr@777 1094 }
ysr@777 1095 }
ysr@777 1096
johnc@3219 1097 double G1CollectorPolicy::sum_of_values(double* data) {
jmasa@2188 1098 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1099 double sum = 0.0;
jmasa@3294 1100 for (uint i = 0; i < no_of_gc_threads(); i++) {
ysr@777 1101 sum += data[i];
johnc@3219 1102 }
ysr@777 1103 return sum;
ysr@777 1104 } else {
ysr@777 1105 return data[0];
ysr@777 1106 }
ysr@777 1107 }
ysr@777 1108
johnc@3219 1109 double G1CollectorPolicy::max_sum(double* data1, double* data2) {
ysr@777 1110 double ret = data1[0] + data2[0];
ysr@777 1111
jmasa@2188 1112 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1113 for (uint i = 1; i < no_of_gc_threads(); ++i) {
ysr@777 1114 double data = data1[i] + data2[i];
johnc@3219 1115 if (data > ret) {
ysr@777 1116 ret = data;
johnc@3219 1117 }
ysr@777 1118 }
ysr@777 1119 }
ysr@777 1120 return ret;
ysr@777 1121 }
ysr@777 1122
// Decides whether a concurrent marking cycle should be initiated.
// Returns true iff: no cycle is currently running, we are in young
// (not mixed) GC mode, and the non-young occupancy plus the pending
// allocation (alloc_word_size — presumably defaulted in the header;
// confirm) would exceed InitiatingHeapOccupancyPercent of the current
// capacity. source is a human-readable tag for the ergonomics log.
bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  // Never request a new cycle while one is already in progress.
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      // Over threshold but still doing mixed GCs: log why we decline,
      // but do not start a cycle.
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}
brutisso@3456 1166
ysr@777 1167 // Anything below that is considered to be zero
ysr@777 1168 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1169
jmasa@3294 1170 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
ysr@777 1171 double end_time_sec = os::elapsedTime();
ysr@777 1172 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1173 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
tonyp@3289 1174 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
tonyp@3289 1175 "otherwise, the subtraction below does not make sense");
ysr@777 1176 size_t rs_size =
tonyp@3289 1177 _cur_collection_pause_used_regions_at_start - cset_region_length();
ysr@777 1178 size_t cur_used_bytes = _g1->used();
ysr@777 1179 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1180 bool last_pause_included_initial_mark = false;
tonyp@2062 1181 bool update_stats = !_g1->evacuation_failed();
jmasa@3294 1182 set_no_of_gc_threads(no_of_gc_threads);
ysr@777 1183
ysr@777 1184 #ifndef PRODUCT
ysr@777 1185 if (G1YoungSurvRateVerbose) {
ysr@777 1186 gclog_or_tty->print_cr("");
ysr@777 1187 _short_lived_surv_rate_group->print();
ysr@777 1188 // do that for any other surv rate groups too
ysr@777 1189 }
ysr@777 1190 #endif // PRODUCT
ysr@777 1191
brutisso@3065 1192 last_pause_included_initial_mark = during_initial_mark_pause();
brutisso@3456 1193 if (last_pause_included_initial_mark) {
brutisso@3065 1194 record_concurrent_mark_init_end(0.0);
tonyp@3539 1195 } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
brutisso@3456 1196 // Note: this might have already been set, if during the last
brutisso@3456 1197 // pause we decided to start a cycle but at the beginning of
brutisso@3456 1198 // this pause we decided to postpone it. That's OK.
brutisso@3456 1199 set_initiate_conc_mark_if_possible();
brutisso@3456 1200 }
brutisso@3065 1201
ysr@777 1202 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1203 end_time_sec, false);
ysr@777 1204
ysr@777 1205 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1206 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1207 // can lead to more memory being used during collection than was used
ysr@777 1208 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1209 // Pauses in which evacuation failed can also lead to negative
ysr@777 1210 // collections, since no space is reclaimed from a region containing an
ysr@777 1211 // object whose evacuation failed.
ysr@777 1212 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1213 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1214 // (DLD, 10/05.)
ysr@777 1215 assert((true || parallel) // Always using GC LABs now.
ysr@777 1216 || _g1->evacuation_failed()
ysr@777 1217 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1218 "Negative collection");
ysr@777 1219
ysr@777 1220 size_t freed_bytes =
ysr@777 1221 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1222 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1223
ysr@777 1224 double survival_fraction =
ysr@777 1225 (double)surviving_bytes/
ysr@777 1226 (double)_collection_set_bytes_used_before;
ysr@777 1227
johnc@3219 1228 // These values are used to update the summary information that is
johnc@3219 1229 // displayed when TraceGen0Time is enabled, and are output as part
johnc@3219 1230 // of the PrintGCDetails output, in the non-parallel case.
johnc@3219 1231
johnc@3021 1232 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
tonyp@3416 1233 double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
johnc@3021 1234 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
johnc@3021 1235 double update_rs_processed_buffers =
johnc@3021 1236 sum_of_values(_par_last_update_rs_processed_buffers);
johnc@3021 1237 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
johnc@3021 1238 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
johnc@3021 1239 double termination_time = avg_value(_par_last_termination_times_ms);
johnc@3021 1240
johnc@3219 1241 double known_time = ext_root_scan_time +
tonyp@3416 1242 satb_filtering_time +
johnc@3219 1243 update_rs_time +
johnc@3219 1244 scan_rs_time +
johnc@3219 1245 obj_copy_time;
johnc@3219 1246
johnc@3219 1247 double other_time_ms = elapsed_ms;
johnc@3219 1248
johnc@3219 1249 // Subtract the SATB drain time. It's initialized to zero at the
johnc@3219 1250 // start of the pause and is updated during the pause if marking
johnc@3219 1251 // is in progress.
johnc@3219 1252 other_time_ms -= _cur_satb_drain_time_ms;
johnc@3219 1253
tonyp@3464 1254 // Subtract the root region scanning wait time. It's initialized to
tonyp@3464 1255 // zero at the start of the pause.
tonyp@3464 1256 other_time_ms -= _root_region_scan_wait_time_ms;
tonyp@3464 1257
johnc@3219 1258 if (parallel) {
johnc@3219 1259 other_time_ms -= _cur_collection_par_time_ms;
johnc@3219 1260 } else {
johnc@3219 1261 other_time_ms -= known_time;
johnc@3219 1262 }
johnc@3219 1263
johnc@3219 1264 // Subtract the time taken to clean the card table from the
johnc@3219 1265 // current value of "other time"
johnc@3219 1266 other_time_ms -= _cur_clear_ct_time_ms;
johnc@3219 1267
johnc@3296 1268 // Subtract the time spent completing marking in the collection
johnc@3296 1269 // set. Note if marking is not in progress during the pause
johnc@3296 1270 // the value of _mark_closure_time_ms will be zero.
johnc@3296 1271 other_time_ms -= _mark_closure_time_ms;
johnc@3296 1272
johnc@3219 1273 // TraceGen0Time and TraceGen1Time summary info updating.
johnc@3219 1274 _all_pause_times_ms->add(elapsed_ms);
johnc@3021 1275
tonyp@1030 1276 if (update_stats) {
johnc@3219 1277 _summary->record_total_time_ms(elapsed_ms);
johnc@3219 1278 _summary->record_other_time_ms(other_time_ms);
johnc@3219 1279
johnc@3219 1280 MainBodySummary* body_summary = _summary->main_body_summary();
johnc@3219 1281 assert(body_summary != NULL, "should not be null!");
johnc@3219 1282
johnc@3219 1283 // This will be non-zero iff marking is currently in progress (i.e.
johnc@3219 1284 // _g1->mark_in_progress() == true) and the currrent pause was not
johnc@3219 1285 // an initial mark pause. Since the body_summary items are NumberSeqs,
johnc@3219 1286 // however, they have to be consistent and updated in lock-step with
johnc@3219 1287 // each other. Therefore we unconditionally record the SATB drain
johnc@3219 1288 // time - even if it's zero.
johnc@3219 1289 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
tonyp@3464 1290 body_summary->record_root_region_scan_wait_time_ms(
tonyp@3464 1291 _root_region_scan_wait_time_ms);
johnc@3021 1292
johnc@3021 1293 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
tonyp@3416 1294 body_summary->record_satb_filtering_time_ms(satb_filtering_time);
johnc@3021 1295 body_summary->record_update_rs_time_ms(update_rs_time);
johnc@3021 1296 body_summary->record_scan_rs_time_ms(scan_rs_time);
johnc@3021 1297 body_summary->record_obj_copy_time_ms(obj_copy_time);
johnc@3219 1298
johnc@3021 1299 if (parallel) {
johnc@3021 1300 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
johnc@3021 1301 body_summary->record_termination_time_ms(termination_time);
johnc@3219 1302
johnc@3219 1303 double parallel_known_time = known_time + termination_time;
johnc@3219 1304 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
johnc@3021 1305 body_summary->record_parallel_other_time_ms(parallel_other_time);
johnc@3021 1306 }
johnc@3219 1307
johnc@3021 1308 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
johnc@3219 1309 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
johnc@3021 1310
ysr@777 1311 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1312 // fragmentation can produce negative collections. Same with evac
ysr@777 1313 // failure.
ysr@777 1314 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1315 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1316 // (DLD, 10/05.
ysr@777 1317 assert((true || parallel)
ysr@777 1318 || _g1->evacuation_failed()
ysr@777 1319 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1320 "Or else negative collection!");
johnc@3219 1321
ysr@777 1322 // this is where we update the allocation rate of the application
ysr@777 1323 double app_time_ms =
ysr@777 1324 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1325 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1326 // This usually happens due to the timer not having the required
ysr@777 1327 // granularity. Some Linuxes are the usual culprits.
ysr@777 1328 // We'll just set it to something (arbitrarily) small.
ysr@777 1329 app_time_ms = 1.0;
ysr@777 1330 }
tonyp@3289 1331 // We maintain the invariant that all objects allocated by mutator
tonyp@3289 1332 // threads will be allocated out of eden regions. So, we can use
tonyp@3289 1333 // the eden region number allocated since the previous GC to
tonyp@3289 1334 // calculate the application's allocate rate. The only exception
tonyp@3289 1335 // to that is humongous objects that are allocated separately. But
tonyp@3289 1336 // given that humongous object allocations do not really affect
tonyp@3289 1337 // either the pause's duration nor when the next pause will take
tonyp@3289 1338 // place we can safely ignore them here.
tonyp@3289 1339 size_t regions_allocated = eden_cset_region_length();
ysr@777 1340 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1341 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1342
ysr@777 1343 double interval_ms =
ysr@777 1344 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1345 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1346 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1347 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1348 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1349 #ifndef PRODUCT
ysr@1521 1350 // Dump info to allow post-facto debugging
ysr@1521 1351 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1352 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1353 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1354 _recent_gc_times_ms->dump();
ysr@1521 1355 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1356 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1357 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1358 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1359 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1360 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1361 #endif // !PRODUCT
ysr@1522 1362 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1363 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1364 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1365 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1366 } else {
ysr@1521 1367 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1368 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1369 }
ysr@1521 1370 }
ysr@777 1371 }
ysr@777 1372
johnc@3219 1373 for (int i = 0; i < _aux_num; ++i) {
johnc@3219 1374 if (_cur_aux_times_set[i]) {
johnc@3219 1375 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
johnc@3219 1376 }
johnc@3219 1377 }
johnc@3219 1378
johnc@3219 1379 // PrintGCDetails output
ysr@777 1380 if (PrintGCDetails) {
johnc@3219 1381 bool print_marking_info =
johnc@3219 1382 _g1->mark_in_progress() && !last_pause_included_initial_mark;
johnc@3219 1383
tonyp@2062 1384 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1385 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1386 elapsed_ms / 1000.0);
ysr@777 1387
tonyp@3464 1388 if (_root_region_scan_wait_time_ms > 0.0) {
tonyp@3464 1389 print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
tonyp@3464 1390 }
tonyp@2062 1391 if (parallel) {
tonyp@2062 1392 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
johnc@3219 1393 print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
johnc@3219 1394 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
johnc@3219 1395 if (print_marking_info) {
tonyp@3416 1396 print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
johnc@3219 1397 }
tonyp@2062 1398 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
brutisso@2712 1399 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
tonyp@2062 1400 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1401 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1402 print_par_stats(2, "Termination", _par_last_termination_times_ms);
brutisso@2712 1403 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
johnc@3219 1404 print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
brutisso@2712 1405
brutisso@2712 1406 for (int i = 0; i < _parallel_gc_threads; i++) {
brutisso@2712 1407 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
johnc@3219 1408
johnc@3219 1409 double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
tonyp@3416 1410 _par_last_satb_filtering_times_ms[i] +
johnc@3219 1411 _par_last_update_rs_times_ms[i] +
johnc@3219 1412 _par_last_scan_rs_times_ms[i] +
johnc@3219 1413 _par_last_obj_copy_times_ms[i] +
johnc@3219 1414 _par_last_termination_times_ms[i];
johnc@3219 1415
johnc@3219 1416 _par_last_gc_worker_other_times_ms[i] = _cur_collection_par_time_ms - worker_known_time;
brutisso@2712 1417 }
johnc@3219 1418 print_par_stats(2, "GC Worker", _par_last_gc_worker_times_ms);
johnc@3219 1419 print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
tonyp@2062 1420 } else {
johnc@3219 1421 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
johnc@3219 1422 if (print_marking_info) {
tonyp@3416 1423 print_stats(1, "SATB Filtering", satb_filtering_time);
johnc@3219 1424 }
tonyp@2062 1425 print_stats(1, "Update RS", update_rs_time);
johnc@3219 1426 print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
tonyp@2062 1427 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1428 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1429 }
johnc@3296 1430 if (print_marking_info) {
johnc@3296 1431 print_stats(1, "Complete CSet Marking", _mark_closure_time_ms);
johnc@3296 1432 }
johnc@3219 1433 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
johnc@1325 1434 #ifndef PRODUCT
johnc@1325 1435 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1436 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1437 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1438 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1439 if (_num_cc_clears > 0) {
johnc@1325 1440 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1441 }
johnc@1325 1442 #endif
ysr@777 1443 print_stats(1, "Other", other_time_ms);
johnc@3296 1444 print_stats(2, "Choose CSet",
johnc@3296 1445 (_recorded_young_cset_choice_time_ms +
johnc@3296 1446 _recorded_non_young_cset_choice_time_ms));
johnc@3175 1447 print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
johnc@3175 1448 print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
johnc@3296 1449 print_stats(2, "Free CSet",
johnc@3296 1450 (_recorded_young_free_cset_time_ms +
johnc@3296 1451 _recorded_non_young_free_cset_time_ms));
johnc@1829 1452
ysr@777 1453 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1454 if (_cur_aux_times_set[i]) {
ysr@777 1455 char buffer[96];
ysr@777 1456 sprintf(buffer, "Aux%d", i);
ysr@777 1457 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1458 }
ysr@777 1459 }
ysr@777 1460 }
ysr@777 1461
ysr@777 1462 // Update the efficiency-since-mark vars.
ysr@777 1463 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1464 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1465 // This usually happens due to the timer not having the required
ysr@777 1466 // granularity. Some Linuxes are the usual culprits.
ysr@777 1467 // We'll just set it to something (arbitrarily) small.
ysr@777 1468 proc_ms = 1.0;
ysr@777 1469 }
ysr@777 1470 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1471
ysr@777 1472 bool new_in_marking_window = _in_marking_window;
ysr@777 1473 bool new_in_marking_window_im = false;
tonyp@1794 1474 if (during_initial_mark_pause()) {
ysr@777 1475 new_in_marking_window = true;
ysr@777 1476 new_in_marking_window_im = true;
ysr@777 1477 }
ysr@777 1478
tonyp@3337 1479 if (_last_young_gc) {
tonyp@3539 1480 // This is supposed to to be the "last young GC" before we start
tonyp@3539 1481 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
tonyp@3539 1482
johnc@3178 1483 if (!last_pause_included_initial_mark) {
tonyp@3539 1484 if (next_gc_should_be_mixed("start mixed GCs",
tonyp@3539 1485 "do not start mixed GCs")) {
tonyp@3539 1486 set_gcs_are_young(false);
tonyp@3539 1487 }
johnc@3178 1488 } else {
tonyp@3337 1489 ergo_verbose0(ErgoMixedGCs,
tonyp@3337 1490 "do not start mixed GCs",
johnc@3178 1491 ergo_format_reason("concurrent cycle is about to start"));
johnc@3178 1492 }
tonyp@3337 1493 _last_young_gc = false;
brutisso@3065 1494 }
brutisso@3065 1495
tonyp@3337 1496 if (!_last_gc_was_young) {
tonyp@3539 1497 // This is a mixed GC. Here we decide whether to continue doing
tonyp@3539 1498 // mixed GCs or not.
tonyp@3539 1499
tonyp@3539 1500 if (!next_gc_should_be_mixed("continue mixed GCs",
tonyp@3539 1501 "do not continue mixed GCs")) {
tonyp@3337 1502 set_gcs_are_young(true);
ysr@777 1503 }
brutisso@3065 1504 }
tonyp@3337 1505
tonyp@3337 1506 if (_last_gc_was_young && !_during_marking) {
brutisso@3065 1507 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1508 }
ysr@777 1509
ysr@777 1510 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1511 // do that for any other surv rate groupsx
ysr@777 1512
apetrusenko@1112 1513 if (update_stats) {
ysr@777 1514 double pause_time_ms = elapsed_ms;
ysr@777 1515
ysr@777 1516 size_t diff = 0;
ysr@777 1517 if (_max_pending_cards >= _pending_cards)
ysr@777 1518 diff = _max_pending_cards - _pending_cards;
ysr@777 1519 _pending_card_diff_seq->add((double) diff);
ysr@777 1520
ysr@777 1521 double cost_per_card_ms = 0.0;
ysr@777 1522 if (_pending_cards > 0) {
ysr@777 1523 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1524 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1525 }
ysr@777 1526
ysr@777 1527 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1528
ysr@777 1529 double cost_per_entry_ms = 0.0;
ysr@777 1530 if (cards_scanned > 10) {
ysr@777 1531 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
tonyp@3337 1532 if (_last_gc_was_young) {
ysr@777 1533 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1534 } else {
tonyp@3337 1535 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1536 }
ysr@777 1537 }
ysr@777 1538
ysr@777 1539 if (_max_rs_lengths > 0) {
ysr@777 1540 double cards_per_entry_ratio =
ysr@777 1541 (double) cards_scanned / (double) _max_rs_lengths;
tonyp@3337 1542 if (_last_gc_was_young) {
tonyp@3337 1543 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1544 } else {
tonyp@3337 1545 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1546 }
ysr@777 1547 }
ysr@777 1548
tonyp@3356 1549 // This is defensive. For a while _max_rs_lengths could get
tonyp@3356 1550 // smaller than _recorded_rs_lengths which was causing
tonyp@3356 1551 // rs_length_diff to get very large and mess up the RSet length
tonyp@3356 1552 // predictions. The reason was unsafe concurrent updates to the
tonyp@3356 1553 // _inc_cset_recorded_rs_lengths field which the code below guards
tonyp@3356 1554 // against (see CR 7118202). This bug has now been fixed (see CR
tonyp@3356 1555 // 7119027). However, I'm still worried that
tonyp@3356 1556 // _inc_cset_recorded_rs_lengths might still end up somewhat
tonyp@3356 1557 // inaccurate. The concurrent refinement thread calculates an
tonyp@3356 1558 // RSet's length concurrently with other CR threads updating it
tonyp@3356 1559 // which might cause it to calculate the length incorrectly (if,
tonyp@3356 1560 // say, it's in mid-coarsening). So I'll leave in the defensive
tonyp@3356 1561 // conditional below just in case.
tonyp@3326 1562 size_t rs_length_diff = 0;
tonyp@3326 1563 if (_max_rs_lengths > _recorded_rs_lengths) {
tonyp@3326 1564 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
tonyp@3326 1565 }
tonyp@3326 1566 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1567
ysr@777 1568 size_t copied_bytes = surviving_bytes;
ysr@777 1569 double cost_per_byte_ms = 0.0;
ysr@777 1570 if (copied_bytes > 0) {
ysr@777 1571 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
tonyp@3337 1572 if (_in_marking_window) {
ysr@777 1573 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
tonyp@3337 1574 } else {
ysr@777 1575 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
tonyp@3337 1576 }
ysr@777 1577 }
ysr@777 1578
ysr@777 1579 double all_other_time_ms = pause_time_ms -
johnc@1829 1580 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1581 _mark_closure_time_ms + termination_time);
ysr@777 1582
ysr@777 1583 double young_other_time_ms = 0.0;
tonyp@3289 1584 if (young_cset_region_length() > 0) {
ysr@777 1585 young_other_time_ms =
ysr@777 1586 _recorded_young_cset_choice_time_ms +
ysr@777 1587 _recorded_young_free_cset_time_ms;
ysr@777 1588 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
tonyp@3289 1589 (double) young_cset_region_length());
ysr@777 1590 }
ysr@777 1591 double non_young_other_time_ms = 0.0;
tonyp@3289 1592 if (old_cset_region_length() > 0) {
ysr@777 1593 non_young_other_time_ms =
ysr@777 1594 _recorded_non_young_cset_choice_time_ms +
ysr@777 1595 _recorded_non_young_free_cset_time_ms;
ysr@777 1596
ysr@777 1597 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
tonyp@3289 1598 (double) old_cset_region_length());
ysr@777 1599 }
ysr@777 1600
ysr@777 1601 double constant_other_time_ms = all_other_time_ms -
ysr@777 1602 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1603 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1604
ysr@777 1605 double survival_ratio = 0.0;
ysr@777 1606 if (_bytes_in_collection_set_before_gc > 0) {
tonyp@3028 1607 survival_ratio = (double) _bytes_copied_during_gc /
tonyp@3028 1608 (double) _bytes_in_collection_set_before_gc;
ysr@777 1609 }
ysr@777 1610
ysr@777 1611 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1612 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1613 }
ysr@777 1614
ysr@777 1615 _in_marking_window = new_in_marking_window;
ysr@777 1616 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1617 _free_regions_at_end_of_collection = _g1->free_regions();
tonyp@3119 1618 update_young_list_target_length();
ysr@777 1619
iveresov@1546 1620 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1621 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1622 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
tonyp@3209 1623
tonyp@3209 1624 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 1625 }
ysr@777 1626
// Helpers for printing a byte count in a human-readable unit:
// EXT_SIZE_FORMAT is the printf-style format and EXT_SIZE_PARAMS
// expands to the matching (scaled value, unit string) argument pair.
// NOTE(review): "%d" assumes the value returned by
// byte_size_in_proper_unit() fits in an int - confirm against its
// declaration.
#define EXT_SIZE_FORMAT "%d%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((bytes)),                            \
  proper_unit_for_byte_size((bytes))
tonyp@2961 1631
// Print the heap occupancy transition for the pause that just ended.
// With -XX:+PrintGCDetails: a detailed line with eden, survivor and
// total heap sizes (before -> after). With plain -XX:+PrintGC: only
// the overall used/capacity transition.
void G1CollectorPolicy::print_heap_transition() {
  if (PrintGCDetails) {
    YoungList* young_list = _g1->young_list();
    size_t eden_bytes = young_list->eden_used_bytes();
    size_t survivor_bytes = young_list->survivor_used_bytes();
    size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
    size_t used = _g1->used();
    size_t capacity = _g1->capacity();
    // Eden capacity: the young list target minus the space already
    // taken up by survivor regions.
    size_t eden_capacity =
      (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;

    gclog_or_tty->print_cr(
      " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
      "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
      "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
      EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
      EXT_SIZE_PARAMS(_eden_bytes_before_gc),
      EXT_SIZE_PARAMS(_prev_eden_capacity),
      EXT_SIZE_PARAMS(eden_bytes),
      EXT_SIZE_PARAMS(eden_capacity),
      EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
      EXT_SIZE_PARAMS(survivor_bytes),
      EXT_SIZE_PARAMS(used_before_gc),
      EXT_SIZE_PARAMS(_capacity_before_gc),
      EXT_SIZE_PARAMS(used),
      EXT_SIZE_PARAMS(capacity));

    // Remember the eden capacity so it can be shown as the "before"
    // value on the next transition line.
    _prev_eden_capacity = eden_capacity;
  } else if (PrintGC) {
    _g1->print_size_transition(gclog_or_tty,
                               _cur_collection_pause_used_at_start_bytes,
                               _g1->used(), _g1->capacity());
  }
}
tonyp@2961 1666
iveresov@1546 1667 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1668 double update_rs_processed_buffers,
iveresov@1546 1669 double goal_ms) {
iveresov@1546 1670 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1671 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1672
tonyp@1717 1673 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1674 const int k_gy = 3, k_gr = 6;
iveresov@1546 1675 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1676
iveresov@1546 1677 int g = cg1r->green_zone();
iveresov@1546 1678 if (update_rs_time > goal_ms) {
iveresov@1546 1679 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
iveresov@1546 1680 } else {
iveresov@1546 1681 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1682 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1683 }
iveresov@1546 1684 }
iveresov@1546 1685 // Change the refinement threads params
iveresov@1546 1686 cg1r->set_green_zone(g);
iveresov@1546 1687 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1688 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1689 cg1r->reinitialize_threads();
iveresov@1546 1690
iveresov@1546 1691 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1692 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1693 cg1r->yellow_zone());
iveresov@1546 1694 // Change the barrier params
iveresov@1546 1695 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1696 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1697 }
iveresov@1546 1698
iveresov@1546 1699 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1700 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1701 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1702 } else {
iveresov@1546 1703 dcqs.set_completed_queue_padding(0);
iveresov@1546 1704 }
iveresov@1546 1705 dcqs.notify_if_necessary();
iveresov@1546 1706 }
iveresov@1546 1707
ysr@777 1708 double
ysr@777 1709 G1CollectorPolicy::
ysr@777 1710 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1711 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1712
ysr@777 1713 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1714 size_t young_num = g1h->young_list()->length();
ysr@777 1715 if (young_num == 0)
ysr@777 1716 return 0.0;
ysr@777 1717
ysr@777 1718 young_num += adjustment;
ysr@777 1719 size_t pending_cards = predict_pending_cards();
johnc@1829 1720 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1721 predict_rs_length_diff();
ysr@777 1722 size_t card_num;
tonyp@3337 1723 if (gcs_are_young()) {
ysr@777 1724 card_num = predict_young_card_num(rs_lengths);
tonyp@3337 1725 } else {
ysr@777 1726 card_num = predict_non_young_card_num(rs_lengths);
tonyp@3337 1727 }
ysr@777 1728 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1729 double accum_yg_surv_rate =
ysr@777 1730 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1731
ysr@777 1732 size_t bytes_to_copy =
ysr@777 1733 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1734
ysr@777 1735 return
ysr@777 1736 predict_rs_update_time_ms(pending_cards) +
ysr@777 1737 predict_rs_scan_time_ms(card_num) +
ysr@777 1738 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1739 predict_young_other_time_ms(young_num) +
ysr@777 1740 predict_constant_other_time_ms();
ysr@777 1741 }
ysr@777 1742
ysr@777 1743 double
ysr@777 1744 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1745 size_t rs_length = predict_rs_length_diff();
ysr@777 1746 size_t card_num;
tonyp@3337 1747 if (gcs_are_young()) {
ysr@777 1748 card_num = predict_young_card_num(rs_length);
tonyp@3337 1749 } else {
ysr@777 1750 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1751 }
ysr@777 1752 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1753 }
ysr@777 1754
ysr@777 1755 double
ysr@777 1756 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1757 size_t scanned_cards) {
ysr@777 1758 return
ysr@777 1759 predict_rs_update_time_ms(pending_cards) +
ysr@777 1760 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1761 predict_constant_other_time_ms();
ysr@777 1762 }
ysr@777 1763
ysr@777 1764 double
ysr@777 1765 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1766 bool young) {
ysr@777 1767 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1768 size_t card_num;
tonyp@3337 1769 if (gcs_are_young()) {
ysr@777 1770 card_num = predict_young_card_num(rs_length);
tonyp@3337 1771 } else {
ysr@777 1772 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1773 }
ysr@777 1774 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1775
ysr@777 1776 double region_elapsed_time_ms =
ysr@777 1777 predict_rs_scan_time_ms(card_num) +
ysr@777 1778 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1779
ysr@777 1780 if (young)
ysr@777 1781 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1782 else
ysr@777 1783 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1784
ysr@777 1785 return region_elapsed_time_ms;
ysr@777 1786 }
ysr@777 1787
ysr@777 1788 size_t
ysr@777 1789 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1790 size_t bytes_to_copy;
ysr@777 1791 if (hr->is_marked())
ysr@777 1792 bytes_to_copy = hr->max_live_bytes();
ysr@777 1793 else {
tonyp@3539 1794 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
ysr@777 1795 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1796 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1797 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1798 }
ysr@777 1799 return bytes_to_copy;
ysr@777 1800 }
ysr@777 1801
ysr@777 1802 void
tonyp@3289 1803 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
tonyp@3289 1804 size_t survivor_cset_region_length) {
tonyp@3289 1805 _eden_cset_region_length = eden_cset_region_length;
tonyp@3289 1806 _survivor_cset_region_length = survivor_cset_region_length;
tonyp@3289 1807 _old_cset_region_length = 0;
johnc@1829 1808 }
johnc@1829 1809
// Record the sampled RSet lengths of the incremental collection set;
// later compared against _max_rs_lengths to compute the RSet length
// prediction error (rs_length_diff).
void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}
johnc@1829 1813
ysr@777 1814 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1815 double elapsed_ms) {
ysr@777 1816 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1817 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1818 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1819 }
ysr@777 1820
ysr@777 1821 size_t G1CollectorPolicy::expansion_amount() {
tonyp@3114 1822 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
tonyp@3114 1823 double threshold = _gc_overhead_perc;
tonyp@3114 1824 if (recent_gc_overhead > threshold) {
johnc@1186 1825 // We will double the existing space, or take
johnc@1186 1826 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 1827 // space, whichever is smaller, bounded below by a minimum
johnc@1186 1828 // expansion (unless that's all that's left.)
ysr@777 1829 const size_t min_expand_bytes = 1*M;
johnc@2504 1830 size_t reserved_bytes = _g1->max_capacity();
ysr@777 1831 size_t committed_bytes = _g1->capacity();
ysr@777 1832 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 1833 size_t expand_bytes;
ysr@777 1834 size_t expand_bytes_via_pct =
johnc@1186 1835 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 1836 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 1837 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 1838 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
tonyp@3114 1839
tonyp@3114 1840 ergo_verbose5(ErgoHeapSizing,
tonyp@3114 1841 "attempt heap expansion",
tonyp@3114 1842 ergo_format_reason("recent GC overhead higher than "
tonyp@3114 1843 "threshold after GC")
tonyp@3114 1844 ergo_format_perc("recent GC overhead")
tonyp@3114 1845 ergo_format_perc("threshold")
tonyp@3114 1846 ergo_format_byte("uncommitted")
tonyp@3114 1847 ergo_format_byte_perc("calculated expansion amount"),
tonyp@3114 1848 recent_gc_overhead, threshold,
tonyp@3114 1849 uncommitted_bytes,
tonyp@3114 1850 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
tonyp@3114 1851
ysr@777 1852 return expand_bytes;
ysr@777 1853 } else {
ysr@777 1854 return 0;
ysr@777 1855 }
ysr@777 1856 }
ysr@777 1857
ysr@777 1858 class CountCSClosure: public HeapRegionClosure {
ysr@777 1859 G1CollectorPolicy* _g1_policy;
ysr@777 1860 public:
ysr@777 1861 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 1862 _g1_policy(g1_policy) {}
ysr@777 1863 bool doHeapRegion(HeapRegion* r) {
ysr@777 1864 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 1865 return false;
ysr@777 1866 }
ysr@777 1867 };
ysr@777 1868
ysr@777 1869 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 1870 CountCSClosure cs_closure(this);
ysr@777 1871 _g1->collection_set_iterate(&cs_closure);
ysr@777 1872 }
ysr@777 1873
johnc@3219 1874 void G1CollectorPolicy::print_summary(int level,
johnc@3219 1875 const char* str,
johnc@3219 1876 NumberSeq* seq) const {
ysr@777 1877 double sum = seq->sum();
brutisso@2645 1878 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 1879 str, sum / 1000.0, seq->avg());
ysr@777 1880 }
ysr@777 1881
// Like print_summary(), but additionally prints the sample count,
// standard deviation and maximum of the sequence on a second,
// further-indented line.
void G1CollectorPolicy::print_summary_sd(int level,
                                         const char* str,
                                         NumberSeq* seq) const {
  print_summary(level, str, seq);
  LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                seq->num(), seq->sd(), seq->maximum());
}
ysr@777 1889
ysr@777 1890 void G1CollectorPolicy::check_other_times(int level,
ysr@777 1891 NumberSeq* other_times_ms,
ysr@777 1892 NumberSeq* calc_other_times_ms) const {
ysr@777 1893 bool should_print = false;
brutisso@2645 1894 LineBuffer buf(level + 2);
ysr@777 1895
ysr@777 1896 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 1897 fabs(calc_other_times_ms->sum()));
ysr@777 1898 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 1899 fabs(calc_other_times_ms->sum()));
ysr@777 1900 double sum_ratio = max_sum / min_sum;
ysr@777 1901 if (sum_ratio > 1.1) {
ysr@777 1902 should_print = true;
brutisso@2645 1903 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 1904 }
ysr@777 1905
ysr@777 1906 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 1907 fabs(calc_other_times_ms->avg()));
ysr@777 1908 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 1909 fabs(calc_other_times_ms->avg()));
ysr@777 1910 double avg_ratio = max_avg / min_avg;
ysr@777 1911 if (avg_ratio > 1.1) {
ysr@777 1912 should_print = true;
brutisso@2645 1913 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 1914 }
ysr@777 1915
ysr@777 1916 if (other_times_ms->sum() < -0.01) {
brutisso@2645 1917 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 1918 }
ysr@777 1919
ysr@777 1920 if (other_times_ms->avg() < -0.01) {
brutisso@2645 1921 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 1922 }
ysr@777 1923
ysr@777 1924 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 1925 should_print = true;
brutisso@2645 1926 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 1927 }
ysr@777 1928
ysr@777 1929 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 1930 should_print = true;
brutisso@2645 1931 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 1932 }
ysr@777 1933
ysr@777 1934 if (should_print)
ysr@777 1935 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 1936 }
ysr@777 1937
ysr@777 1938 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
jmasa@2188 1939 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 1940 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 1941 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 1942 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 1943 if (body_summary != NULL) {
tonyp@3464 1944 print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
ysr@777 1945 if (parallel) {
ysr@777 1946 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
johnc@3219 1947 print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
tonyp@3416 1948 print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
ysr@777 1949 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 1950 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 1951 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 1952 print_summary(2, "Termination", body_summary->get_termination_seq());
johnc@3219 1953 print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
ysr@777 1954 {
ysr@777 1955 NumberSeq* other_parts[] = {
ysr@777 1956 body_summary->get_ext_root_scan_seq(),
tonyp@3416 1957 body_summary->get_satb_filtering_seq(),
johnc@3219 1958 body_summary->get_update_rs_seq(),
ysr@777 1959 body_summary->get_scan_rs_seq(),
ysr@777 1960 body_summary->get_obj_copy_seq(),
ysr@777 1961 body_summary->get_termination_seq()
ysr@777 1962 };
ysr@777 1963 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
johnc@2134 1964 6, other_parts);
ysr@777 1965 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 1966 &calc_other_times_ms);
ysr@777 1967 }
ysr@777 1968 } else {
johnc@3219 1969 print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
tonyp@3416 1970 print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
ysr@777 1971 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 1972 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 1973 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 1974 }
ysr@777 1975 }
johnc@3219 1976 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
johnc@3219 1977 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 1978 print_summary(1, "Other", summary->get_other_seq());
ysr@777 1979 {
johnc@2134 1980 if (body_summary != NULL) {
johnc@2134 1981 NumberSeq calc_other_times_ms;
johnc@2134 1982 if (parallel) {
johnc@2134 1983 // parallel
johnc@2134 1984 NumberSeq* other_parts[] = {
johnc@2134 1985 body_summary->get_satb_drain_seq(),
tonyp@3464 1986 body_summary->get_root_region_scan_wait_seq(),
johnc@2134 1987 body_summary->get_parallel_seq(),
johnc@2134 1988 body_summary->get_clear_ct_seq()
johnc@2134 1989 };
johnc@2134 1990 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
tonyp@3464 1991 4, other_parts);
johnc@2134 1992 } else {
johnc@2134 1993 // serial
johnc@2134 1994 NumberSeq* other_parts[] = {
johnc@2134 1995 body_summary->get_satb_drain_seq(),
tonyp@3464 1996 body_summary->get_root_region_scan_wait_seq(),
johnc@2134 1997 body_summary->get_update_rs_seq(),
johnc@2134 1998 body_summary->get_ext_root_scan_seq(),
tonyp@3416 1999 body_summary->get_satb_filtering_seq(),
johnc@2134 2000 body_summary->get_scan_rs_seq(),
johnc@2134 2001 body_summary->get_obj_copy_seq()
johnc@2134 2002 };
johnc@2134 2003 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
tonyp@3464 2004 7, other_parts);
johnc@2134 2005 }
johnc@2134 2006 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 2007 }
ysr@777 2008 }
ysr@777 2009 } else {
brutisso@2645 2010 LineBuffer(1).append_and_print_cr("none");
ysr@777 2011 }
brutisso@2645 2012 LineBuffer(0).append_and_print_cr("");
ysr@777 2013 }
ysr@777 2014
ysr@777 2015 void G1CollectorPolicy::print_tracing_info() const {
ysr@777 2016 if (TraceGen0Time) {
ysr@777 2017 gclog_or_tty->print_cr("ALL PAUSES");
ysr@777 2018 print_summary_sd(0, "Total", _all_pause_times_ms);
ysr@777 2019 gclog_or_tty->print_cr("");
ysr@777 2020 gclog_or_tty->print_cr("");
tonyp@3337 2021 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
tonyp@3337 2022 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
ysr@777 2023 gclog_or_tty->print_cr("");
ysr@777 2024
apetrusenko@1112 2025 gclog_or_tty->print_cr("EVACUATION PAUSES");
apetrusenko@1112 2026 print_summary(_summary);
ysr@777 2027
ysr@777 2028 gclog_or_tty->print_cr("MISC");
ysr@777 2029 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
ysr@777 2030 print_summary_sd(0, "Yields", _all_yield_times_ms);
ysr@777 2031 for (int i = 0; i < _aux_num; ++i) {
ysr@777 2032 if (_all_aux_times_ms[i].num() > 0) {
ysr@777 2033 char buffer[96];
ysr@777 2034 sprintf(buffer, "Aux%d", i);
ysr@777 2035 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
ysr@777 2036 }
ysr@777 2037 }
ysr@777 2038 }
ysr@777 2039 if (TraceGen1Time) {
ysr@777 2040 if (_all_full_gc_times_ms->num() > 0) {
ysr@777 2041 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
ysr@777 2042 _all_full_gc_times_ms->num(),
ysr@777 2043 _all_full_gc_times_ms->sum() / 1000.0);
ysr@777 2044 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
ysr@777 2045 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 2046 _all_full_gc_times_ms->sd(),
ysr@777 2047 _all_full_gc_times_ms->maximum());
ysr@777 2048 }
ysr@777 2049 }
ysr@777 2050 }
ysr@777 2051
ysr@777 2052 void G1CollectorPolicy::print_yg_surv_rate_info() const {
ysr@777 2053 #ifndef PRODUCT
ysr@777 2054 _short_lived_surv_rate_group->print_surv_rate_summary();
ysr@777 2055 // add this call for any other surv rate groups
ysr@777 2056 #endif // PRODUCT
ysr@777 2057 }
ysr@777 2058
ysr@777 2059 #ifndef PRODUCT
ysr@777 2060 // for debugging, bit of a hack...
ysr@777 2061 static char*
ysr@777 2062 region_num_to_mbs(int length) {
ysr@777 2063 static char buffer[64];
ysr@777 2064 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2065 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2066 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2067 return buffer;
ysr@777 2068 }
ysr@777 2069 #endif // PRODUCT
ysr@777 2070
apetrusenko@980 2071 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2072 switch (purpose) {
ysr@777 2073 case GCAllocForSurvived:
apetrusenko@980 2074 return _max_survivor_regions;
ysr@777 2075 case GCAllocForTenured:
apetrusenko@980 2076 return REGIONS_UNLIMITED;
ysr@777 2077 default:
apetrusenko@980 2078 ShouldNotReachHere();
apetrusenko@980 2079 return REGIONS_UNLIMITED;
ysr@777 2080 };
ysr@777 2081 }
ysr@777 2082
tonyp@3119 2083 void G1CollectorPolicy::update_max_gc_locker_expansion() {
tonyp@2333 2084 size_t expansion_region_num = 0;
tonyp@2333 2085 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 2086 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 2087 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 2088 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 2089 // less than 1.0) we'll get 1.
tonyp@2333 2090 expansion_region_num = (size_t) ceil(expansion_region_num_d);
tonyp@2333 2091 } else {
tonyp@2333 2092 assert(expansion_region_num == 0, "sanity");
tonyp@2333 2093 }
tonyp@2333 2094 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 2095 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 2096 }
tonyp@2333 2097
apetrusenko@980 2098 // Calculates survivor space parameters.
tonyp@3119 2099 void G1CollectorPolicy::update_survivors_policy() {
tonyp@3119 2100 double max_survivor_regions_d =
tonyp@3119 2101 (double) _young_list_target_length / (double) SurvivorRatio;
tonyp@3119 2102 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
tonyp@3119 2103 // smaller than 1.0) we'll get 1.
tonyp@3119 2104 _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
tonyp@3119 2105
tonyp@3066 2106 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2107 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2108 }
apetrusenko@980 2109
ysr@777 2110 #ifndef PRODUCT
ysr@777 2111 class HRSortIndexIsOKClosure: public HeapRegionClosure {
ysr@777 2112 CollectionSetChooser* _chooser;
ysr@777 2113 public:
ysr@777 2114 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
ysr@777 2115 _chooser(chooser) {}
ysr@777 2116
ysr@777 2117 bool doHeapRegion(HeapRegion* r) {
ysr@777 2118 if (!r->continuesHumongous()) {
ysr@777 2119 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
ysr@777 2120 }
ysr@777 2121 return false;
ysr@777 2122 }
ysr@777 2123 };
ysr@777 2124
tonyp@3209 2125 bool G1CollectorPolicy::assertMarkedBytesDataOK() {
ysr@777 2126 HRSortIndexIsOKClosure cl(_collectionSetChooser);
ysr@777 2127 _g1->heap_region_iterate(&cl);
ysr@777 2128 return true;
ysr@777 2129 }
ysr@777 2130 #endif
ysr@777 2131
tonyp@3114 2132 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
tonyp@3114 2133 GCCause::Cause gc_cause) {
tonyp@2011 2134 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2135 if (!during_cycle) {
tonyp@3114 2136 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2137 "request concurrent cycle initiation",
tonyp@3114 2138 ergo_format_reason("requested by GC cause")
tonyp@3114 2139 ergo_format_str("GC cause"),
tonyp@3114 2140 GCCause::to_string(gc_cause));
tonyp@2011 2141 set_initiate_conc_mark_if_possible();
tonyp@2011 2142 return true;
tonyp@2011 2143 } else {
tonyp@3114 2144 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2145 "do not request concurrent cycle initiation",
tonyp@3114 2146 ergo_format_reason("concurrent cycle already in progress")
tonyp@3114 2147 ergo_format_str("GC cause"),
tonyp@3114 2148 GCCause::to_string(gc_cause));
tonyp@2011 2149 return false;
tonyp@2011 2150 }
tonyp@2011 2151 }
tonyp@2011 2152
ysr@777 2153 void
tonyp@1794 2154 G1CollectorPolicy::decide_on_conc_mark_initiation() {
tonyp@1794 2155 // We are about to decide on whether this pause will be an
tonyp@1794 2156 // initial-mark pause.
tonyp@1794 2157
tonyp@1794 2158 // First, during_initial_mark_pause() should not be already set. We
tonyp@1794 2159 // will set it here if we have to. However, it should be cleared by
tonyp@1794 2160 // the end of the pause (it's only set for the duration of an
tonyp@1794 2161 // initial-mark pause).
tonyp@1794 2162 assert(!during_initial_mark_pause(), "pre-condition");
tonyp@1794 2163
tonyp@1794 2164 if (initiate_conc_mark_if_possible()) {
tonyp@1794 2165 // We had noticed on a previous pause that the heap occupancy has
tonyp@1794 2166 // gone over the initiating threshold and we should start a
tonyp@1794 2167 // concurrent marking cycle. So we might initiate one.
tonyp@1794 2168
tonyp@1794 2169 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@1794 2170 if (!during_cycle) {
tonyp@1794 2171 // The concurrent marking thread is not "during a cycle", i.e.,
tonyp@1794 2172 // it has completed the last one. So we can go ahead and
tonyp@1794 2173 // initiate a new cycle.
tonyp@1794 2174
tonyp@1794 2175 set_during_initial_mark_pause();
tonyp@3337 2176 // We do not allow mixed GCs during marking.
tonyp@3337 2177 if (!gcs_are_young()) {
tonyp@3337 2178 set_gcs_are_young(true);
tonyp@3337 2179 ergo_verbose0(ErgoMixedGCs,
tonyp@3337 2180 "end mixed GCs",
johnc@3178 2181 ergo_format_reason("concurrent cycle is about to start"));
johnc@3178 2182 }
tonyp@1794 2183
tonyp@1794 2184 // And we can now clear initiate_conc_mark_if_possible() as
tonyp@1794 2185 // we've already acted on it.
tonyp@1794 2186 clear_initiate_conc_mark_if_possible();
tonyp@3114 2187
tonyp@3114 2188 ergo_verbose0(ErgoConcCycles,
tonyp@3114 2189 "initiate concurrent cycle",
tonyp@3114 2190 ergo_format_reason("concurrent cycle initiation requested"));
tonyp@1794 2191 } else {
tonyp@1794 2192 // The concurrent marking thread is still finishing up the
tonyp@1794 2193 // previous cycle. If we start one right now the two cycles
tonyp@1794 2194 // overlap. In particular, the concurrent marking thread might
tonyp@1794 2195 // be in the process of clearing the next marking bitmap (which
tonyp@1794 2196 // we will use for the next cycle if we start one). Starting a
tonyp@1794 2197 // cycle now will be bad given that parts of the marking
tonyp@1794 2198 // information might get cleared by the marking thread. And we
tonyp@1794 2199 // cannot wait for the marking thread to finish the cycle as it
tonyp@1794 2200 // periodically yields while clearing the next marking bitmap
tonyp@1794 2201 // and, if it's in a yield point, it's waiting for us to
tonyp@1794 2202 // finish. So, at this point we will not start a cycle and we'll
tonyp@1794 2203 // let the concurrent marking thread complete the last one.
tonyp@3114 2204 ergo_verbose0(ErgoConcCycles,
tonyp@3114 2205 "do not initiate concurrent cycle",
tonyp@3114 2206 ergo_format_reason("concurrent cycle already in progress"));
tonyp@1794 2207 }
tonyp@1794 2208 }
tonyp@1794 2209 }
tonyp@1794 2210
ysr@777 2211 class KnownGarbageClosure: public HeapRegionClosure {
tonyp@3539 2212 G1CollectedHeap* _g1h;
ysr@777 2213 CollectionSetChooser* _hrSorted;
ysr@777 2214
ysr@777 2215 public:
ysr@777 2216 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
tonyp@3539 2217 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
ysr@777 2218
ysr@777 2219 bool doHeapRegion(HeapRegion* r) {
ysr@777 2220 // We only include humongous regions in collection
ysr@777 2221 // sets when concurrent mark shows that their contained object is
ysr@777 2222 // unreachable.
ysr@777 2223
ysr@777 2224 // Do we have any marking information for this region?
ysr@777 2225 if (r->is_marked()) {
tonyp@3539 2226 // We will skip any region that's currently used as an old GC
tonyp@3539 2227 // alloc region (we should not consider those for collection
tonyp@3539 2228 // before we fill them up).
tonyp@3539 2229 if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
ysr@777 2230 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2231 }
ysr@777 2232 }
ysr@777 2233 return false;
ysr@777 2234 }
ysr@777 2235 };
ysr@777 2236
ysr@777 2237 class ParKnownGarbageHRClosure: public HeapRegionClosure {
tonyp@3539 2238 G1CollectedHeap* _g1h;
ysr@777 2239 CollectionSetChooser* _hrSorted;
ysr@777 2240 jint _marked_regions_added;
tonyp@3539 2241 size_t _reclaimable_bytes_added;
ysr@777 2242 jint _chunk_size;
ysr@777 2243 jint _cur_chunk_idx;
ysr@777 2244 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
ysr@777 2245 int _worker;
ysr@777 2246 int _invokes;
ysr@777 2247
ysr@777 2248 void get_new_chunk() {
ysr@777 2249 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
ysr@777 2250 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
ysr@777 2251 }
ysr@777 2252 void add_region(HeapRegion* r) {
ysr@777 2253 if (_cur_chunk_idx == _cur_chunk_end) {
ysr@777 2254 get_new_chunk();
ysr@777 2255 }
ysr@777 2256 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
ysr@777 2257 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
ysr@777 2258 _marked_regions_added++;
tonyp@3539 2259 _reclaimable_bytes_added += r->reclaimable_bytes();
ysr@777 2260 _cur_chunk_idx++;
ysr@777 2261 }
ysr@777 2262
ysr@777 2263 public:
ysr@777 2264 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
ysr@777 2265 jint chunk_size,
ysr@777 2266 int worker) :
tonyp@3539 2267 _g1h(G1CollectedHeap::heap()),
tonyp@3539 2268 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
tonyp@3539 2269 _marked_regions_added(0), _reclaimable_bytes_added(0),
tonyp@3539 2270 _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
ysr@777 2271
ysr@777 2272 bool doHeapRegion(HeapRegion* r) {
ysr@777 2273 // We only include humongous regions in collection
ysr@777 2274 // sets when concurrent mark shows that their contained object is
ysr@777 2275 // unreachable.
ysr@777 2276 _invokes++;
ysr@777 2277
ysr@777 2278 // Do we have any marking information for this region?
ysr@777 2279 if (r->is_marked()) {
tonyp@3539 2280 // We will skip any region that's currently used as an old GC
tonyp@3539 2281 // alloc region (we should not consider those for collection
tonyp@3539 2282 // before we fill them up).
tonyp@3539 2283 if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
ysr@777 2284 add_region(r);
ysr@777 2285 }
ysr@777 2286 }
ysr@777 2287 return false;
ysr@777 2288 }
ysr@777 2289 jint marked_regions_added() { return _marked_regions_added; }
tonyp@3539 2290 size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
ysr@777 2291 int invokes() { return _invokes; }
ysr@777 2292 };
ysr@777 2293
ysr@777 2294 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 2295 CollectionSetChooser* _hrSorted;
ysr@777 2296 jint _chunk_size;
ysr@777 2297 G1CollectedHeap* _g1;
ysr@777 2298 public:
ysr@777 2299 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ysr@777 2300 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 2301 _hrSorted(hrSorted), _chunk_size(chunk_size),
tonyp@3539 2302 _g1(G1CollectedHeap::heap()) { }
ysr@777 2303
jmasa@3357 2304 void work(uint worker_id) {
jmasa@3357 2305 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
jmasa@3357 2306 _chunk_size,
jmasa@3357 2307 worker_id);
ysr@777 2308 // Back to zero for the claim value.
jmasa@3357 2309 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
jmasa@3294 2310 _g1->workers()->active_workers(),
tonyp@790 2311 HeapRegion::InitialClaimValue);
ysr@777 2312 jint regions_added = parKnownGarbageCl.marked_regions_added();
tonyp@3539 2313 size_t reclaimable_bytes_added =
tonyp@3539 2314 parKnownGarbageCl.reclaimable_bytes_added();
tonyp@3539 2315 _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
ysr@777 2316 if (G1PrintParCleanupStats) {
brutisso@2645 2317 gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
jmasa@3357 2318 worker_id, parKnownGarbageCl.invokes(), regions_added);
ysr@777 2319 }
ysr@777 2320 }
ysr@777 2321 };
ysr@777 2322
ysr@777 2323 void
jmasa@3294 2324 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
tonyp@3209 2325 double start_sec;
tonyp@3209 2326 if (G1PrintParCleanupStats) {
tonyp@3209 2327 start_sec = os::elapsedTime();
tonyp@3209 2328 }
ysr@777 2329
ysr@777 2330 _collectionSetChooser->clearMarkedHeapRegions();
tonyp@3209 2331 double clear_marked_end_sec;
ysr@777 2332 if (G1PrintParCleanupStats) {
tonyp@3209 2333 clear_marked_end_sec = os::elapsedTime();
tonyp@3209 2334 gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.",
tonyp@3209 2335 (clear_marked_end_sec - start_sec) * 1000.0);
ysr@777 2336 }
tonyp@3209 2337
jmasa@2188 2338 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 2339 const size_t OverpartitionFactor = 4;
jmasa@3294 2340 size_t WorkUnit;
jmasa@3294 2341 // The use of MinChunkSize = 8 in the original code
jmasa@3294 2342 // causes some assertion failures when the total number of
jmasa@3294 2343 // region is less than 8. The code here tries to fix that.
jmasa@3294 2344 // Should the original code also be fixed?
jmasa@3294 2345 if (no_of_gc_threads > 0) {
jmasa@3294 2346 const size_t MinWorkUnit =
jmasa@3294 2347 MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
jmasa@3294 2348 WorkUnit =
jmasa@3294 2349 MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
jmasa@3294 2350 MinWorkUnit);
jmasa@3294 2351 } else {
jmasa@3294 2352 assert(no_of_gc_threads > 0,
jmasa@3294 2353 "The active gc workers should be greater than 0");
jmasa@3294 2354 // In a product build do something reasonable to avoid a crash.
jmasa@3294 2355 const size_t MinWorkUnit =
jmasa@3294 2356 MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
jmasa@3294 2357 WorkUnit =
jmasa@3294 2358 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
jmasa@3294 2359 MinWorkUnit);
jmasa@3294 2360 }
ysr@777 2361 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
kvn@1926 2362 WorkUnit);
ysr@777 2363 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
kvn@1926 2364 (int) WorkUnit);
ysr@777 2365 _g1->workers()->run_task(&parKnownGarbageTask);
tonyp@790 2366
tonyp@790 2367 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 2368 "sanity check");
ysr@777 2369 } else {
ysr@777 2370 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
ysr@777 2371 _g1->heap_region_iterate(&knownGarbagecl);
ysr@777 2372 }
tonyp@3209 2373 double known_garbage_end_sec;
ysr@777 2374 if (G1PrintParCleanupStats) {
tonyp@3209 2375 known_garbage_end_sec = os::elapsedTime();
ysr@777 2376 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
tonyp@3209 2377 (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
ysr@777 2378 }
tonyp@3209 2379
ysr@777 2380 _collectionSetChooser->sortMarkedHeapRegions();
tonyp@3209 2381 double end_sec = os::elapsedTime();
ysr@777 2382 if (G1PrintParCleanupStats) {
ysr@777 2383 gclog_or_tty->print_cr(" sorting: %8.3f ms.",
tonyp@3209 2384 (end_sec - known_garbage_end_sec) * 1000.0);
ysr@777 2385 }
ysr@777 2386
tonyp@3209 2387 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
tonyp@3209 2388 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
tonyp@3209 2389 _cur_mark_stop_world_time_ms += elapsed_time_ms;
tonyp@3209 2390 _prev_collection_pause_end_ms += elapsed_time_ms;
tonyp@3209 2391 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
ysr@777 2392 }
ysr@777 2393
johnc@1829 2394 // Add the heap region at the head of the non-incremental collection set
tonyp@3289 2395 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
johnc@1829 2396 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2397 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2398
johnc@1829 2399 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2400 hr->set_in_collection_set(true);
ysr@777 2401 hr->set_next_in_collection_set(_collection_set);
ysr@777 2402 _collection_set = hr;
ysr@777 2403 _collection_set_bytes_used_before += hr->used();
tonyp@961 2404 _g1->register_region_with_in_cset_fast_test(hr);
tonyp@3289 2405 size_t rs_length = hr->rem_set()->occupied();
tonyp@3289 2406 _recorded_rs_lengths += rs_length;
tonyp@3289 2407 _old_cset_region_length += 1;
ysr@777 2408 }
ysr@777 2409
johnc@1829 2410 // Initialize the per-collection-set information
johnc@1829 2411 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2412 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2413
johnc@1829 2414 _inc_cset_head = NULL;
johnc@1829 2415 _inc_cset_tail = NULL;
johnc@1829 2416 _inc_cset_bytes_used_before = 0;
johnc@1829 2417
johnc@1829 2418 _inc_cset_max_finger = 0;
johnc@1829 2419 _inc_cset_recorded_rs_lengths = 0;
tonyp@3356 2420 _inc_cset_recorded_rs_lengths_diffs = 0;
tonyp@3356 2421 _inc_cset_predicted_elapsed_time_ms = 0.0;
tonyp@3356 2422 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
johnc@1829 2423 _inc_cset_build_state = Active;
johnc@1829 2424 }
johnc@1829 2425
tonyp@3356 2426 void G1CollectorPolicy::finalize_incremental_cset_building() {
tonyp@3356 2427 assert(_inc_cset_build_state == Active, "Precondition");
tonyp@3356 2428 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
tonyp@3356 2429
tonyp@3356 2430 // The two "main" fields, _inc_cset_recorded_rs_lengths and
tonyp@3356 2431 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
tonyp@3356 2432 // that adds a new region to the CSet. Further updates by the
tonyp@3356 2433 // concurrent refinement thread that samples the young RSet lengths
tonyp@3356 2434 // are accumulated in the *_diffs fields. Here we add the diffs to
tonyp@3356 2435 // the "main" fields.
tonyp@3356 2436
tonyp@3356 2437 if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
tonyp@3356 2438 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
tonyp@3356 2439 } else {
tonyp@3356 2440 // This is defensive. The diff should in theory be always positive
tonyp@3356 2441 // as RSets can only grow between GCs. However, given that we
tonyp@3356 2442 // sample their size concurrently with other threads updating them
tonyp@3356 2443 // it's possible that we might get the wrong size back, which
tonyp@3356 2444 // could make the calculations somewhat inaccurate.
tonyp@3356 2445 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
tonyp@3356 2446 if (_inc_cset_recorded_rs_lengths >= diffs) {
tonyp@3356 2447 _inc_cset_recorded_rs_lengths -= diffs;
tonyp@3356 2448 } else {
tonyp@3356 2449 _inc_cset_recorded_rs_lengths = 0;
tonyp@3356 2450 }
tonyp@3356 2451 }
tonyp@3356 2452 _inc_cset_predicted_elapsed_time_ms +=
tonyp@3356 2453 _inc_cset_predicted_elapsed_time_ms_diffs;
tonyp@3356 2454
tonyp@3356 2455 _inc_cset_recorded_rs_lengths_diffs = 0;
tonyp@3356 2456 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
tonyp@3356 2457 }
tonyp@3356 2458
johnc@1829 2459 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2460 // This routine is used when:
johnc@1829 2461 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2462 // evacuation pause,
johnc@1829 2463 // * adding the current allocation region to the incremental cset
johnc@1829 2464 // when it is retired, and
johnc@1829 2465 // * updating existing policy information for a region in the
johnc@1829 2466 // incremental cset via young list RSet sampling.
johnc@1829 2467 // Therefore this routine may be called at a safepoint by the
johnc@1829 2468 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2469 // retiring the current allocation region) or a concurrent
johnc@1829 2470 // refine thread (RSet sampling).
johnc@1829 2471
johnc@1829 2472 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2473 size_t used_bytes = hr->used();
johnc@1829 2474 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2475 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2476 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2477
johnc@1829 2478 // Cache the values we have added to the aggregated informtion
johnc@1829 2479 // in the heap region in case we have to remove this region from
johnc@1829 2480 // the incremental collection set, or it is updated by the
johnc@1829 2481 // rset sampling code
johnc@1829 2482 hr->set_recorded_rs_length(rs_length);
johnc@1829 2483 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2484 }
johnc@1829 2485
tonyp@3356 2486 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
tonyp@3356 2487 size_t new_rs_length) {
tonyp@3356 2488 // Update the CSet information that is dependent on the new RS length
tonyp@3356 2489 assert(hr->is_young(), "Precondition");
tonyp@3356 2490 assert(!SafepointSynchronize::is_at_safepoint(),
tonyp@3356 2491 "should not be at a safepoint");
tonyp@3356 2492
tonyp@3356 2493 // We could have updated _inc_cset_recorded_rs_lengths and
tonyp@3356 2494 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
tonyp@3356 2495 // that atomically, as this code is executed by a concurrent
tonyp@3356 2496 // refinement thread, potentially concurrently with a mutator thread
tonyp@3356 2497 // allocating a new region and also updating the same fields. To
tonyp@3356 2498 // avoid the atomic operations we accumulate these updates on two
tonyp@3356 2499 // separate fields (*_diffs) and we'll just add them to the "main"
tonyp@3356 2500 // fields at the start of a GC.
tonyp@3356 2501
tonyp@3356 2502 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
tonyp@3356 2503 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
tonyp@3356 2504 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
tonyp@3356 2505
johnc@1829 2506 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
tonyp@3356 2507 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
tonyp@3356 2508 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
tonyp@3356 2509 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
tonyp@3356 2510
tonyp@3356 2511 hr->set_recorded_rs_length(new_rs_length);
tonyp@3356 2512 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
johnc@1829 2513 }
johnc@1829 2514
johnc@1829 2515 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
tonyp@3289 2516 assert(hr->is_young(), "invariant");
tonyp@3289 2517 assert(hr->young_index_in_cset() > -1, "should have already been set");
johnc@1829 2518 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2519
johnc@1829 2520 // We need to clear and set the cached recorded/cached collection set
johnc@1829 2521 // information in the heap region here (before the region gets added
johnc@1829 2522 // to the collection set). An individual heap region's cached values
johnc@1829 2523 // are calculated, aggregated with the policy collection set info,
johnc@1829 2524 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2525 // by the Young List sampling code.
johnc@1829 2526
johnc@1829 2527 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2528 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2529
johnc@1829 2530 HeapWord* hr_end = hr->end();
johnc@1829 2531 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2532
johnc@1829 2533 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2534 hr->set_in_collection_set(true);
johnc@1829 2535 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2536
johnc@1829 2537 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2538 }
johnc@1829 2539
johnc@1829 2540 // Add the region at the RHS of the incremental cset
johnc@1829 2541 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2542 // We should only ever be appending survivors at the end of a pause
johnc@1829 2543 assert( hr->is_survivor(), "Logic");
johnc@1829 2544
johnc@1829 2545 // Do the 'common' stuff
johnc@1829 2546 add_region_to_incremental_cset_common(hr);
johnc@1829 2547
johnc@1829 2548 // Now add the region at the right hand side
johnc@1829 2549 if (_inc_cset_tail == NULL) {
johnc@1829 2550 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2551 _inc_cset_head = hr;
johnc@1829 2552 } else {
johnc@1829 2553 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2554 }
johnc@1829 2555 _inc_cset_tail = hr;
johnc@1829 2556 }
johnc@1829 2557
johnc@1829 2558 // Add the region to the LHS of the incremental cset
johnc@1829 2559 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2560 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2561 assert(!hr->is_survivor(), "Logic");
johnc@1829 2562
johnc@1829 2563 // Do the 'common' stuff
johnc@1829 2564 add_region_to_incremental_cset_common(hr);
johnc@1829 2565
johnc@1829 2566 // Add the region at the left hand side
johnc@1829 2567 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2568 if (_inc_cset_head == NULL) {
johnc@1829 2569 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2570 _inc_cset_tail = hr;
johnc@1829 2571 }
johnc@1829 2572 _inc_cset_head = hr;
johnc@1829 2573 }
johnc@1829 2574
johnc@1829 2575 #ifndef PRODUCT
johnc@1829 2576 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2577 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2578
johnc@1829 2579 st->print_cr("\nCollection_set:");
johnc@1829 2580 HeapRegion* csr = list_head;
johnc@1829 2581 while (csr != NULL) {
johnc@1829 2582 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2583 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2584 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2585 "age: %4d, y: %d, surv: %d",
johnc@1829 2586 csr->bottom(), csr->end(),
johnc@1829 2587 csr->top(),
johnc@1829 2588 csr->prev_top_at_mark_start(),
johnc@1829 2589 csr->next_top_at_mark_start(),
johnc@1829 2590 csr->top_at_conc_mark_count(),
johnc@1829 2591 csr->age_in_surv_rate_group_cond(),
johnc@1829 2592 csr->is_young(),
johnc@1829 2593 csr->is_survivor());
johnc@1829 2594 csr = next;
johnc@1829 2595 }
johnc@1829 2596 }
johnc@1829 2597 #endif // !PRODUCT
johnc@1829 2598
tonyp@3539 2599 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
tonyp@3539 2600 const char* false_action_str) {
tonyp@3539 2601 CollectionSetChooser* cset_chooser = _collectionSetChooser;
tonyp@3539 2602 if (cset_chooser->isEmpty()) {
tonyp@3539 2603 ergo_verbose0(ErgoMixedGCs,
tonyp@3539 2604 false_action_str,
tonyp@3539 2605 ergo_format_reason("candidate old regions not available"));
tonyp@3539 2606 return false;
tonyp@3539 2607 }
tonyp@3539 2608 size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
tonyp@3539 2609 size_t capacity_bytes = _g1->capacity();
tonyp@3539 2610 double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
tonyp@3539 2611 double threshold = (double) G1OldReclaimableThresholdPercent;
tonyp@3539 2612 if (perc < threshold) {
tonyp@3539 2613 ergo_verbose4(ErgoMixedGCs,
tonyp@3539 2614 false_action_str,
tonyp@3539 2615 ergo_format_reason("reclaimable percentage lower than threshold")
tonyp@3539 2616 ergo_format_region("candidate old regions")
tonyp@3539 2617 ergo_format_byte_perc("reclaimable")
tonyp@3539 2618 ergo_format_perc("threshold"),
tonyp@3539 2619 cset_chooser->remainingRegions(),
tonyp@3539 2620 reclaimable_bytes, perc, threshold);
tonyp@3539 2621 return false;
tonyp@3539 2622 }
tonyp@3539 2623
tonyp@3539 2624 ergo_verbose4(ErgoMixedGCs,
tonyp@3539 2625 true_action_str,
tonyp@3539 2626 ergo_format_reason("candidate old regions available")
tonyp@3539 2627 ergo_format_region("candidate old regions")
tonyp@3539 2628 ergo_format_byte_perc("reclaimable")
tonyp@3539 2629 ergo_format_perc("threshold"),
tonyp@3539 2630 cset_chooser->remainingRegions(),
tonyp@3539 2631 reclaimable_bytes, perc, threshold);
tonyp@3539 2632 return true;
tonyp@3539 2633 }
tonyp@3539 2634
// Finalize the collection set for the upcoming pause: take over the
// incrementally-built young CSet (eden + survivors) and, if this is a mixed
// GC, append old regions from the CSet chooser until we hit the max old
// region count, run out of predicted pause time (subject to the min old
// region count), or exhaust the candidates. Records timing for the young
// and non-young portions of the choice in the corresponding
// _recorded_*_cset_choice_time_ms fields.
void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  // Start from the fixed per-pause overhead; the remaining budget is what
  // region selection below is allowed to consume.
  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;
  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);

  // NOTE(review): this 'hr' is shadowed by an inner 'hr' declared in the
  // !gcs_are_young() branch below; this one is only used for the survivor
  // walk that follows.
  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _last_gc_was_young = gcs_are_young() ? true : false;

  if (_last_gc_was_young) {
    ++_young_pause_num;
  } else {
    ++_mixed_pause_num;
  }

  // The young list is laid with the survivor regions from the previous
  // pause are appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  size_t survivor_region_length = young_list->survivor_length();
  size_t eden_region_length = young_list->length() - survivor_region_length;
  init_cset_region_lengths(eden_region_length, survivor_region_length);
  // Last pause's survivors are collected again this pause, so retag them
  // as plain young regions.
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  // Adopt the incrementally-built young CSet wholesale and charge its
  // predicted cost against the pause-time budget.
  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;

  if (!gcs_are_young()) {
    // Mixed GC: pull old regions off the chooser (most-profitable first),
    // bounded below by min_old_cset_length (always make some progress) and
    // above by max_old_cset_length (bound the pause).
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    assert(cset_chooser->verify(), "CSet Chooser verification - pre");
    const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
    const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();

    size_t expensive_region_num = 0;
    // Only honor the time budget when the young list length is adaptive;
    // otherwise we simply stop at the minimum old region count.
    bool check_time_remaining = adaptive_young_list_length();
    HeapRegion* hr = cset_chooser->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        ergo_verbose2(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("old CSet region num reached max")
                      ergo_format_region("old")
                      ergo_format_region("max"),
                      old_cset_region_length(), max_old_cset_length);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            ergo_verbose4(ErgoCSetConstruction,
                          "finish adding old regions to CSet",
                          ergo_format_reason("predicted time is too high")
                          ergo_format_ms("predicted time")
                          ergo_format_ms("remaining time")
                          ergo_format_region("old")
                          ergo_format_region("min"),
                          predicted_time_ms, time_remaining_ms,
                          old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          ergo_verbose2(ErgoCSetConstruction,
                        "finish adding old regions to CSet",
                        ergo_format_reason("old CSet region num reached min")
                        ergo_format_region("old")
                        ergo_format_region("min"),
                        old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms -= predicted_time_ms;
      predicted_pause_time_ms += predicted_time_ms;
      cset_chooser->remove_and_move_to_next(hr);
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser->peek();
    }
    if (hr == NULL) {
      // The loop fell off the end of the candidate list rather than
      // hitting one of the break conditions above.
      ergo_verbose0(ErgoCSetConstruction,
                    "finish adding old regions to CSet",
                    ergo_format_reason("candidate old regions not available"));
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      ergo_verbose4(ErgoCSetConstruction,
                    "added expensive regions to CSet",
                    ergo_format_reason("old CSet region num not reached min")
                    ergo_format_region("old")
                    ergo_format_region("expensive")
                    ergo_format_region("min")
                    ergo_format_ms("remaining time"),
                    old_cset_region_length(),
                    expensive_region_num,
                    min_old_cset_length,
                    time_remaining_ms);
    }

    assert(cset_chooser->verify(), "CSet Chooser verification - post");
  }

  stop_incremental_cset_building();

  count_CS_bytes_used();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                old_cset_region_length(),
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}

mercurial