src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Fri, 13 Apr 2012 01:59:38 +0200

author
brutisso
date
Fri, 13 Apr 2012 01:59:38 +0200
changeset 3710
5c86f8211d1e
parent 3691
2a0172480595
child 3713
720b6a76dd9d
permissions
-rw-r--r--

7160728: Introduce an extra logging level for G1 logging
Summary: Added log levels "fine", "finer" and "finest". Let PrintGC map to "fine" and PrintGCDetails map to "finer". Separated out the per worker information in the G1 logging to the "finest" level.
Reviewed-by: stefank, jwilhelm, tonyp, johnc

ysr@777 1 /*
tonyp@3416 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
brutisso@3710 32 #include "gc_implementation/g1/g1Log.hpp"
stefank@2314 33 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 34 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 35 #include "runtime/arguments.hpp"
stefank@2314 36 #include "runtime/java.hpp"
stefank@2314 37 #include "runtime/mutexLocker.hpp"
stefank@2314 38 #include "utilities/debug.hpp"
ysr@777 39
// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// Each table below has 8 columns. The G1CollectorPolicy constructor
// selects one column based on ParallelGCThreads (column 0 for 0 or 1
// threads, column 7 for 8 or more) and uses the value to seed the
// corresponding prediction sequence.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
ysr@777 79
brutisso@2645 80 // Help class for avoiding interleaved logging
brutisso@2645 81 class LineBuffer: public StackObj {
brutisso@2645 82
brutisso@2645 83 private:
brutisso@2645 84 static const int BUFFER_LEN = 1024;
brutisso@2645 85 static const int INDENT_CHARS = 3;
brutisso@2645 86 char _buffer[BUFFER_LEN];
brutisso@2645 87 int _indent_level;
brutisso@2645 88 int _cur;
brutisso@2645 89
brutisso@2645 90 void vappend(const char* format, va_list ap) {
brutisso@2645 91 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
brutisso@2645 92 if (res != -1) {
brutisso@2645 93 _cur += res;
brutisso@2645 94 } else {
brutisso@2645 95 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
brutisso@2645 96 _buffer[BUFFER_LEN -1] = 0;
brutisso@2645 97 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
brutisso@2645 98 }
brutisso@2645 99 }
brutisso@2645 100
brutisso@2645 101 public:
brutisso@2645 102 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
brutisso@2645 103 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
brutisso@2645 104 _buffer[_cur] = ' ';
brutisso@2645 105 }
brutisso@2645 106 }
brutisso@2645 107
brutisso@2645 108 #ifndef PRODUCT
brutisso@2645 109 ~LineBuffer() {
brutisso@2645 110 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
brutisso@2645 111 }
brutisso@2645 112 #endif
brutisso@2645 113
brutisso@2645 114 void append(const char* format, ...) {
brutisso@2645 115 va_list ap;
brutisso@2645 116 va_start(ap, format);
brutisso@2645 117 vappend(format, ap);
brutisso@2645 118 va_end(ap);
brutisso@2645 119 }
brutisso@2645 120
brutisso@2645 121 void append_and_print_cr(const char* format, ...) {
brutisso@2645 122 va_list ap;
brutisso@2645 123 va_start(ap, format);
brutisso@2645 124 vappend(format, ap);
brutisso@2645 125 va_end(ap);
brutisso@2645 126 gclog_or_tty->print_cr("%s", _buffer);
brutisso@2645 127 _cur = _indent_level * INDENT_CHARS;
brutisso@2645 128 }
brutisso@2645 129 };
brutisso@2645 130
G1CollectorPolicy::G1CollectorPolicy() :
  // One stats slot per GC worker (a single slot when running serially).
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                       ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),
  _root_region_scan_wait_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  // Card-count clearing statistics are only tracked in non-product builds.
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // Prediction sequences feeding the pause-time model; several are seeded
  // with per-thread-count defaults in the constructor body below.
  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // GC-mode state: we start out doing young-only collections.
  _gcs_are_young(true),
  _young_pause_num(0),
  _mixed_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _all_full_gc_times_ms(new NumberSeq()),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  // Per-worker timing arrays, one slot per parallel GC thread.
  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

  // Select the column of the *_defaults tables (defined above) that
  // matches the number of GC threads: column 0 for 0/1 threads, column 7
  // for 8 or more.
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
ysr@777 408
ysr@777 409 void G1CollectorPolicy::initialize_flags() {
ysr@777 410 set_min_alignment(HeapRegion::GrainBytes);
ysr@777 411 set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
apetrusenko@982 412 if (SurvivorRatio < 1) {
apetrusenko@982 413 vm_exit_during_initialization("Invalid survivor ratio specified");
apetrusenko@982 414 }
ysr@777 415 CollectorPolicy::initialize_flags();
ysr@777 416 }
ysr@777 417
brutisso@3358 418 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
brutisso@3358 419 assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
brutisso@3358 420 assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
brutisso@3358 421 assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");
brutisso@3120 422
brutisso@3120 423 if (FLAG_IS_CMDLINE(NewRatio)) {
brutisso@3120 424 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
tonyp@3172 425 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
brutisso@3120 426 } else {
brutisso@3358 427 _sizer_kind = SizerNewRatio;
brutisso@3358 428 _adaptive_size = false;
brutisso@3358 429 return;
brutisso@3120 430 }
brutisso@3120 431 }
brutisso@3120 432
brutisso@3358 433 if (FLAG_IS_CMDLINE(NewSize)) {
brutisso@3358 434 _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
brutisso@3358 435 if (FLAG_IS_CMDLINE(MaxNewSize)) {
brutisso@3358 436 _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
brutisso@3358 437 _sizer_kind = SizerMaxAndNewSize;
brutisso@3358 438 _adaptive_size = _min_desired_young_length == _max_desired_young_length;
brutisso@3358 439 } else {
brutisso@3358 440 _sizer_kind = SizerNewSizeOnly;
brutisso@3358 441 }
brutisso@3358 442 } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
brutisso@3358 443 _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
brutisso@3358 444 _sizer_kind = SizerMaxNewSizeOnly;
brutisso@3358 445 }
brutisso@3358 446 }
brutisso@3358 447
brutisso@3358 448 size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
brutisso@3358 449 size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
brutisso@3358 450 return MAX2((size_t)1, default_value);
brutisso@3358 451 }
brutisso@3358 452
brutisso@3358 453 size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
brutisso@3358 454 size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
brutisso@3358 455 return MAX2((size_t)1, default_value);
brutisso@3358 456 }
brutisso@3358 457
brutisso@3358 458 void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
brutisso@3358 459 assert(new_number_of_heap_regions > 0, "Heap must be initialized");
brutisso@3358 460
brutisso@3358 461 switch (_sizer_kind) {
brutisso@3358 462 case SizerDefaults:
brutisso@3358 463 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
brutisso@3358 464 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
brutisso@3358 465 break;
brutisso@3358 466 case SizerNewSizeOnly:
brutisso@3358 467 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
brutisso@3358 468 _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
brutisso@3358 469 break;
brutisso@3358 470 case SizerMaxNewSizeOnly:
brutisso@3358 471 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
brutisso@3358 472 _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
brutisso@3358 473 break;
brutisso@3358 474 case SizerMaxAndNewSize:
brutisso@3358 475 // Do nothing. Values set on the command line, don't update them at runtime.
brutisso@3358 476 break;
brutisso@3358 477 case SizerNewRatio:
brutisso@3358 478 _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
brutisso@3358 479 _max_desired_young_length = _min_desired_young_length;
brutisso@3358 480 break;
brutisso@3358 481 default:
brutisso@3358 482 ShouldNotReachHere();
brutisso@3358 483 }
brutisso@3358 484
brutisso@3120 485 assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
brutisso@3358 486 }
brutisso@3358 487
// Second-stage initialization, run once the heap object exists (the
// constructor runs before the heap is created). Caller must hold the
// Heap_lock.
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    // Not used in adaptive mode; the target length is computed by
    // update_young_list_target_length() below instead.
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}
ysr@777 509
// Create the jstat counters for the policy.
// NOTE(review): the numeric arguments presumably encode collector and
// generation counts for the "GarbageFirst" counter group - confirm
// against the GCPolicyCounters constructor.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
apetrusenko@980 514
tonyp@3119 515 bool G1CollectorPolicy::predict_will_fit(size_t young_length,
tonyp@3119 516 double base_time_ms,
tonyp@3119 517 size_t base_free_regions,
tonyp@3119 518 double target_pause_time_ms) {
tonyp@3119 519 if (young_length >= base_free_regions) {
tonyp@3119 520 // end condition 1: not enough space for the young regions
tonyp@3119 521 return false;
ysr@777 522 }
tonyp@3119 523
tonyp@3119 524 double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
tonyp@3119 525 size_t bytes_to_copy =
tonyp@3119 526 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
tonyp@3119 527 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
tonyp@3119 528 double young_other_time_ms = predict_young_other_time_ms(young_length);
tonyp@3119 529 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
tonyp@3119 530 if (pause_time_ms > target_pause_time_ms) {
tonyp@3119 531 // end condition 2: prediction is over the target pause time
tonyp@3119 532 return false;
tonyp@3119 533 }
tonyp@3119 534
tonyp@3119 535 size_t free_bytes =
tonyp@3119 536 (base_free_regions - young_length) * HeapRegion::GrainBytes;
tonyp@3119 537 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
tonyp@3119 538 // end condition 3: out-of-space (conservatively!)
tonyp@3119 539 return false;
tonyp@3119 540 }
tonyp@3119 541
tonyp@3119 542 // success!
tonyp@3119 543 return true;
ysr@777 544 }
ysr@777 545
brutisso@3120 546 void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
brutisso@3120 547 // re-calculate the necessary reserve
brutisso@3120 548 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
tonyp@3119 549 // We use ceiling so that if reserve_regions_d is > 0.0 (but
tonyp@3119 550 // smaller than 1.0) we'll get 1.
tonyp@3119 551 _reserve_regions = (size_t) ceil(reserve_regions_d);
brutisso@3120 552
brutisso@3358 553 _young_gen_sizer->heap_size_changed(new_number_of_regions);
tonyp@3119 554 }
tonyp@3119 555
tonyp@3119 556 size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
tonyp@3119 557 size_t base_min_length) {
tonyp@3119 558 size_t desired_min_length = 0;
ysr@777 559 if (adaptive_young_list_length()) {
tonyp@3119 560 if (_alloc_rate_ms_seq->num() > 3) {
tonyp@3119 561 double now_sec = os::elapsedTime();
tonyp@3119 562 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
tonyp@3119 563 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@3119 564 desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@3119 565 } else {
tonyp@3119 566 // otherwise we don't have enough info to make the prediction
tonyp@3119 567 }
ysr@777 568 }
brutisso@3120 569 desired_min_length += base_min_length;
brutisso@3120 570 // make sure we don't go below any user-defined minimum bound
brutisso@3358 571 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
ysr@777 572 }
ysr@777 573
// Desired upper bound on the young list length (in regions).
size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}
tonyp@3119 580
tonyp@3119 581 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
tonyp@3119 582 if (rs_lengths == (size_t) -1) {
tonyp@3119 583 // if it's set to the default value (-1), we should predict it;
tonyp@3119 584 // otherwise, use the given value.
tonyp@3119 585 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
tonyp@3119 586 }
tonyp@3119 587
tonyp@3119 588 // Calculate the absolute and desired min bounds.
tonyp@3119 589
tonyp@3119 590 // This is how many young regions we already have (currently: the survivors).
tonyp@3119 591 size_t base_min_length = recorded_survivor_regions();
tonyp@3119 592 // This is the absolute minimum young length, which ensures that we
tonyp@3119 593 // can allocate one eden region in the worst-case.
tonyp@3119 594 size_t absolute_min_length = base_min_length + 1;
tonyp@3119 595 size_t desired_min_length =
tonyp@3119 596 calculate_young_list_desired_min_length(base_min_length);
tonyp@3119 597 if (desired_min_length < absolute_min_length) {
tonyp@3119 598 desired_min_length = absolute_min_length;
tonyp@3119 599 }
tonyp@3119 600
tonyp@3119 601 // Calculate the absolute and desired max bounds.
tonyp@3119 602
tonyp@3119 603 // We will try our best not to "eat" into the reserve.
tonyp@3119 604 size_t absolute_max_length = 0;
tonyp@3119 605 if (_free_regions_at_end_of_collection > _reserve_regions) {
tonyp@3119 606 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
tonyp@3119 607 }
tonyp@3119 608 size_t desired_max_length = calculate_young_list_desired_max_length();
tonyp@3119 609 if (desired_max_length > absolute_max_length) {
tonyp@3119 610 desired_max_length = absolute_max_length;
tonyp@3119 611 }
tonyp@3119 612
tonyp@3119 613 size_t young_list_target_length = 0;
tonyp@3119 614 if (adaptive_young_list_length()) {
tonyp@3337 615 if (gcs_are_young()) {
tonyp@3119 616 young_list_target_length =
tonyp@3119 617 calculate_young_list_target_length(rs_lengths,
tonyp@3119 618 base_min_length,
tonyp@3119 619 desired_min_length,
tonyp@3119 620 desired_max_length);
tonyp@3119 621 _rs_lengths_prediction = rs_lengths;
tonyp@3119 622 } else {
tonyp@3119 623 // Don't calculate anything and let the code below bound it to
tonyp@3119 624 // the desired_min_length, i.e., do the next GC as soon as
tonyp@3119 625 // possible to maximize how many old regions we can add to it.
ysr@777 626 }
ysr@777 627 } else {
tonyp@3539 628 // The user asked for a fixed young gen so we'll fix the young gen
tonyp@3539 629 // whether the next GC is young or mixed.
tonyp@3539 630 young_list_target_length = _young_list_fixed_length;
ysr@777 631 }
ysr@777 632
tonyp@3119 633 // Make sure we don't go over the desired max length, nor under the
tonyp@3119 634 // desired min length. In case they clash, desired_min_length wins
tonyp@3119 635 // which is why that test is second.
tonyp@3119 636 if (young_list_target_length > desired_max_length) {
tonyp@3119 637 young_list_target_length = desired_max_length;
tonyp@3119 638 }
tonyp@3119 639 if (young_list_target_length < desired_min_length) {
tonyp@3119 640 young_list_target_length = desired_min_length;
tonyp@3119 641 }
tonyp@3119 642
tonyp@3119 643 assert(young_list_target_length > recorded_survivor_regions(),
tonyp@3119 644 "we should be able to allocate at least one eden region");
tonyp@3119 645 assert(young_list_target_length >= absolute_min_length, "post-condition");
tonyp@3119 646 _young_list_target_length = young_list_target_length;
tonyp@3119 647
tonyp@3119 648 update_max_gc_locker_expansion();
ysr@777 649 }
ysr@777 650
tonyp@3119 651 size_t
tonyp@3119 652 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
tonyp@3119 653 size_t base_min_length,
tonyp@3119 654 size_t desired_min_length,
tonyp@3119 655 size_t desired_max_length) {
tonyp@3119 656 assert(adaptive_young_list_length(), "pre-condition");
tonyp@3337 657 assert(gcs_are_young(), "only call this for young GCs");
tonyp@3119 658
tonyp@3119 659 // In case some edge-condition makes the desired max length too small...
tonyp@3119 660 if (desired_max_length <= desired_min_length) {
tonyp@3119 661 return desired_min_length;
tonyp@3119 662 }
tonyp@3119 663
tonyp@3119 664 // We'll adjust min_young_length and max_young_length not to include
tonyp@3119 665 // the already allocated young regions (i.e., so they reflect the
tonyp@3119 666 // min and max eden regions we'll allocate). The base_min_length
tonyp@3119 667 // will be reflected in the predictions by the
tonyp@3119 668 // survivor_regions_evac_time prediction.
tonyp@3119 669 assert(desired_min_length > base_min_length, "invariant");
tonyp@3119 670 size_t min_young_length = desired_min_length - base_min_length;
tonyp@3119 671 assert(desired_max_length > base_min_length, "invariant");
tonyp@3119 672 size_t max_young_length = desired_max_length - base_min_length;
tonyp@3119 673
tonyp@3119 674 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
tonyp@3119 675 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
tonyp@3119 676 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
tonyp@3119 677 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
tonyp@3119 678 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
tonyp@3119 679 double base_time_ms =
tonyp@3119 680 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
tonyp@3119 681 survivor_regions_evac_time;
tonyp@3119 682 size_t available_free_regions = _free_regions_at_end_of_collection;
tonyp@3119 683 size_t base_free_regions = 0;
tonyp@3119 684 if (available_free_regions > _reserve_regions) {
tonyp@3119 685 base_free_regions = available_free_regions - _reserve_regions;
tonyp@3119 686 }
tonyp@3119 687
tonyp@3119 688 // Here, we will make sure that the shortest young length that
tonyp@3119 689 // makes sense fits within the target pause time.
tonyp@3119 690
tonyp@3119 691 if (predict_will_fit(min_young_length, base_time_ms,
tonyp@3119 692 base_free_regions, target_pause_time_ms)) {
tonyp@3119 693 // The shortest young length will fit into the target pause time;
tonyp@3119 694 // we'll now check whether the absolute maximum number of young
tonyp@3119 695 // regions will fit in the target pause time. If not, we'll do
tonyp@3119 696 // a binary search between min_young_length and max_young_length.
tonyp@3119 697 if (predict_will_fit(max_young_length, base_time_ms,
tonyp@3119 698 base_free_regions, target_pause_time_ms)) {
tonyp@3119 699 // The maximum young length will fit into the target pause time.
tonyp@3119 700 // We are done so set min young length to the maximum length (as
tonyp@3119 701 // the result is assumed to be returned in min_young_length).
tonyp@3119 702 min_young_length = max_young_length;
tonyp@3119 703 } else {
tonyp@3119 704 // The maximum possible number of young regions will not fit within
tonyp@3119 705 // the target pause time so we'll search for the optimal
tonyp@3119 706 // length. The loop invariants are:
tonyp@3119 707 //
tonyp@3119 708 // min_young_length < max_young_length
tonyp@3119 709 // min_young_length is known to fit into the target pause time
tonyp@3119 710 // max_young_length is known not to fit into the target pause time
tonyp@3119 711 //
tonyp@3119 712 // Going into the loop we know the above hold as we've just
tonyp@3119 713 // checked them. Every time around the loop we check whether
tonyp@3119 714 // the middle value between min_young_length and
tonyp@3119 715 // max_young_length fits into the target pause time. If it
tonyp@3119 716 // does, it becomes the new min. If it doesn't, it becomes
tonyp@3119 717 // the new max. This way we maintain the loop invariants.
tonyp@3119 718
tonyp@3119 719 assert(min_young_length < max_young_length, "invariant");
tonyp@3119 720 size_t diff = (max_young_length - min_young_length) / 2;
tonyp@3119 721 while (diff > 0) {
tonyp@3119 722 size_t young_length = min_young_length + diff;
tonyp@3119 723 if (predict_will_fit(young_length, base_time_ms,
tonyp@3119 724 base_free_regions, target_pause_time_ms)) {
tonyp@3119 725 min_young_length = young_length;
tonyp@3119 726 } else {
tonyp@3119 727 max_young_length = young_length;
tonyp@3119 728 }
tonyp@3119 729 assert(min_young_length < max_young_length, "invariant");
tonyp@3119 730 diff = (max_young_length - min_young_length) / 2;
tonyp@3119 731 }
tonyp@3119 732 // The results is min_young_length which, according to the
tonyp@3119 733 // loop invariants, should fit within the target pause time.
tonyp@3119 734
tonyp@3119 735 // These are the post-conditions of the binary search above:
tonyp@3119 736 assert(min_young_length < max_young_length,
tonyp@3119 737 "otherwise we should have discovered that max_young_length "
tonyp@3119 738 "fits into the pause target and not done the binary search");
tonyp@3119 739 assert(predict_will_fit(min_young_length, base_time_ms,
tonyp@3119 740 base_free_regions, target_pause_time_ms),
tonyp@3119 741 "min_young_length, the result of the binary search, should "
tonyp@3119 742 "fit into the pause target");
tonyp@3119 743 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
tonyp@3119 744 base_free_regions, target_pause_time_ms),
tonyp@3119 745 "min_young_length, the result of the binary search, should be "
tonyp@3119 746 "optimal, so no larger length should fit into the pause target");
tonyp@3119 747 }
tonyp@3119 748 } else {
tonyp@3119 749 // Even the minimum length doesn't fit into the pause time
tonyp@3119 750 // target, return it as the result nevertheless.
tonyp@3119 751 }
tonyp@3119 752 return base_min_length + min_young_length;
ysr@777 753 }
ysr@777 754
apetrusenko@980 755 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 756 double survivor_regions_evac_time = 0.0;
apetrusenko@980 757 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 758 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 759 r = r->get_next_young_region()) {
apetrusenko@980 760 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 761 }
apetrusenko@980 762 return survivor_regions_evac_time;
apetrusenko@980 763 }
apetrusenko@980 764
tonyp@3119 765 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
ysr@777 766 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 767
johnc@1829 768 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 769 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 770 // add 10% to avoid having to recalculate often
ysr@777 771 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
tonyp@3119 772 update_young_list_target_length(rs_lengths_prediction);
ysr@777 773 }
ysr@777 774 }
ysr@777 775
tonyp@3119 776
tonyp@3119 777
// Generic CollectorPolicy allocation hook. Not wired up for G1 yet:
// the guarantee below fires if anything calls it.
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 784
// This method controls how a collector handles one or more
// of its generations being fully allocated.
// Not wired up for G1 yet: the guarantee below fires if anything
// calls it.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 792
ysr@777 793
ysr@777 794 #ifndef PRODUCT
ysr@777 795 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 796 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 797 return
ysr@777 798 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 799 // also call verify_young_ages on any additional surv rate groups
ysr@777 800 }
ysr@777 801
ysr@777 802 bool
ysr@777 803 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 804 SurvRateGroup *surv_rate_group) {
ysr@777 805 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 806
ysr@777 807 const char* name = surv_rate_group->name();
ysr@777 808 bool ret = true;
ysr@777 809 int prev_age = -1;
ysr@777 810
ysr@777 811 for (HeapRegion* curr = head;
ysr@777 812 curr != NULL;
ysr@777 813 curr = curr->get_next_young_region()) {
ysr@777 814 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 815 if (group == NULL && !curr->is_survivor()) {
ysr@777 816 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 817 ret = false;
ysr@777 818 }
ysr@777 819
ysr@777 820 if (surv_rate_group == group) {
ysr@777 821 int age = curr->age_in_surv_rate_group();
ysr@777 822
ysr@777 823 if (age < 0) {
ysr@777 824 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 825 ret = false;
ysr@777 826 }
ysr@777 827
ysr@777 828 if (age <= prev_age) {
ysr@777 829 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 830 "(%d, %d)", name, age, prev_age);
ysr@777 831 ret = false;
ysr@777 832 }
ysr@777 833 prev_age = age;
ysr@777 834 }
ysr@777 835 }
ysr@777 836
ysr@777 837 return ret;
ysr@777 838 }
ysr@777 839 #endif // PRODUCT
ysr@777 840
// Timestamp the start of a Full GC; record_full_collection_end() uses
// this to compute the total Full GC time.
void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}
ysr@777 846
// Bookkeeping after a Full GC: record its duration and reset the
// young-gen sizing and concurrent-cycle heuristics to a clean state.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  // Must come after _free_regions_at_end_of_collection is refreshed
  // above: the target length calculation reads that field.
  update_young_list_target_length();
  _collectionSetChooser->clearMarkedHeapRegions();
}
ysr@777 882
// Timestamp the initiation of a stop-the-world operation. Consumed by
// record_collection_pause_start() (stop-world times) and
// record_concurrent_pause() (yield times).
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
ysr@777 886
// Per-pause setup at the start of an evacuation pause: prints the
// pause header (at "finer"), snapshots heap/young-list state used by
// record_collection_pause_end(), and resets per-pause accumulators.
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (G1Log::finer()) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Time from stop-world initiation to the actual pause start.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  // Snapshot the pre-pause heap state for the end-of-pause accounting.
  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

// NOTE(review): HotSpot debug builds usually define ASSERT rather than
// DEBUG, so this poisoning block may never be compiled in -- confirm.
#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_satb_filtering_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
    _par_last_gc_worker_other_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  // This is initialized to zero here and is set during the evacuation
  // pause if we actually waited for the root region scanning to finish.
  _root_region_scan_wait_time_ms = 0.0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
ysr@777 959
// Called once the initial-mark work of a concurrent cycle is done:
// flags that marking is now in progress and records its stop-world cost.
void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                           mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
ysr@777 967
// Timestamp the start of the remark pause; concurrent marking proper
// is considered over once remark begins.
void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}
ysr@777 972
ysr@777 973 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 974 double end_time_sec = os::elapsedTime();
ysr@777 975 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 976 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 977 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 978 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 979
ysr@777 980 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 981 }
ysr@777 982
// Timestamp the start of the cleanup pause.
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
ysr@777 986
// Marking has fully completed: close the marking window and set
// _last_young_gc -- presumably requesting one final young GC before
// mixed GCs can start; confirm against the pause-end logic.
void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}
ysr@777 991
ysr@777 992 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 993 if (_stop_world_start > 0.0) {
ysr@777 994 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 995 _all_yield_times_ms->add(yield_ms);
ysr@777 996 }
ysr@777 997 }
ysr@777 998
void G1CollectorPolicy::record_concurrent_pause_end() {
  // Nothing to record currently; kept for symmetry with
  // record_concurrent_pause().
}
ysr@777 1001
// Sums n consecutive entries of the circular buffer sum_arr (capacity
// N), beginning at index start; indices wrap around modulo N.
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T total = (T)0;
  for (int i = start; i < start + n; i++) {
    total += sum_arr[i % N];
  }
  return total;
}
ysr@777 1011
tonyp@1966 1012 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1013 const char* str,
brutisso@2712 1014 double* data) {
ysr@777 1015 double min = data[0], max = data[0];
ysr@777 1016 double total = 0.0;
brutisso@2645 1017 LineBuffer buf(level);
brutisso@2645 1018 buf.append("[%s (ms):", str);
jmasa@3294 1019 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1020 double val = data[i];
ysr@777 1021 if (val < min)
ysr@777 1022 min = val;
ysr@777 1023 if (val > max)
ysr@777 1024 max = val;
ysr@777 1025 total += val;
brutisso@3710 1026 if (G1Log::finest()) {
brutisso@3710 1027 buf.append(" %.1lf", val);
brutisso@3710 1028 }
ysr@777 1029 }
brutisso@3710 1030
brutisso@3710 1031 if (G1Log::finest()) {
brutisso@3710 1032 buf.append_and_print_cr("");
brutisso@3710 1033 }
jmasa@3294 1034 double avg = total / (double) no_of_gc_threads();
brutisso@3710 1035 buf.append_and_print_cr(" Avg: %.1lf Min: %.1lf Max: %.1lf Diff: %.1lf]",
brutisso@2712 1036 avg, min, max, max - min);
ysr@777 1037 }
ysr@777 1038
tonyp@1966 1039 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1040 const char* str,
brutisso@2712 1041 double* data) {
ysr@777 1042 double min = data[0], max = data[0];
ysr@777 1043 double total = 0.0;
brutisso@2645 1044 LineBuffer buf(level);
brutisso@2645 1045 buf.append("[%s :", str);
jmasa@3294 1046 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1047 double val = data[i];
ysr@777 1048 if (val < min)
ysr@777 1049 min = val;
ysr@777 1050 if (val > max)
ysr@777 1051 max = val;
ysr@777 1052 total += val;
brutisso@2645 1053 buf.append(" %d", (int) val);
ysr@777 1054 }
brutisso@2712 1055 buf.append_and_print_cr("");
jmasa@3294 1056 double avg = total / (double) no_of_gc_threads();
brutisso@2712 1057 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
brutisso@2712 1058 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
ysr@777 1059 }
ysr@777 1060
johnc@3219 1061 void G1CollectorPolicy::print_stats(int level,
johnc@3219 1062 const char* str,
johnc@3219 1063 double value) {
brutisso@2645 1064 LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1065 }
ysr@777 1066
johnc@3219 1067 void G1CollectorPolicy::print_stats(int level,
johnc@3219 1068 const char* str,
johnc@3219 1069 int value) {
brutisso@2645 1070 LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
ysr@777 1071 }
ysr@777 1072
johnc@3219 1073 double G1CollectorPolicy::avg_value(double* data) {
jmasa@2188 1074 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1075 double ret = 0.0;
jmasa@3294 1076 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1077 ret += data[i];
johnc@3219 1078 }
jmasa@3294 1079 return ret / (double) no_of_gc_threads();
ysr@777 1080 } else {
ysr@777 1081 return data[0];
ysr@777 1082 }
ysr@777 1083 }
ysr@777 1084
johnc@3219 1085 double G1CollectorPolicy::max_value(double* data) {
jmasa@2188 1086 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1087 double ret = data[0];
jmasa@3294 1088 for (uint i = 1; i < no_of_gc_threads(); ++i) {
johnc@3219 1089 if (data[i] > ret) {
ysr@777 1090 ret = data[i];
johnc@3219 1091 }
johnc@3219 1092 }
ysr@777 1093 return ret;
ysr@777 1094 } else {
ysr@777 1095 return data[0];
ysr@777 1096 }
ysr@777 1097 }
ysr@777 1098
johnc@3219 1099 double G1CollectorPolicy::sum_of_values(double* data) {
jmasa@2188 1100 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1101 double sum = 0.0;
jmasa@3294 1102 for (uint i = 0; i < no_of_gc_threads(); i++) {
ysr@777 1103 sum += data[i];
johnc@3219 1104 }
ysr@777 1105 return sum;
ysr@777 1106 } else {
ysr@777 1107 return data[0];
ysr@777 1108 }
ysr@777 1109 }
ysr@777 1110
johnc@3219 1111 double G1CollectorPolicy::max_sum(double* data1, double* data2) {
ysr@777 1112 double ret = data1[0] + data2[0];
ysr@777 1113
jmasa@2188 1114 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1115 for (uint i = 1; i < no_of_gc_threads(); ++i) {
ysr@777 1116 double data = data1[i] + data2[i];
johnc@3219 1117 if (data > ret) {
ysr@777 1118 ret = data;
johnc@3219 1119 }
ysr@777 1120 }
ysr@777 1121 }
ysr@777 1122 return ret;
ysr@777 1123 }
ysr@777 1124
// Decides whether a new concurrent marking cycle should be requested:
// true when no cycle is running, we are in the young-GC phase, and the
// non-young occupancy plus the pending allocation crosses the
// InitiatingHeapOccupancyPercent threshold. Logs the decision via the
// ergonomics tracing machinery either way.
bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  // Never request a new cycle while the concurrent mark thread is
  // still working on the previous one.
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  // Threshold is a percentage of the current heap capacity; occupancy
  // is the non-young (old + humongous) portion plus this allocation.
  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      // Still in the mixed-GC phase: postpone the cycle request.
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}
brutisso@3456 1168
ysr@777 1169 // Anything below that is considered to be zero
ysr@777 1170 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1171
jmasa@3294 1172 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
ysr@777 1173 double end_time_sec = os::elapsedTime();
ysr@777 1174 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1175 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
tonyp@3289 1176 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
tonyp@3289 1177 "otherwise, the subtraction below does not make sense");
ysr@777 1178 size_t rs_size =
tonyp@3289 1179 _cur_collection_pause_used_regions_at_start - cset_region_length();
ysr@777 1180 size_t cur_used_bytes = _g1->used();
ysr@777 1181 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1182 bool last_pause_included_initial_mark = false;
tonyp@2062 1183 bool update_stats = !_g1->evacuation_failed();
jmasa@3294 1184 set_no_of_gc_threads(no_of_gc_threads);
ysr@777 1185
ysr@777 1186 #ifndef PRODUCT
ysr@777 1187 if (G1YoungSurvRateVerbose) {
ysr@777 1188 gclog_or_tty->print_cr("");
ysr@777 1189 _short_lived_surv_rate_group->print();
ysr@777 1190 // do that for any other surv rate groups too
ysr@777 1191 }
ysr@777 1192 #endif // PRODUCT
ysr@777 1193
brutisso@3065 1194 last_pause_included_initial_mark = during_initial_mark_pause();
brutisso@3456 1195 if (last_pause_included_initial_mark) {
brutisso@3065 1196 record_concurrent_mark_init_end(0.0);
tonyp@3539 1197 } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
brutisso@3456 1198 // Note: this might have already been set, if during the last
brutisso@3456 1199 // pause we decided to start a cycle but at the beginning of
brutisso@3456 1200 // this pause we decided to postpone it. That's OK.
brutisso@3456 1201 set_initiate_conc_mark_if_possible();
brutisso@3456 1202 }
brutisso@3065 1203
ysr@777 1204 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1205 end_time_sec, false);
ysr@777 1206
ysr@777 1207 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1208 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1209 // can lead to more memory being used during collection than was used
ysr@777 1210 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1211 // Pauses in which evacuation failed can also lead to negative
ysr@777 1212 // collections, since no space is reclaimed from a region containing an
ysr@777 1213 // object whose evacuation failed.
ysr@777 1214 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1215 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1216 // (DLD, 10/05.)
ysr@777 1217 assert((true || parallel) // Always using GC LABs now.
ysr@777 1218 || _g1->evacuation_failed()
ysr@777 1219 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1220 "Negative collection");
ysr@777 1221
ysr@777 1222 size_t freed_bytes =
ysr@777 1223 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1224 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1225
ysr@777 1226 double survival_fraction =
ysr@777 1227 (double)surviving_bytes/
ysr@777 1228 (double)_collection_set_bytes_used_before;
ysr@777 1229
johnc@3219 1230 // These values are used to update the summary information that is
johnc@3219 1231 // displayed when TraceGen0Time is enabled, and are output as part
brutisso@3710 1232 // of the "finer" output, in the non-parallel case.
johnc@3219 1233
johnc@3021 1234 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
tonyp@3416 1235 double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
johnc@3021 1236 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
johnc@3021 1237 double update_rs_processed_buffers =
johnc@3021 1238 sum_of_values(_par_last_update_rs_processed_buffers);
johnc@3021 1239 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
johnc@3021 1240 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
johnc@3021 1241 double termination_time = avg_value(_par_last_termination_times_ms);
johnc@3021 1242
johnc@3219 1243 double known_time = ext_root_scan_time +
tonyp@3416 1244 satb_filtering_time +
johnc@3219 1245 update_rs_time +
johnc@3219 1246 scan_rs_time +
johnc@3219 1247 obj_copy_time;
johnc@3219 1248
johnc@3219 1249 double other_time_ms = elapsed_ms;
johnc@3219 1250
tonyp@3464 1251 // Subtract the root region scanning wait time. It's initialized to
tonyp@3464 1252 // zero at the start of the pause.
tonyp@3464 1253 other_time_ms -= _root_region_scan_wait_time_ms;
tonyp@3464 1254
johnc@3219 1255 if (parallel) {
johnc@3219 1256 other_time_ms -= _cur_collection_par_time_ms;
johnc@3219 1257 } else {
johnc@3219 1258 other_time_ms -= known_time;
johnc@3219 1259 }
johnc@3219 1260
johnc@3689 1261 // Now subtract the time taken to fix up roots in generated code
johnc@3689 1262 other_time_ms -= _cur_collection_code_root_fixup_time_ms;
johnc@3689 1263
johnc@3219 1264 // Subtract the time taken to clean the card table from the
johnc@3219 1265 // current value of "other time"
johnc@3219 1266 other_time_ms -= _cur_clear_ct_time_ms;
johnc@3219 1267
johnc@3219 1268 // TraceGen0Time and TraceGen1Time summary info updating.
johnc@3219 1269 _all_pause_times_ms->add(elapsed_ms);
johnc@3021 1270
tonyp@1030 1271 if (update_stats) {
johnc@3219 1272 _summary->record_total_time_ms(elapsed_ms);
johnc@3219 1273 _summary->record_other_time_ms(other_time_ms);
johnc@3219 1274
johnc@3219 1275 MainBodySummary* body_summary = _summary->main_body_summary();
johnc@3219 1276 assert(body_summary != NULL, "should not be null!");
johnc@3219 1277
tonyp@3464 1278 body_summary->record_root_region_scan_wait_time_ms(
tonyp@3464 1279 _root_region_scan_wait_time_ms);
johnc@3021 1280 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
tonyp@3416 1281 body_summary->record_satb_filtering_time_ms(satb_filtering_time);
johnc@3021 1282 body_summary->record_update_rs_time_ms(update_rs_time);
johnc@3021 1283 body_summary->record_scan_rs_time_ms(scan_rs_time);
johnc@3021 1284 body_summary->record_obj_copy_time_ms(obj_copy_time);
johnc@3219 1285
johnc@3021 1286 if (parallel) {
johnc@3021 1287 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
johnc@3021 1288 body_summary->record_termination_time_ms(termination_time);
johnc@3219 1289
johnc@3219 1290 double parallel_known_time = known_time + termination_time;
johnc@3219 1291 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
johnc@3021 1292 body_summary->record_parallel_other_time_ms(parallel_other_time);
johnc@3021 1293 }
johnc@3219 1294
johnc@3219 1295 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
johnc@3021 1296
ysr@777 1297 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1298 // fragmentation can produce negative collections. Same with evac
ysr@777 1299 // failure.
ysr@777 1300 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1301 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1302 // (DLD, 10/05.
ysr@777 1303 assert((true || parallel)
ysr@777 1304 || _g1->evacuation_failed()
ysr@777 1305 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1306 "Or else negative collection!");
johnc@3219 1307
ysr@777 1308 // this is where we update the allocation rate of the application
ysr@777 1309 double app_time_ms =
ysr@777 1310 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1311 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1312 // This usually happens due to the timer not having the required
ysr@777 1313 // granularity. Some Linuxes are the usual culprits.
ysr@777 1314 // We'll just set it to something (arbitrarily) small.
ysr@777 1315 app_time_ms = 1.0;
ysr@777 1316 }
tonyp@3289 1317 // We maintain the invariant that all objects allocated by mutator
tonyp@3289 1318 // threads will be allocated out of eden regions. So, we can use
tonyp@3289 1319 // the eden region number allocated since the previous GC to
tonyp@3289 1320 // calculate the application's allocate rate. The only exception
tonyp@3289 1321 // to that is humongous objects that are allocated separately. But
tonyp@3289 1322 // given that humongous object allocations do not really affect
tonyp@3289 1323 // either the pause's duration nor when the next pause will take
tonyp@3289 1324 // place we can safely ignore them here.
tonyp@3289 1325 size_t regions_allocated = eden_cset_region_length();
ysr@777 1326 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1327 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1328
ysr@777 1329 double interval_ms =
ysr@777 1330 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1331 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1332 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1333 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1334 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1335 #ifndef PRODUCT
ysr@1521 1336 // Dump info to allow post-facto debugging
ysr@1521 1337 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1338 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1339 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1340 _recent_gc_times_ms->dump();
ysr@1521 1341 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1342 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1343 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1344 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1345 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1346 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1347 #endif // !PRODUCT
ysr@1522 1348 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1349 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1350 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1351 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1352 } else {
ysr@1521 1353 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1354 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1355 }
ysr@1521 1356 }
ysr@777 1357 }
ysr@777 1358
johnc@3219 1359 for (int i = 0; i < _aux_num; ++i) {
johnc@3219 1360 if (_cur_aux_times_set[i]) {
johnc@3219 1361 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
johnc@3219 1362 }
johnc@3219 1363 }
johnc@3219 1364
brutisso@3710 1365 if (G1Log::finer()) {
johnc@3219 1366 bool print_marking_info =
johnc@3219 1367 _g1->mark_in_progress() && !last_pause_included_initial_mark;
johnc@3219 1368
tonyp@2062 1369 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1370 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1371 elapsed_ms / 1000.0);
ysr@777 1372
tonyp@3464 1373 if (_root_region_scan_wait_time_ms > 0.0) {
tonyp@3464 1374 print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
tonyp@3464 1375 }
tonyp@2062 1376 if (parallel) {
tonyp@2062 1377 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
johnc@3219 1378 print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
johnc@3219 1379 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
johnc@3219 1380 if (print_marking_info) {
tonyp@3416 1381 print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
johnc@3219 1382 }
tonyp@2062 1383 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
brutisso@3710 1384 if (G1Log::finest()) {
brutisso@3710 1385 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
brutisso@3710 1386 }
tonyp@2062 1387 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1388 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1389 print_par_stats(2, "Termination", _par_last_termination_times_ms);
brutisso@3710 1390 if (G1Log::finest()) {
brutisso@3710 1391 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
brutisso@3710 1392 }
brutisso@2712 1393
brutisso@2712 1394 for (int i = 0; i < _parallel_gc_threads; i++) {
johnc@3689 1395 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
johnc@3689 1396 _par_last_gc_worker_start_times_ms[i];
johnc@3219 1397
johnc@3219 1398 double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
tonyp@3416 1399 _par_last_satb_filtering_times_ms[i] +
johnc@3219 1400 _par_last_update_rs_times_ms[i] +
johnc@3219 1401 _par_last_scan_rs_times_ms[i] +
johnc@3219 1402 _par_last_obj_copy_times_ms[i] +
johnc@3219 1403 _par_last_termination_times_ms[i];
johnc@3219 1404
johnc@3689 1405 _par_last_gc_worker_other_times_ms[i] = _par_last_gc_worker_times_ms[i] -
johnc@3689 1406 worker_known_time;
brutisso@2712 1407 }
johnc@3689 1408
johnc@3219 1409 print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
johnc@3689 1410 print_par_stats(2, "GC Worker Total", _par_last_gc_worker_times_ms);
johnc@3689 1411 print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
tonyp@2062 1412 } else {
johnc@3219 1413 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
johnc@3219 1414 if (print_marking_info) {
tonyp@3416 1415 print_stats(1, "SATB Filtering", satb_filtering_time);
johnc@3219 1416 }
tonyp@2062 1417 print_stats(1, "Update RS", update_rs_time);
brutisso@3710 1418 if (G1Log::finest()) {
brutisso@3710 1419 print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
brutisso@3710 1420 }
tonyp@2062 1421 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1422 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1423 }
johnc@3689 1424 print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
johnc@3219 1425 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
johnc@1325 1426 #ifndef PRODUCT
johnc@1325 1427 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1428 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1429 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1430 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1431 if (_num_cc_clears > 0) {
johnc@1325 1432 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1433 }
johnc@1325 1434 #endif
ysr@777 1435 print_stats(1, "Other", other_time_ms);
johnc@3296 1436 print_stats(2, "Choose CSet",
johnc@3296 1437 (_recorded_young_cset_choice_time_ms +
johnc@3296 1438 _recorded_non_young_cset_choice_time_ms));
johnc@3175 1439 print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
johnc@3175 1440 print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
johnc@3296 1441 print_stats(2, "Free CSet",
johnc@3296 1442 (_recorded_young_free_cset_time_ms +
johnc@3296 1443 _recorded_non_young_free_cset_time_ms));
johnc@1829 1444
ysr@777 1445 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1446 if (_cur_aux_times_set[i]) {
ysr@777 1447 char buffer[96];
ysr@777 1448 sprintf(buffer, "Aux%d", i);
ysr@777 1449 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1450 }
ysr@777 1451 }
ysr@777 1452 }
ysr@777 1453
ysr@777 1454 // Update the efficiency-since-mark vars.
ysr@777 1455 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1456 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1457 // This usually happens due to the timer not having the required
ysr@777 1458 // granularity. Some Linuxes are the usual culprits.
ysr@777 1459 // We'll just set it to something (arbitrarily) small.
ysr@777 1460 proc_ms = 1.0;
ysr@777 1461 }
ysr@777 1462 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1463
ysr@777 1464 bool new_in_marking_window = _in_marking_window;
ysr@777 1465 bool new_in_marking_window_im = false;
tonyp@1794 1466 if (during_initial_mark_pause()) {
ysr@777 1467 new_in_marking_window = true;
ysr@777 1468 new_in_marking_window_im = true;
ysr@777 1469 }
ysr@777 1470
tonyp@3337 1471 if (_last_young_gc) {
tonyp@3539 1472 // This is supposed to to be the "last young GC" before we start
tonyp@3539 1473 // doing mixed GCs. Here we decide whether to start mixed GCs or not.
tonyp@3539 1474
johnc@3178 1475 if (!last_pause_included_initial_mark) {
tonyp@3539 1476 if (next_gc_should_be_mixed("start mixed GCs",
tonyp@3539 1477 "do not start mixed GCs")) {
tonyp@3539 1478 set_gcs_are_young(false);
tonyp@3539 1479 }
johnc@3178 1480 } else {
tonyp@3337 1481 ergo_verbose0(ErgoMixedGCs,
tonyp@3337 1482 "do not start mixed GCs",
johnc@3178 1483 ergo_format_reason("concurrent cycle is about to start"));
johnc@3178 1484 }
tonyp@3337 1485 _last_young_gc = false;
brutisso@3065 1486 }
brutisso@3065 1487
tonyp@3337 1488 if (!_last_gc_was_young) {
tonyp@3539 1489 // This is a mixed GC. Here we decide whether to continue doing
tonyp@3539 1490 // mixed GCs or not.
tonyp@3539 1491
tonyp@3539 1492 if (!next_gc_should_be_mixed("continue mixed GCs",
tonyp@3539 1493 "do not continue mixed GCs")) {
tonyp@3337 1494 set_gcs_are_young(true);
ysr@777 1495 }
brutisso@3065 1496 }
tonyp@3337 1497
tonyp@3337 1498 if (_last_gc_was_young && !_during_marking) {
brutisso@3065 1499 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1500 }
ysr@777 1501
ysr@777 1502 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1503 // do that for any other surv rate groupsx
ysr@777 1504
apetrusenko@1112 1505 if (update_stats) {
ysr@777 1506 double pause_time_ms = elapsed_ms;
ysr@777 1507
ysr@777 1508 size_t diff = 0;
ysr@777 1509 if (_max_pending_cards >= _pending_cards)
ysr@777 1510 diff = _max_pending_cards - _pending_cards;
ysr@777 1511 _pending_card_diff_seq->add((double) diff);
ysr@777 1512
ysr@777 1513 double cost_per_card_ms = 0.0;
ysr@777 1514 if (_pending_cards > 0) {
ysr@777 1515 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1516 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1517 }
ysr@777 1518
ysr@777 1519 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1520
ysr@777 1521 double cost_per_entry_ms = 0.0;
ysr@777 1522 if (cards_scanned > 10) {
ysr@777 1523 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
tonyp@3337 1524 if (_last_gc_was_young) {
ysr@777 1525 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1526 } else {
tonyp@3337 1527 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1528 }
ysr@777 1529 }
ysr@777 1530
ysr@777 1531 if (_max_rs_lengths > 0) {
ysr@777 1532 double cards_per_entry_ratio =
ysr@777 1533 (double) cards_scanned / (double) _max_rs_lengths;
tonyp@3337 1534 if (_last_gc_was_young) {
tonyp@3337 1535 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1536 } else {
tonyp@3337 1537 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1538 }
ysr@777 1539 }
ysr@777 1540
tonyp@3356 1541 // This is defensive. For a while _max_rs_lengths could get
tonyp@3356 1542 // smaller than _recorded_rs_lengths which was causing
tonyp@3356 1543 // rs_length_diff to get very large and mess up the RSet length
tonyp@3356 1544 // predictions. The reason was unsafe concurrent updates to the
tonyp@3356 1545 // _inc_cset_recorded_rs_lengths field which the code below guards
tonyp@3356 1546 // against (see CR 7118202). This bug has now been fixed (see CR
tonyp@3356 1547 // 7119027). However, I'm still worried that
tonyp@3356 1548 // _inc_cset_recorded_rs_lengths might still end up somewhat
tonyp@3356 1549 // inaccurate. The concurrent refinement thread calculates an
tonyp@3356 1550 // RSet's length concurrently with other CR threads updating it
tonyp@3356 1551 // which might cause it to calculate the length incorrectly (if,
tonyp@3356 1552 // say, it's in mid-coarsening). So I'll leave in the defensive
tonyp@3356 1553 // conditional below just in case.
tonyp@3326 1554 size_t rs_length_diff = 0;
tonyp@3326 1555 if (_max_rs_lengths > _recorded_rs_lengths) {
tonyp@3326 1556 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
tonyp@3326 1557 }
tonyp@3326 1558 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1559
ysr@777 1560 size_t copied_bytes = surviving_bytes;
ysr@777 1561 double cost_per_byte_ms = 0.0;
ysr@777 1562 if (copied_bytes > 0) {
ysr@777 1563 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
tonyp@3337 1564 if (_in_marking_window) {
ysr@777 1565 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
tonyp@3337 1566 } else {
ysr@777 1567 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
tonyp@3337 1568 }
ysr@777 1569 }
ysr@777 1570
ysr@777 1571 double all_other_time_ms = pause_time_ms -
tonyp@3691 1572 (update_rs_time + scan_rs_time + obj_copy_time + termination_time);
ysr@777 1573
ysr@777 1574 double young_other_time_ms = 0.0;
tonyp@3289 1575 if (young_cset_region_length() > 0) {
ysr@777 1576 young_other_time_ms =
ysr@777 1577 _recorded_young_cset_choice_time_ms +
ysr@777 1578 _recorded_young_free_cset_time_ms;
ysr@777 1579 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
tonyp@3289 1580 (double) young_cset_region_length());
ysr@777 1581 }
ysr@777 1582 double non_young_other_time_ms = 0.0;
tonyp@3289 1583 if (old_cset_region_length() > 0) {
ysr@777 1584 non_young_other_time_ms =
ysr@777 1585 _recorded_non_young_cset_choice_time_ms +
ysr@777 1586 _recorded_non_young_free_cset_time_ms;
ysr@777 1587
ysr@777 1588 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
tonyp@3289 1589 (double) old_cset_region_length());
ysr@777 1590 }
ysr@777 1591
ysr@777 1592 double constant_other_time_ms = all_other_time_ms -
ysr@777 1593 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1594 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1595
ysr@777 1596 double survival_ratio = 0.0;
ysr@777 1597 if (_bytes_in_collection_set_before_gc > 0) {
tonyp@3028 1598 survival_ratio = (double) _bytes_copied_during_gc /
tonyp@3028 1599 (double) _bytes_in_collection_set_before_gc;
ysr@777 1600 }
ysr@777 1601
ysr@777 1602 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1603 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1604 }
ysr@777 1605
ysr@777 1606 _in_marking_window = new_in_marking_window;
ysr@777 1607 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1608 _free_regions_at_end_of_collection = _g1->free_regions();
tonyp@3119 1609 update_young_list_target_length();
ysr@777 1610
iveresov@1546 1611 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1612 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1613 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
tonyp@3209 1614
tonyp@3209 1615 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 1616 }
ysr@777 1617
tonyp@2961 1618 #define EXT_SIZE_FORMAT "%d%s"
tonyp@2961 1619 #define EXT_SIZE_PARAMS(bytes) \
tonyp@2961 1620 byte_size_in_proper_unit((bytes)), \
tonyp@2961 1621 proper_unit_for_byte_size((bytes))
tonyp@2961 1622
tonyp@2961 1623 void G1CollectorPolicy::print_heap_transition() {
brutisso@3710 1624 if (G1Log::finer()) {
tonyp@2961 1625 YoungList* young_list = _g1->young_list();
tonyp@2961 1626 size_t eden_bytes = young_list->eden_used_bytes();
tonyp@2961 1627 size_t survivor_bytes = young_list->survivor_used_bytes();
tonyp@2961 1628 size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
tonyp@2961 1629 size_t used = _g1->used();
tonyp@2961 1630 size_t capacity = _g1->capacity();
brutisso@3120 1631 size_t eden_capacity =
brutisso@3120 1632 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
tonyp@2961 1633
tonyp@2961 1634 gclog_or_tty->print_cr(
brutisso@3120 1635 " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
brutisso@3120 1636 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
brutisso@3120 1637 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
brutisso@3120 1638 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
brutisso@3120 1639 EXT_SIZE_PARAMS(_eden_bytes_before_gc),
brutisso@3120 1640 EXT_SIZE_PARAMS(_prev_eden_capacity),
brutisso@3120 1641 EXT_SIZE_PARAMS(eden_bytes),
brutisso@3120 1642 EXT_SIZE_PARAMS(eden_capacity),
brutisso@3120 1643 EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
brutisso@3120 1644 EXT_SIZE_PARAMS(survivor_bytes),
brutisso@3120 1645 EXT_SIZE_PARAMS(used_before_gc),
brutisso@3120 1646 EXT_SIZE_PARAMS(_capacity_before_gc),
brutisso@3120 1647 EXT_SIZE_PARAMS(used),
brutisso@3120 1648 EXT_SIZE_PARAMS(capacity));
brutisso@3120 1649
brutisso@3120 1650 _prev_eden_capacity = eden_capacity;
brutisso@3710 1651 } else if (G1Log::fine()) {
tonyp@2961 1652 _g1->print_size_transition(gclog_or_tty,
tonyp@2961 1653 _cur_collection_pause_used_at_start_bytes,
tonyp@2961 1654 _g1->used(), _g1->capacity());
tonyp@2961 1655 }
tonyp@2961 1656 }
tonyp@2961 1657
iveresov@1546 1658 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1659 double update_rs_processed_buffers,
iveresov@1546 1660 double goal_ms) {
iveresov@1546 1661 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1662 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1663
tonyp@1717 1664 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1665 const int k_gy = 3, k_gr = 6;
iveresov@1546 1666 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1667
iveresov@1546 1668 int g = cg1r->green_zone();
iveresov@1546 1669 if (update_rs_time > goal_ms) {
iveresov@1546 1670 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
iveresov@1546 1671 } else {
iveresov@1546 1672 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1673 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1674 }
iveresov@1546 1675 }
iveresov@1546 1676 // Change the refinement threads params
iveresov@1546 1677 cg1r->set_green_zone(g);
iveresov@1546 1678 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1679 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1680 cg1r->reinitialize_threads();
iveresov@1546 1681
iveresov@1546 1682 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1683 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1684 cg1r->yellow_zone());
iveresov@1546 1685 // Change the barrier params
iveresov@1546 1686 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1687 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1688 }
iveresov@1546 1689
iveresov@1546 1690 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1691 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1692 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1693 } else {
iveresov@1546 1694 dcqs.set_completed_queue_padding(0);
iveresov@1546 1695 }
iveresov@1546 1696 dcqs.notify_if_necessary();
iveresov@1546 1697 }
iveresov@1546 1698
ysr@777 1699 double
ysr@777 1700 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1701 size_t rs_length = predict_rs_length_diff();
ysr@777 1702 size_t card_num;
tonyp@3337 1703 if (gcs_are_young()) {
ysr@777 1704 card_num = predict_young_card_num(rs_length);
tonyp@3337 1705 } else {
ysr@777 1706 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1707 }
ysr@777 1708 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1709 }
ysr@777 1710
ysr@777 1711 double
ysr@777 1712 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1713 size_t scanned_cards) {
ysr@777 1714 return
ysr@777 1715 predict_rs_update_time_ms(pending_cards) +
ysr@777 1716 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1717 predict_constant_other_time_ms();
ysr@777 1718 }
ysr@777 1719
ysr@777 1720 double
ysr@777 1721 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1722 bool young) {
ysr@777 1723 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1724 size_t card_num;
tonyp@3337 1725 if (gcs_are_young()) {
ysr@777 1726 card_num = predict_young_card_num(rs_length);
tonyp@3337 1727 } else {
ysr@777 1728 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1729 }
ysr@777 1730 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1731
ysr@777 1732 double region_elapsed_time_ms =
ysr@777 1733 predict_rs_scan_time_ms(card_num) +
ysr@777 1734 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1735
ysr@777 1736 if (young)
ysr@777 1737 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1738 else
ysr@777 1739 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1740
ysr@777 1741 return region_elapsed_time_ms;
ysr@777 1742 }
ysr@777 1743
ysr@777 1744 size_t
ysr@777 1745 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1746 size_t bytes_to_copy;
ysr@777 1747 if (hr->is_marked())
ysr@777 1748 bytes_to_copy = hr->max_live_bytes();
ysr@777 1749 else {
tonyp@3539 1750 assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
ysr@777 1751 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1752 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1753 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1754 }
ysr@777 1755 return bytes_to_copy;
ysr@777 1756 }
ysr@777 1757
ysr@777 1758 void
tonyp@3289 1759 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
tonyp@3289 1760 size_t survivor_cset_region_length) {
tonyp@3289 1761 _eden_cset_region_length = eden_cset_region_length;
tonyp@3289 1762 _survivor_cset_region_length = survivor_cset_region_length;
tonyp@3289 1763 _old_cset_region_length = 0;
johnc@1829 1764 }
johnc@1829 1765
johnc@1829 1766 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
johnc@1829 1767 _recorded_rs_lengths = rs_lengths;
johnc@1829 1768 }
johnc@1829 1769
ysr@777 1770 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1771 double elapsed_ms) {
ysr@777 1772 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1773 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1774 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1775 }
ysr@777 1776
ysr@777 1777 size_t G1CollectorPolicy::expansion_amount() {
tonyp@3114 1778 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
tonyp@3114 1779 double threshold = _gc_overhead_perc;
tonyp@3114 1780 if (recent_gc_overhead > threshold) {
johnc@1186 1781 // We will double the existing space, or take
johnc@1186 1782 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 1783 // space, whichever is smaller, bounded below by a minimum
johnc@1186 1784 // expansion (unless that's all that's left.)
ysr@777 1785 const size_t min_expand_bytes = 1*M;
johnc@2504 1786 size_t reserved_bytes = _g1->max_capacity();
ysr@777 1787 size_t committed_bytes = _g1->capacity();
ysr@777 1788 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 1789 size_t expand_bytes;
ysr@777 1790 size_t expand_bytes_via_pct =
johnc@1186 1791 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 1792 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 1793 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 1794 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
tonyp@3114 1795
tonyp@3114 1796 ergo_verbose5(ErgoHeapSizing,
tonyp@3114 1797 "attempt heap expansion",
tonyp@3114 1798 ergo_format_reason("recent GC overhead higher than "
tonyp@3114 1799 "threshold after GC")
tonyp@3114 1800 ergo_format_perc("recent GC overhead")
tonyp@3114 1801 ergo_format_perc("threshold")
tonyp@3114 1802 ergo_format_byte("uncommitted")
tonyp@3114 1803 ergo_format_byte_perc("calculated expansion amount"),
tonyp@3114 1804 recent_gc_overhead, threshold,
tonyp@3114 1805 uncommitted_bytes,
tonyp@3114 1806 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
tonyp@3114 1807
ysr@777 1808 return expand_bytes;
ysr@777 1809 } else {
ysr@777 1810 return 0;
ysr@777 1811 }
ysr@777 1812 }
ysr@777 1813
ysr@777 1814 class CountCSClosure: public HeapRegionClosure {
ysr@777 1815 G1CollectorPolicy* _g1_policy;
ysr@777 1816 public:
ysr@777 1817 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 1818 _g1_policy(g1_policy) {}
ysr@777 1819 bool doHeapRegion(HeapRegion* r) {
ysr@777 1820 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 1821 return false;
ysr@777 1822 }
ysr@777 1823 };
ysr@777 1824
ysr@777 1825 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 1826 CountCSClosure cs_closure(this);
ysr@777 1827 _g1->collection_set_iterate(&cs_closure);
ysr@777 1828 }
ysr@777 1829
johnc@3219 1830 void G1CollectorPolicy::print_summary(int level,
johnc@3219 1831 const char* str,
johnc@3219 1832 NumberSeq* seq) const {
ysr@777 1833 double sum = seq->sum();
brutisso@2645 1834 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 1835 str, sum / 1000.0, seq->avg());
ysr@777 1836 }
ysr@777 1837
johnc@3219 1838 void G1CollectorPolicy::print_summary_sd(int level,
johnc@3219 1839 const char* str,
johnc@3219 1840 NumberSeq* seq) const {
ysr@777 1841 print_summary(level, str, seq);
brutisso@2645 1842 LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 1843 seq->num(), seq->sd(), seq->maximum());
ysr@777 1844 }
ysr@777 1845
ysr@777 1846 void G1CollectorPolicy::check_other_times(int level,
ysr@777 1847 NumberSeq* other_times_ms,
ysr@777 1848 NumberSeq* calc_other_times_ms) const {
ysr@777 1849 bool should_print = false;
brutisso@2645 1850 LineBuffer buf(level + 2);
ysr@777 1851
ysr@777 1852 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 1853 fabs(calc_other_times_ms->sum()));
ysr@777 1854 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 1855 fabs(calc_other_times_ms->sum()));
ysr@777 1856 double sum_ratio = max_sum / min_sum;
ysr@777 1857 if (sum_ratio > 1.1) {
ysr@777 1858 should_print = true;
brutisso@2645 1859 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 1860 }
ysr@777 1861
ysr@777 1862 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 1863 fabs(calc_other_times_ms->avg()));
ysr@777 1864 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 1865 fabs(calc_other_times_ms->avg()));
ysr@777 1866 double avg_ratio = max_avg / min_avg;
ysr@777 1867 if (avg_ratio > 1.1) {
ysr@777 1868 should_print = true;
brutisso@2645 1869 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 1870 }
ysr@777 1871
ysr@777 1872 if (other_times_ms->sum() < -0.01) {
brutisso@2645 1873 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 1874 }
ysr@777 1875
ysr@777 1876 if (other_times_ms->avg() < -0.01) {
brutisso@2645 1877 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 1878 }
ysr@777 1879
ysr@777 1880 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 1881 should_print = true;
brutisso@2645 1882 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 1883 }
ysr@777 1884
ysr@777 1885 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 1886 should_print = true;
brutisso@2645 1887 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 1888 }
ysr@777 1889
ysr@777 1890 if (should_print)
ysr@777 1891 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 1892 }
ysr@777 1893
ysr@777 1894 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
jmasa@2188 1895 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 1896 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 1897 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 1898 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 1899 if (body_summary != NULL) {
tonyp@3464 1900 print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
ysr@777 1901 if (parallel) {
ysr@777 1902 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
johnc@3219 1903 print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
tonyp@3416 1904 print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
ysr@777 1905 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 1906 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 1907 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 1908 print_summary(2, "Termination", body_summary->get_termination_seq());
johnc@3219 1909 print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
ysr@777 1910 {
ysr@777 1911 NumberSeq* other_parts[] = {
ysr@777 1912 body_summary->get_ext_root_scan_seq(),
tonyp@3416 1913 body_summary->get_satb_filtering_seq(),
johnc@3219 1914 body_summary->get_update_rs_seq(),
ysr@777 1915 body_summary->get_scan_rs_seq(),
ysr@777 1916 body_summary->get_obj_copy_seq(),
ysr@777 1917 body_summary->get_termination_seq()
ysr@777 1918 };
ysr@777 1919 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
johnc@2134 1920 6, other_parts);
ysr@777 1921 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 1922 &calc_other_times_ms);
ysr@777 1923 }
ysr@777 1924 } else {
johnc@3219 1925 print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
tonyp@3416 1926 print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
ysr@777 1927 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 1928 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 1929 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 1930 }
ysr@777 1931 }
johnc@3219 1932 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 1933 print_summary(1, "Other", summary->get_other_seq());
ysr@777 1934 {
johnc@2134 1935 if (body_summary != NULL) {
johnc@2134 1936 NumberSeq calc_other_times_ms;
johnc@2134 1937 if (parallel) {
johnc@2134 1938 // parallel
johnc@2134 1939 NumberSeq* other_parts[] = {
tonyp@3464 1940 body_summary->get_root_region_scan_wait_seq(),
johnc@2134 1941 body_summary->get_parallel_seq(),
johnc@2134 1942 body_summary->get_clear_ct_seq()
johnc@2134 1943 };
johnc@2134 1944 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
tonyp@3691 1945 3, other_parts);
johnc@2134 1946 } else {
johnc@2134 1947 // serial
johnc@2134 1948 NumberSeq* other_parts[] = {
tonyp@3464 1949 body_summary->get_root_region_scan_wait_seq(),
johnc@2134 1950 body_summary->get_update_rs_seq(),
johnc@2134 1951 body_summary->get_ext_root_scan_seq(),
tonyp@3416 1952 body_summary->get_satb_filtering_seq(),
johnc@2134 1953 body_summary->get_scan_rs_seq(),
johnc@2134 1954 body_summary->get_obj_copy_seq()
johnc@2134 1955 };
johnc@2134 1956 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
tonyp@3691 1957 6, other_parts);
johnc@2134 1958 }
johnc@2134 1959 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 1960 }
ysr@777 1961 }
ysr@777 1962 } else {
brutisso@2645 1963 LineBuffer(1).append_and_print_cr("none");
ysr@777 1964 }
brutisso@2645 1965 LineBuffer(0).append_and_print_cr("");
ysr@777 1966 }
ysr@777 1967
ysr@777 1968 void G1CollectorPolicy::print_tracing_info() const {
ysr@777 1969 if (TraceGen0Time) {
ysr@777 1970 gclog_or_tty->print_cr("ALL PAUSES");
ysr@777 1971 print_summary_sd(0, "Total", _all_pause_times_ms);
ysr@777 1972 gclog_or_tty->print_cr("");
ysr@777 1973 gclog_or_tty->print_cr("");
tonyp@3337 1974 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
tonyp@3337 1975 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
ysr@777 1976 gclog_or_tty->print_cr("");
ysr@777 1977
apetrusenko@1112 1978 gclog_or_tty->print_cr("EVACUATION PAUSES");
apetrusenko@1112 1979 print_summary(_summary);
ysr@777 1980
ysr@777 1981 gclog_or_tty->print_cr("MISC");
ysr@777 1982 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
ysr@777 1983 print_summary_sd(0, "Yields", _all_yield_times_ms);
ysr@777 1984 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1985 if (_all_aux_times_ms[i].num() > 0) {
ysr@777 1986 char buffer[96];
ysr@777 1987 sprintf(buffer, "Aux%d", i);
ysr@777 1988 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
ysr@777 1989 }
ysr@777 1990 }
ysr@777 1991 }
ysr@777 1992 if (TraceGen1Time) {
ysr@777 1993 if (_all_full_gc_times_ms->num() > 0) {
ysr@777 1994 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
ysr@777 1995 _all_full_gc_times_ms->num(),
ysr@777 1996 _all_full_gc_times_ms->sum() / 1000.0);
ysr@777 1997 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
ysr@777 1998 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 1999 _all_full_gc_times_ms->sd(),
ysr@777 2000 _all_full_gc_times_ms->maximum());
ysr@777 2001 }
ysr@777 2002 }
ysr@777 2003 }
ysr@777 2004
ysr@777 2005 void G1CollectorPolicy::print_yg_surv_rate_info() const {
ysr@777 2006 #ifndef PRODUCT
ysr@777 2007 _short_lived_surv_rate_group->print_surv_rate_summary();
ysr@777 2008 // add this call for any other surv rate groups
ysr@777 2009 #endif // PRODUCT
ysr@777 2010 }
ysr@777 2011
ysr@777 2012 #ifndef PRODUCT
ysr@777 2013 // for debugging, bit of a hack...
ysr@777 2014 static char*
ysr@777 2015 region_num_to_mbs(int length) {
ysr@777 2016 static char buffer[64];
ysr@777 2017 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2018 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2019 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2020 return buffer;
ysr@777 2021 }
ysr@777 2022 #endif // PRODUCT
ysr@777 2023
apetrusenko@980 2024 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2025 switch (purpose) {
ysr@777 2026 case GCAllocForSurvived:
apetrusenko@980 2027 return _max_survivor_regions;
ysr@777 2028 case GCAllocForTenured:
apetrusenko@980 2029 return REGIONS_UNLIMITED;
ysr@777 2030 default:
apetrusenko@980 2031 ShouldNotReachHere();
apetrusenko@980 2032 return REGIONS_UNLIMITED;
ysr@777 2033 };
ysr@777 2034 }
ysr@777 2035
tonyp@3119 2036 void G1CollectorPolicy::update_max_gc_locker_expansion() {
tonyp@2333 2037 size_t expansion_region_num = 0;
tonyp@2333 2038 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 2039 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 2040 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 2041 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 2042 // less than 1.0) we'll get 1.
tonyp@2333 2043 expansion_region_num = (size_t) ceil(expansion_region_num_d);
tonyp@2333 2044 } else {
tonyp@2333 2045 assert(expansion_region_num == 0, "sanity");
tonyp@2333 2046 }
tonyp@2333 2047 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 2048 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 2049 }
tonyp@2333 2050
apetrusenko@980 2051 // Calculates survivor space parameters.
tonyp@3119 2052 void G1CollectorPolicy::update_survivors_policy() {
tonyp@3119 2053 double max_survivor_regions_d =
tonyp@3119 2054 (double) _young_list_target_length / (double) SurvivorRatio;
tonyp@3119 2055 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
tonyp@3119 2056 // smaller than 1.0) we'll get 1.
tonyp@3119 2057 _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
tonyp@3119 2058
tonyp@3066 2059 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2060 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2061 }
apetrusenko@980 2062
#ifndef PRODUCT
// Debug-only closure: asserts that every region (except trailing parts
// of humongous objects) satisfies the CollectionSetChooser's ordering
// invariant.
class HRSortIndexIsOKClosure: public HeapRegionClosure {
  CollectionSetChooser* _chooser;
public:
  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
    _chooser(chooser) {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
    }
    // Returning false continues the iteration over all regions.
    return false;
  }
};
ysr@777 2077
// Debug-only: verify the chooser's ordering invariant over the whole
// heap. Always returns true so it can be used inside an assert().
bool G1CollectorPolicy::assertMarkedBytesDataOK() {
  HRSortIndexIsOKClosure cl(_collectionSetChooser);
  _g1->heap_region_iterate(&cl);
  return true;
}
#endif
ysr@777 2084
tonyp@3114 2085 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
tonyp@3114 2086 GCCause::Cause gc_cause) {
tonyp@2011 2087 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2088 if (!during_cycle) {
tonyp@3114 2089 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2090 "request concurrent cycle initiation",
tonyp@3114 2091 ergo_format_reason("requested by GC cause")
tonyp@3114 2092 ergo_format_str("GC cause"),
tonyp@3114 2093 GCCause::to_string(gc_cause));
tonyp@2011 2094 set_initiate_conc_mark_if_possible();
tonyp@2011 2095 return true;
tonyp@2011 2096 } else {
tonyp@3114 2097 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2098 "do not request concurrent cycle initiation",
tonyp@3114 2099 ergo_format_reason("concurrent cycle already in progress")
tonyp@3114 2100 ergo_format_str("GC cause"),
tonyp@3114 2101 GCCause::to_string(gc_cause));
tonyp@2011 2102 return false;
tonyp@2011 2103 }
tonyp@2011 2104 }
tonyp@2011 2105
// Called at the start of a pause to decide whether this pause should be
// an initial-mark pause, i.e. whether a previously-requested concurrent
// cycle should be started now.
void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();
      // We do not allow mixed GCs during marking.
      if (!gcs_are_young()) {
        set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                    "initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      ergo_verbose0(ErgoConcCycles,
                    "do not initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle already in progress"));
    }
  }
}
tonyp@1794 2163
ysr@777 2164 class KnownGarbageClosure: public HeapRegionClosure {
tonyp@3539 2165 G1CollectedHeap* _g1h;
ysr@777 2166 CollectionSetChooser* _hrSorted;
ysr@777 2167
ysr@777 2168 public:
ysr@777 2169 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
tonyp@3539 2170 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
ysr@777 2171
ysr@777 2172 bool doHeapRegion(HeapRegion* r) {
ysr@777 2173 // We only include humongous regions in collection
ysr@777 2174 // sets when concurrent mark shows that their contained object is
ysr@777 2175 // unreachable.
ysr@777 2176
ysr@777 2177 // Do we have any marking information for this region?
ysr@777 2178 if (r->is_marked()) {
tonyp@3539 2179 // We will skip any region that's currently used as an old GC
tonyp@3539 2180 // alloc region (we should not consider those for collection
tonyp@3539 2181 // before we fill them up).
tonyp@3539 2182 if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
ysr@777 2183 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2184 }
ysr@777 2185 }
ysr@777 2186 return false;
ysr@777 2187 }
ysr@777 2188 };
ysr@777 2189
// Parallel counterpart of KnownGarbageClosure: each worker claims
// chunks of slots in the CollectionSetChooser's array and fills them
// with marked candidate regions, tracking the totals it contributed so
// they can be folded back in by ParKnownGarbageTask.
class ParKnownGarbageHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CollectionSetChooser* _hrSorted;
  jint _marked_regions_added;       // number of regions this worker added
  size_t _reclaimable_bytes_added;  // reclaimable bytes in those regions
  jint _chunk_size;                 // slots claimed from the chooser at a time
  jint _cur_chunk_idx;
  jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  int _worker;
  int _invokes;

  // Claim the next chunk of free slots in the chooser's region array.
  void get_new_chunk() {
    _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
    _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  }
  // Store the region in the next free slot, claiming a new chunk first
  // if the current one is exhausted.
  void add_region(HeapRegion* r) {
    if (_cur_chunk_idx == _cur_chunk_end) {
      get_new_chunk();
    }
    assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
    _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
    _marked_regions_added++;
    _reclaimable_bytes_added += r->reclaimable_bytes();
    _cur_chunk_idx++;
  }

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           jint chunk_size,
                           int worker) :
    _g1h(G1CollectedHeap::heap()),
    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
    _marked_regions_added(0), _reclaimable_bytes_added(0),
    _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }

  bool doHeapRegion(HeapRegion* r) {
    // We only include humongous regions in collection
    // sets when concurrent mark shows that their contained object is
    // unreachable.
    _invokes++;

    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We will skip any region that's currently used as an old GC
      // alloc region (we should not consider those for collection
      // before we fill them up).
      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
        add_region(r);
      }
    }
    // Never abort the iteration.
    return false;
  }
  jint marked_regions_added() { return _marked_regions_added; }
  size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
  int invokes() { return _invokes; }
};
ysr@777 2246
// Gang task that runs a ParKnownGarbageHRClosure on each worker thread
// over a chunked partition of the heap, then folds every worker's
// region/byte totals back into the CollectionSetChooser.
class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  jint _chunk_size;
  G1CollectedHeap* _g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap()) { }

  void work(uint worker_id) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
                                               _chunk_size,
                                               worker_id);
    // Iterate with InitialClaimValue so the region claim values are
    // "back to zero" (their initial state) when all workers are done.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
                                         _g1->workers()->active_workers(),
                                         HeapRegion::InitialClaimValue);
    // Fold this worker's contribution into the chooser's totals.
    jint regions_added = parKnownGarbageCl.marked_regions_added();
    size_t reclaimable_bytes_added =
                                   parKnownGarbageCl.reclaimable_bytes_added();
    _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
                             worker_id, parKnownGarbageCl.invokes(), regions_added);
    }
  }
};
ysr@777 2275
// Invoked at the end of the concurrent-mark Cleanup pause: rebuilds the
// sorted list of candidate old regions in the CollectionSetChooser
// (in parallel when worker threads are available) and records the
// pause's duration for the prediction model and the MMU tracker.
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  double start_sec;
  if (G1PrintParCleanupStats) {
    start_sec = os::elapsedTime();
  }

  _collectionSetChooser->clearMarkedHeapRegions();
  double clear_marked_end_sec;
  if (G1PrintParCleanupStats) {
    clear_marked_end_sec = os::elapsedTime();
    gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.",
                           (clear_marked_end_sec - start_sec) * 1000.0);
  }

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    // Over-partition the regions (by a factor of 4) for load balancing,
    // but never let a work unit drop below one region per thread.
    const size_t OverpartitionFactor = 4;
    size_t WorkUnit;
    // The use of MinChunkSize = 8 in the original code
    // causes some assertion failures when the total number of
    // region is less than 8. The code here tries to fix that.
    // Should the original code also be fixed?
    if (no_of_gc_threads > 0) {
      const size_t MinWorkUnit =
        MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
      WorkUnit =
        MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
             MinWorkUnit);
    } else {
      assert(no_of_gc_threads > 0,
             "The active gc workers should be greater than 0");
      // In a product build do something reasonable to avoid a crash.
      const size_t MinWorkUnit =
        MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
      WorkUnit =
        MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
             MinWorkUnit);
    }
    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
                                                             WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    // The chunked iteration should have left all claim values initial.
    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    // Single-threaded fallback: one pass over the whole heap.
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }
  double known_garbage_end_sec;
  if (G1PrintParCleanupStats) {
    known_garbage_end_sec = os::elapsedTime();
    gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
                           (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  }

  _collectionSetChooser->sortMarkedHeapRegions();
  double end_sec = os::elapsedTime();
  if (G1PrintParCleanupStats) {
    gclog_or_tty->print_cr(" sorting: %8.3f ms.",
                           (end_sec - known_garbage_end_sec) * 1000.0);
  }

  // Account the pause in the cleanup-time sequence, the stop-world
  // total, the "previous pause end" baseline, and the MMU tracker.
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
}
ysr@777 2346
johnc@1829 2347 // Add the heap region at the head of the non-incremental collection set
tonyp@3289 2348 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
johnc@1829 2349 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2350 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2351
johnc@1829 2352 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2353 hr->set_in_collection_set(true);
ysr@777 2354 hr->set_next_in_collection_set(_collection_set);
ysr@777 2355 _collection_set = hr;
ysr@777 2356 _collection_set_bytes_used_before += hr->used();
tonyp@961 2357 _g1->register_region_with_in_cset_fast_test(hr);
tonyp@3289 2358 size_t rs_length = hr->rem_set()->occupied();
tonyp@3289 2359 _recorded_rs_lengths += rs_length;
tonyp@3289 2360 _old_cset_region_length += 1;
ysr@777 2361 }
ysr@777 2362
johnc@1829 2363 // Initialize the per-collection-set information
johnc@1829 2364 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2365 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2366
johnc@1829 2367 _inc_cset_head = NULL;
johnc@1829 2368 _inc_cset_tail = NULL;
johnc@1829 2369 _inc_cset_bytes_used_before = 0;
johnc@1829 2370
johnc@1829 2371 _inc_cset_max_finger = 0;
johnc@1829 2372 _inc_cset_recorded_rs_lengths = 0;
tonyp@3356 2373 _inc_cset_recorded_rs_lengths_diffs = 0;
tonyp@3356 2374 _inc_cset_predicted_elapsed_time_ms = 0.0;
tonyp@3356 2375 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
johnc@1829 2376 _inc_cset_build_state = Active;
johnc@1829 2377 }
johnc@1829 2378
// Fold the concurrent-refinement sampling diffs into the main
// incremental cset fields. Called at the start of a GC, at a safepoint,
// so no further concurrent updates can race with us.
void G1CollectorPolicy::finalize_incremental_cset_building() {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. The diff should in theory be always positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
    if (_inc_cset_recorded_rs_lengths >= diffs) {
      _inc_cset_recorded_rs_lengths -= diffs;
    } else {
      // Clamp at zero rather than underflowing the unsigned field.
      _inc_cset_recorded_rs_lengths = 0;
    }
  }
  _inc_cset_predicted_elapsed_time_ms +=
                                     _inc_cset_predicted_elapsed_time_ms_diffs;

  // Reset the diffs ready for the next round of sampling.
  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}
tonyp@3356 2411
// Fold the given region's RSet length and predicted elapsed time into
// the incremental cset totals, and cache those values in the region so
// they can later be subtracted out or refreshed by RSet sampling.
void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  size_t used_bytes = hr->used();
  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code.
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
}
johnc@1829 2438
// Refresh the cset prediction info for a young region after its RSet
// length has been re-sampled. Called by the concurrent refinement
// thread, outside a safepoint.
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(),
         "should not be at a safepoint");

  // We could have updated _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.

  // Record how much the RSet length changed since the last sample.
  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

  // And the same for the predicted elapsed time.
  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  // Cache the new values in the region for the next sampling round.
  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}
johnc@1829 2467
johnc@1829 2468 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
tonyp@3289 2469 assert(hr->is_young(), "invariant");
tonyp@3289 2470 assert(hr->young_index_in_cset() > -1, "should have already been set");
johnc@1829 2471 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2472
johnc@1829 2473 // We need to clear and set the cached recorded/cached collection set
johnc@1829 2474 // information in the heap region here (before the region gets added
johnc@1829 2475 // to the collection set). An individual heap region's cached values
johnc@1829 2476 // are calculated, aggregated with the policy collection set info,
johnc@1829 2477 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2478 // by the Young List sampling code.
johnc@1829 2479
johnc@1829 2480 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2481 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2482
johnc@1829 2483 HeapWord* hr_end = hr->end();
johnc@1829 2484 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2485
johnc@1829 2486 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2487 hr->set_in_collection_set(true);
johnc@1829 2488 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2489
johnc@1829 2490 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2491 }
johnc@1829 2492
johnc@1829 2493 // Add the region at the RHS of the incremental cset
johnc@1829 2494 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2495 // We should only ever be appending survivors at the end of a pause
johnc@1829 2496 assert( hr->is_survivor(), "Logic");
johnc@1829 2497
johnc@1829 2498 // Do the 'common' stuff
johnc@1829 2499 add_region_to_incremental_cset_common(hr);
johnc@1829 2500
johnc@1829 2501 // Now add the region at the right hand side
johnc@1829 2502 if (_inc_cset_tail == NULL) {
johnc@1829 2503 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2504 _inc_cset_head = hr;
johnc@1829 2505 } else {
johnc@1829 2506 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2507 }
johnc@1829 2508 _inc_cset_tail = hr;
johnc@1829 2509 }
johnc@1829 2510
johnc@1829 2511 // Add the region to the LHS of the incremental cset
johnc@1829 2512 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2513 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2514 assert(!hr->is_survivor(), "Logic");
johnc@1829 2515
johnc@1829 2516 // Do the 'common' stuff
johnc@1829 2517 add_region_to_incremental_cset_common(hr);
johnc@1829 2518
johnc@1829 2519 // Add the region at the left hand side
johnc@1829 2520 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2521 if (_inc_cset_head == NULL) {
johnc@1829 2522 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2523 _inc_cset_tail = hr;
johnc@1829 2524 }
johnc@1829 2525 _inc_cset_head = hr;
johnc@1829 2526 }
johnc@1829 2527
johnc@1829 2528 #ifndef PRODUCT
johnc@1829 2529 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2530 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2531
johnc@1829 2532 st->print_cr("\nCollection_set:");
johnc@1829 2533 HeapRegion* csr = list_head;
johnc@1829 2534 while (csr != NULL) {
johnc@1829 2535 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2536 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2537 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2538 "age: %4d, y: %d, surv: %d",
johnc@1829 2539 csr->bottom(), csr->end(),
johnc@1829 2540 csr->top(),
johnc@1829 2541 csr->prev_top_at_mark_start(),
johnc@1829 2542 csr->next_top_at_mark_start(),
johnc@1829 2543 csr->top_at_conc_mark_count(),
johnc@1829 2544 csr->age_in_surv_rate_group_cond(),
johnc@1829 2545 csr->is_young(),
johnc@1829 2546 csr->is_survivor());
johnc@1829 2547 csr = next;
johnc@1829 2548 }
johnc@1829 2549 }
johnc@1829 2550 #endif // !PRODUCT
johnc@1829 2551
// Decide whether the next GC should be a mixed GC: true when the
// collection set chooser still has candidate old regions and the space
// reclaimable from them is at least G1HeapWastePercent of the heap
// capacity. The action strings are only used for ergonomics logging.
bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) {
  CollectionSetChooser* cset_chooser = _collectionSetChooser;
  if (cset_chooser->isEmpty()) {
    // No candidate old regions left at all.
    ergo_verbose0(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("candidate old regions not available"));
    return false;
  }
  size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
  size_t capacity_bytes = _g1->capacity();
  double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  double threshold = (double) G1HeapWastePercent;
  if (perc < threshold) {
    // Not enough reclaimable space to make further mixed GCs worthwhile.
    ergo_verbose4(ErgoMixedGCs,
              false_action_str,
              ergo_format_reason("reclaimable percentage lower than threshold")
              ergo_format_region("candidate old regions")
              ergo_format_byte_perc("reclaimable")
              ergo_format_perc("threshold"),
              cset_chooser->remainingRegions(),
              reclaimable_bytes, perc, threshold);
    return false;
  }

  ergo_verbose4(ErgoMixedGCs,
            true_action_str,
            ergo_format_reason("candidate old regions available")
            ergo_format_region("candidate old regions")
            ergo_format_byte_perc("reclaimable")
            ergo_format_perc("threshold"),
            cset_chooser->remainingRegions(),
            reclaimable_bytes, perc, threshold);
  return true;
}
tonyp@3539 2587
tonyp@3539 2588 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
johnc@1829 2589 // Set this here - in case we're not doing young collections.
johnc@1829 2590 double non_young_start_time_sec = os::elapsedTime();
johnc@1829 2591
tonyp@3114 2592 YoungList* young_list = _g1->young_list();
tonyp@3356 2593 finalize_incremental_cset_building();
tonyp@3114 2594
tonyp@2011 2595 guarantee(target_pause_time_ms > 0.0,
tonyp@2011 2596 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2011 2597 target_pause_time_ms));
tonyp@2011 2598 guarantee(_collection_set == NULL, "Precondition");
ysr@777 2599
ysr@777 2600 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
ysr@777 2601 double predicted_pause_time_ms = base_time_ms;
tonyp@2011 2602 double time_remaining_ms = target_pause_time_ms - base_time_ms;
ysr@777 2603
tonyp@3114 2604 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2605 "start choosing CSet",
tonyp@3114 2606 ergo_format_ms("predicted base time")
tonyp@3114 2607 ergo_format_ms("remaining time")
tonyp@3114 2608 ergo_format_ms("target pause time"),
tonyp@3114 2609 base_time_ms, time_remaining_ms, target_pause_time_ms);
tonyp@3114 2610
tonyp@3114 2611 HeapRegion* hr;
tonyp@3114 2612 double young_start_time_sec = os::elapsedTime();
ysr@777 2613
apetrusenko@1112 2614 _collection_set_bytes_used_before = 0;
tonyp@3337 2615 _last_gc_was_young = gcs_are_young() ? true : false;
tonyp@3337 2616
tonyp@3337 2617 if (_last_gc_was_young) {
tonyp@3337 2618 ++_young_pause_num;
tonyp@3114 2619 } else {
tonyp@3337 2620 ++_mixed_pause_num;
tonyp@3114 2621 }
brutisso@3065 2622
brutisso@3065 2623 // The young list is laid with the survivor regions from the previous
brutisso@3065 2624 // pause are appended to the RHS of the young list, i.e.
brutisso@3065 2625 // [Newly Young Regions ++ Survivors from last pause].
brutisso@3065 2626
tonyp@3289 2627 size_t survivor_region_length = young_list->survivor_length();
tonyp@3289 2628 size_t eden_region_length = young_list->length() - survivor_region_length;
tonyp@3289 2629 init_cset_region_lengths(eden_region_length, survivor_region_length);
tonyp@3114 2630 hr = young_list->first_survivor_region();
brutisso@3065 2631 while (hr != NULL) {
brutisso@3065 2632 assert(hr->is_survivor(), "badly formed young list");
brutisso@3065 2633 hr->set_young();
brutisso@3065 2634 hr = hr->get_next_young_region();
brutisso@3065 2635 }
brutisso@3065 2636
tonyp@3114 2637 // Clear the fields that point to the survivor list - they are all young now.
tonyp@3114 2638 young_list->clear_survivors();
brutisso@3065 2639
brutisso@3065 2640 _collection_set = _inc_cset_head;
brutisso@3065 2641 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
brutisso@3065 2642 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
brutisso@3065 2643 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
brutisso@3065 2644
tonyp@3114 2645 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2646 "add young regions to CSet",
tonyp@3114 2647 ergo_format_region("eden")
tonyp@3114 2648 ergo_format_region("survivors")
tonyp@3114 2649 ergo_format_ms("predicted young region time"),
tonyp@3289 2650 eden_region_length, survivor_region_length,
tonyp@3114 2651 _inc_cset_predicted_elapsed_time_ms);
tonyp@3114 2652
brutisso@3065 2653 // The number of recorded young regions is the incremental
brutisso@3065 2654 // collection set's current size
brutisso@3065 2655 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
brutisso@3065 2656
brutisso@3065 2657 double young_end_time_sec = os::elapsedTime();
brutisso@3065 2658 _recorded_young_cset_choice_time_ms =
brutisso@3065 2659 (young_end_time_sec - young_start_time_sec) * 1000.0;
brutisso@3065 2660
brutisso@3065 2661 // We are doing young collections so reset this.
brutisso@3065 2662 non_young_start_time_sec = young_end_time_sec;
brutisso@3065 2663
tonyp@3337 2664 if (!gcs_are_young()) {
tonyp@3539 2665 CollectionSetChooser* cset_chooser = _collectionSetChooser;
tonyp@3539 2666 assert(cset_chooser->verify(), "CSet Chooser verification - pre");
tonyp@3539 2667 const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
tonyp@3539 2668 const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
tonyp@3539 2669
tonyp@3539 2670 size_t expensive_region_num = 0;
tonyp@3539 2671 bool check_time_remaining = adaptive_young_list_length();
tonyp@3539 2672 HeapRegion* hr = cset_chooser->peek();
tonyp@3539 2673 while (hr != NULL) {
tonyp@3539 2674 if (old_cset_region_length() >= max_old_cset_length) {
tonyp@3539 2675 // Added maximum number of old regions to the CSet.
tonyp@3539 2676 ergo_verbose2(ErgoCSetConstruction,
tonyp@3539 2677 "finish adding old regions to CSet",
tonyp@3539 2678 ergo_format_reason("old CSet region num reached max")
tonyp@3539 2679 ergo_format_region("old")
tonyp@3539 2680 ergo_format_region("max"),
tonyp@3539 2681 old_cset_region_length(), max_old_cset_length);
tonyp@3539 2682 break;
ysr@777 2683 }
tonyp@3114 2684
tonyp@3539 2685 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
tonyp@3539 2686 if (check_time_remaining) {
tonyp@3539 2687 if (predicted_time_ms > time_remaining_ms) {
tonyp@3539 2688 // Too expensive for the current CSet.
tonyp@3539 2689
tonyp@3539 2690 if (old_cset_region_length() >= min_old_cset_length) {
tonyp@3539 2691 // We have added the minimum number of old regions to the CSet,
tonyp@3539 2692 // we are done with this CSet.
tonyp@3539 2693 ergo_verbose4(ErgoCSetConstruction,
tonyp@3539 2694 "finish adding old regions to CSet",
tonyp@3539 2695 ergo_format_reason("predicted time is too high")
tonyp@3539 2696 ergo_format_ms("predicted time")
tonyp@3539 2697 ergo_format_ms("remaining time")
tonyp@3539 2698 ergo_format_region("old")
tonyp@3539 2699 ergo_format_region("min"),
tonyp@3539 2700 predicted_time_ms, time_remaining_ms,
tonyp@3539 2701 old_cset_region_length(), min_old_cset_length);
tonyp@3539 2702 break;
tonyp@3539 2703 }
tonyp@3539 2704
tonyp@3539 2705 // We'll add it anyway given that we haven't reached the
tonyp@3539 2706 // minimum number of old regions.
tonyp@3539 2707 expensive_region_num += 1;
tonyp@3539 2708 }
tonyp@3114 2709 } else {
tonyp@3539 2710 if (old_cset_region_length() >= min_old_cset_length) {
tonyp@3539 2711 // In the non-auto-tuning case, we'll finish adding regions
tonyp@3539 2712 // to the CSet if we reach the minimum.
tonyp@3539 2713 ergo_verbose2(ErgoCSetConstruction,
tonyp@3539 2714 "finish adding old regions to CSet",
tonyp@3539 2715 ergo_format_reason("old CSet region num reached min")
tonyp@3539 2716 ergo_format_region("old")
tonyp@3539 2717 ergo_format_region("min"),
tonyp@3539 2718 old_cset_region_length(), min_old_cset_length);
tonyp@3539 2719 break;
tonyp@3114 2720 }
tonyp@3114 2721 }
tonyp@3539 2722
tonyp@3539 2723 // We will add this region to the CSet.
tonyp@3539 2724 time_remaining_ms -= predicted_time_ms;
tonyp@3539 2725 predicted_pause_time_ms += predicted_time_ms;
tonyp@3539 2726 cset_chooser->remove_and_move_to_next(hr);
tonyp@3539 2727 _g1->old_set_remove(hr);
tonyp@3539 2728 add_old_region_to_cset(hr);
tonyp@3539 2729
tonyp@3539 2730 hr = cset_chooser->peek();
tonyp@3114 2731 }
tonyp@3539 2732 if (hr == NULL) {
tonyp@3539 2733 ergo_verbose0(ErgoCSetConstruction,
tonyp@3539 2734 "finish adding old regions to CSet",
tonyp@3539 2735 ergo_format_reason("candidate old regions not available"));
tonyp@3539 2736 }
tonyp@3539 2737
tonyp@3539 2738 if (expensive_region_num > 0) {
tonyp@3539 2739 // We print the information once here at the end, predicated on
tonyp@3539 2740 // whether we added any apparently expensive regions or not, to
tonyp@3539 2741 // avoid generating output per region.
tonyp@3539 2742 ergo_verbose4(ErgoCSetConstruction,
tonyp@3539 2743 "added expensive regions to CSet",
tonyp@3539 2744 ergo_format_reason("old CSet region num not reached min")
tonyp@3539 2745 ergo_format_region("old")
tonyp@3539 2746 ergo_format_region("expensive")
tonyp@3539 2747 ergo_format_region("min")
tonyp@3539 2748 ergo_format_ms("remaining time"),
tonyp@3539 2749 old_cset_region_length(),
tonyp@3539 2750 expensive_region_num,
tonyp@3539 2751 min_old_cset_length,
tonyp@3539 2752 time_remaining_ms);
tonyp@3539 2753 }
tonyp@3539 2754
tonyp@3539 2755 assert(cset_chooser->verify(), "CSet Chooser verification - post");
ysr@777 2756 }
ysr@777 2757
johnc@1829 2758 stop_incremental_cset_building();
johnc@1829 2759
ysr@777 2760 count_CS_bytes_used();
ysr@777 2761
tonyp@3114 2762 ergo_verbose5(ErgoCSetConstruction,
tonyp@3114 2763 "finish choosing CSet",
tonyp@3114 2764 ergo_format_region("eden")
tonyp@3114 2765 ergo_format_region("survivors")
tonyp@3114 2766 ergo_format_region("old")
tonyp@3114 2767 ergo_format_ms("predicted pause time")
tonyp@3114 2768 ergo_format_ms("target pause time"),
tonyp@3289 2769 eden_region_length, survivor_region_length,
tonyp@3289 2770 old_cset_region_length(),
tonyp@3114 2771 predicted_pause_time_ms, target_pause_time_ms);
tonyp@3114 2772
ysr@777 2773 double non_young_end_time_sec = os::elapsedTime();
ysr@777 2774 _recorded_non_young_cset_choice_time_ms =
ysr@777 2775 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
ysr@777 2776 }

mercurial