src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Wed, 25 Jan 2012 12:58:23 -0500

author
tonyp
date
Wed, 25 Jan 2012 12:58:23 -0500
changeset 3464
eff609af17d7
parent 3461
6a78aa6ac1ff
child 3539
a9647476d1a4
permissions
-rw-r--r--

7127706: G1: re-enable survivors during the initial-mark pause
Summary: Re-enable survivors during the initial-mark pause. Afterwards, the concurrent marking threads have to scan them and mark everything reachable from them. The next GC will have to wait for the survivors to be scanned.
Reviewed-by: brutisso, johnc

ysr@777 1 /*
tonyp@3416 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
stefank@2314 32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 33 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 34 #include "runtime/arguments.hpp"
stefank@2314 35 #include "runtime/java.hpp"
stefank@2314 36 #include "runtime/mutexLocker.hpp"
stefank@2314 37 #include "utilities/debug.hpp"
ysr@777 38
// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// Each array is indexed by the number of parallel GC threads, clamped
// to the range [1, 8] (see the index calculation in the
// G1CollectorPolicy constructor). The chosen entry seeds the
// corresponding prediction sequence before any real samples exist.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
ysr@777 78
brutisso@2645 79 // Help class for avoiding interleaved logging
brutisso@2645 80 class LineBuffer: public StackObj {
brutisso@2645 81
brutisso@2645 82 private:
brutisso@2645 83 static const int BUFFER_LEN = 1024;
brutisso@2645 84 static const int INDENT_CHARS = 3;
brutisso@2645 85 char _buffer[BUFFER_LEN];
brutisso@2645 86 int _indent_level;
brutisso@2645 87 int _cur;
brutisso@2645 88
brutisso@2645 89 void vappend(const char* format, va_list ap) {
brutisso@2645 90 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
brutisso@2645 91 if (res != -1) {
brutisso@2645 92 _cur += res;
brutisso@2645 93 } else {
brutisso@2645 94 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
brutisso@2645 95 _buffer[BUFFER_LEN -1] = 0;
brutisso@2645 96 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
brutisso@2645 97 }
brutisso@2645 98 }
brutisso@2645 99
brutisso@2645 100 public:
brutisso@2645 101 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
brutisso@2645 102 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
brutisso@2645 103 _buffer[_cur] = ' ';
brutisso@2645 104 }
brutisso@2645 105 }
brutisso@2645 106
brutisso@2645 107 #ifndef PRODUCT
brutisso@2645 108 ~LineBuffer() {
brutisso@2645 109 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
brutisso@2645 110 }
brutisso@2645 111 #endif
brutisso@2645 112
brutisso@2645 113 void append(const char* format, ...) {
brutisso@2645 114 va_list ap;
brutisso@2645 115 va_start(ap, format);
brutisso@2645 116 vappend(format, ap);
brutisso@2645 117 va_end(ap);
brutisso@2645 118 }
brutisso@2645 119
brutisso@2645 120 void append_and_print_cr(const char* format, ...) {
brutisso@2645 121 va_list ap;
brutisso@2645 122 va_start(ap, format);
brutisso@2645 123 vappend(format, ap);
brutisso@2645 124 va_end(ap);
brutisso@2645 125 gclog_or_tty->print_cr("%s", _buffer);
brutisso@2645 126 _cur = _indent_level * INDENT_CHARS;
brutisso@2645 127 }
brutisso@2645 128 };
brutisso@2645 129
// Construct the G1 collector policy. This seeds every prediction
// sequence, allocates the per-GC-worker timing arrays, and derives the
// pause-time/interval ergonomics. Note that the policy object is
// created *before* the heap, so heap-dependent values (e.g. the region
// size) must be set up in the constructor body as early as possible.
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),
  _mark_closure_time_ms(0.0),
  _root_region_scan_wait_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  // Card-count clearing statistics are only tracked in debug builds.
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // Truncated sequences feeding the pause-time prediction model.
  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),
  _young_pause_num(0),
  _mixed_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _all_full_gc_times_ms(new NumberSeq()),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_young_gcs(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes: a PLAB can never be larger than one region.
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  // Per-GC-worker arrays used to record the phase timings of the last pause.
  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_satb_filtering_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // Select the defaults entry for this thread count; the tables above
  // cover 1-8 threads, so clamp into [0, 7].
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  // Seed each prediction sequence with its per-thread-count default.
  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                             young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                         non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}
ysr@777 412
// Validate flag values and establish the heap alignment constraints
// (region-granular minimum) before delegating to the shared
// CollectorPolicy flag initialization.
void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
ysr@777 421
brutisso@3358 422 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
brutisso@3358 423 assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
brutisso@3358 424 assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
brutisso@3358 425 assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");
brutisso@3120 426
brutisso@3120 427 if (FLAG_IS_CMDLINE(NewRatio)) {
brutisso@3120 428 if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
tonyp@3172 429 warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
brutisso@3120 430 } else {
brutisso@3358 431 _sizer_kind = SizerNewRatio;
brutisso@3358 432 _adaptive_size = false;
brutisso@3358 433 return;
brutisso@3120 434 }
brutisso@3120 435 }
brutisso@3120 436
brutisso@3358 437 if (FLAG_IS_CMDLINE(NewSize)) {
brutisso@3358 438 _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
brutisso@3358 439 if (FLAG_IS_CMDLINE(MaxNewSize)) {
brutisso@3358 440 _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
brutisso@3358 441 _sizer_kind = SizerMaxAndNewSize;
brutisso@3358 442 _adaptive_size = _min_desired_young_length == _max_desired_young_length;
brutisso@3358 443 } else {
brutisso@3358 444 _sizer_kind = SizerNewSizeOnly;
brutisso@3358 445 }
brutisso@3358 446 } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
brutisso@3358 447 _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
brutisso@3358 448 _sizer_kind = SizerMaxNewSizeOnly;
brutisso@3358 449 }
brutisso@3358 450 }
brutisso@3358 451
brutisso@3358 452 size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
brutisso@3358 453 size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
brutisso@3358 454 return MAX2((size_t)1, default_value);
brutisso@3358 455 }
brutisso@3358 456
brutisso@3358 457 size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
brutisso@3358 458 size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
brutisso@3358 459 return MAX2((size_t)1, default_value);
brutisso@3358 460 }
brutisso@3358 461
brutisso@3358 462 void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
brutisso@3358 463 assert(new_number_of_heap_regions > 0, "Heap must be initialized");
brutisso@3358 464
brutisso@3358 465 switch (_sizer_kind) {
brutisso@3358 466 case SizerDefaults:
brutisso@3358 467 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
brutisso@3358 468 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
brutisso@3358 469 break;
brutisso@3358 470 case SizerNewSizeOnly:
brutisso@3358 471 _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
brutisso@3358 472 _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
brutisso@3358 473 break;
brutisso@3358 474 case SizerMaxNewSizeOnly:
brutisso@3358 475 _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
brutisso@3358 476 _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
brutisso@3358 477 break;
brutisso@3358 478 case SizerMaxAndNewSize:
brutisso@3358 479 // Do nothing. Values set on the command line, don't update them at runtime.
brutisso@3358 480 break;
brutisso@3358 481 case SizerNewRatio:
brutisso@3358 482 _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
brutisso@3358 483 _max_desired_young_length = _min_desired_young_length;
brutisso@3358 484 break;
brutisso@3358 485 default:
brutisso@3358 486 ShouldNotReachHere();
brutisso@3358 487 }
brutisso@3358 488
brutisso@3120 489 assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
brutisso@3358 490 }
brutisso@3358 491
// Post-heap-creation policy initialization: cache the heap pointer,
// create the jstat counters, establish the initial young list target
// length, and start building the first incremental collection set.
// Must run under the Heap_lock.
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    // Adaptive sizing ignores the fixed length; zero it out.
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}
ysr@777 513
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
apetrusenko@980 518
tonyp@3119 519 bool G1CollectorPolicy::predict_will_fit(size_t young_length,
tonyp@3119 520 double base_time_ms,
tonyp@3119 521 size_t base_free_regions,
tonyp@3119 522 double target_pause_time_ms) {
tonyp@3119 523 if (young_length >= base_free_regions) {
tonyp@3119 524 // end condition 1: not enough space for the young regions
tonyp@3119 525 return false;
ysr@777 526 }
tonyp@3119 527
tonyp@3119 528 double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
tonyp@3119 529 size_t bytes_to_copy =
tonyp@3119 530 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
tonyp@3119 531 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
tonyp@3119 532 double young_other_time_ms = predict_young_other_time_ms(young_length);
tonyp@3119 533 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
tonyp@3119 534 if (pause_time_ms > target_pause_time_ms) {
tonyp@3119 535 // end condition 2: prediction is over the target pause time
tonyp@3119 536 return false;
tonyp@3119 537 }
tonyp@3119 538
tonyp@3119 539 size_t free_bytes =
tonyp@3119 540 (base_free_regions - young_length) * HeapRegion::GrainBytes;
tonyp@3119 541 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
tonyp@3119 542 // end condition 3: out-of-space (conservatively!)
tonyp@3119 543 return false;
tonyp@3119 544 }
tonyp@3119 545
tonyp@3119 546 // success!
tonyp@3119 547 return true;
ysr@777 548 }
ysr@777 549
brutisso@3120 550 void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
brutisso@3120 551 // re-calculate the necessary reserve
brutisso@3120 552 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
tonyp@3119 553 // We use ceiling so that if reserve_regions_d is > 0.0 (but
tonyp@3119 554 // smaller than 1.0) we'll get 1.
tonyp@3119 555 _reserve_regions = (size_t) ceil(reserve_regions_d);
brutisso@3120 556
brutisso@3358 557 _young_gen_sizer->heap_size_changed(new_number_of_regions);
tonyp@3119 558 }
tonyp@3119 559
tonyp@3119 560 size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
tonyp@3119 561 size_t base_min_length) {
tonyp@3119 562 size_t desired_min_length = 0;
ysr@777 563 if (adaptive_young_list_length()) {
tonyp@3119 564 if (_alloc_rate_ms_seq->num() > 3) {
tonyp@3119 565 double now_sec = os::elapsedTime();
tonyp@3119 566 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
tonyp@3119 567 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@3119 568 desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@3119 569 } else {
tonyp@3119 570 // otherwise we don't have enough info to make the prediction
tonyp@3119 571 }
ysr@777 572 }
brutisso@3120 573 desired_min_length += base_min_length;
brutisso@3120 574 // make sure we don't go below any user-defined minimum bound
brutisso@3358 575 return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
ysr@777 576 }
ysr@777 577
// Desired upper bound on the young list length, in regions.
size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}
tonyp@3119 584
// Recompute _young_list_target_length from the (predicted or supplied)
// remembered-set lengths, clamped between the desired min/max bounds,
// and refresh the GC-locker expansion allowance. rs_lengths == -1
// (the default) means "predict it".
void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  size_t base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  size_t absolute_min_length = base_min_length + 1;
  size_t desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  size_t absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  size_t desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  size_t young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    if (gcs_are_young()) {
      young_list_target_length = _young_list_fixed_length;
    } else {
      // A bit arbitrary: during mixed GCs we allocate half
      // the young regions to try to add old regions to the CSet.
      young_list_target_length = _young_list_fixed_length / 2;
      // We choose to accept that we might go under the desired min
      // length given that we intentionally ask for a smaller young gen.
      desired_min_length = absolute_min_length;
    }
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}
ysr@777 661
// Computes the adaptive young list target length (in regions, and
// including the already-committed regions accounted for by
// base_min_length) for the next young GC.
//
// rs_lengths:         sampled/predicted remembered set lengths
// base_min_length:    regions we must have regardless (survivors + eden
//                     already allocated)
// desired_min_length: lower bound for the result (> base_min_length)
// desired_max_length: upper bound for the result
//
// Returns base_min_length plus the largest eden length whose predicted
// pause time fits within the MMU pause-time target, found by binary
// search between the min and max bounds. If even the minimum eden
// length does not fit, the minimum is returned anyway.
size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      size_t base_min_length,
                                                      size_t desired_min_length,
                                                      size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  // The pause-time budget, and the fixed cost every pause pays no
  // matter how large eden is: card/RSet processing plus evacuating
  // the current survivors.
  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  // Free regions usable for eden, keeping the reserve out of reach.
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The results is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}
ysr@777 765
apetrusenko@980 766 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 767 double survivor_regions_evac_time = 0.0;
apetrusenko@980 768 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 769 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 770 r = r->get_next_young_region()) {
apetrusenko@980 771 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 772 }
apetrusenko@980 773 return survivor_regions_evac_time;
apetrusenko@980 774 }
apetrusenko@980 775
tonyp@3119 776 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
ysr@777 777 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 778
johnc@1829 779 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 780 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 781 // add 10% to avoid having to recalculate often
ysr@777 782 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
tonyp@3119 783 update_young_list_target_length(rs_lengths_prediction);
ysr@777 784 }
ysr@777 785 }
ysr@777 786
tonyp@3119 787
tonyp@3119 788
// Unused stub: G1 does not route allocations through this
// CollectorPolicy hook; the guarantee fires if it is ever called.
// The NULL return only satisfies the signature.
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 795
// This method controls how a collector handles one or more
// of its generations being fully allocated.
// Unused stub in G1: the guarantee fires if it is ever called, and
// the NULL return only satisfies the signature.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 803
ysr@777 804
ysr@777 805 #ifndef PRODUCT
ysr@777 806 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 807 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 808 return
ysr@777 809 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 810 // also call verify_young_ages on any additional surv rate groups
ysr@777 811 }
ysr@777 812
ysr@777 813 bool
ysr@777 814 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 815 SurvRateGroup *surv_rate_group) {
ysr@777 816 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 817
ysr@777 818 const char* name = surv_rate_group->name();
ysr@777 819 bool ret = true;
ysr@777 820 int prev_age = -1;
ysr@777 821
ysr@777 822 for (HeapRegion* curr = head;
ysr@777 823 curr != NULL;
ysr@777 824 curr = curr->get_next_young_region()) {
ysr@777 825 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 826 if (group == NULL && !curr->is_survivor()) {
ysr@777 827 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 828 ret = false;
ysr@777 829 }
ysr@777 830
ysr@777 831 if (surv_rate_group == group) {
ysr@777 832 int age = curr->age_in_surv_rate_group();
ysr@777 833
ysr@777 834 if (age < 0) {
ysr@777 835 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 836 ret = false;
ysr@777 837 }
ysr@777 838
ysr@777 839 if (age <= prev_age) {
ysr@777 840 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 841 "(%d, %d)", name, age, prev_age);
ysr@777 842 ret = false;
ysr@777 843 }
ysr@777 844 prev_age = age;
ysr@777 845 }
ysr@777 846 }
ysr@777 847
ysr@777 848 return ret;
ysr@777 849 }
ysr@777 850 #endif // PRODUCT
ysr@777 851
// Marks the beginning of a full (stop-the-world) collection: records
// the start timestamp and flags the heap as being in a full GC.
void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}
ysr@777 857
// Bookkeeping after a full GC finishes: records its duration, resets
// the heuristics that drive young/mixed transitions and concurrent
// cycle initiation, and recomputes the young list target for the
// post-collection heap.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  _should_revert_to_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // The full collection leaves no recorded survivors behind.
  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  // Note: depends on _free_regions_at_end_of_collection set just above.
  update_young_list_target_length();
  _collectionSetChooser->updateAfterFullCollection();
}
ysr@777 894
// Records the moment a stop-the-world operation was requested; the
// timestamp is consumed later (pause start / concurrent pause) to
// measure the time-to-stop and yield durations.
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
ysr@777 898
// Initializes all per-pause policy state at the start of an
// evacuation pause: logs the pause header, refreshes the survivor
// policy, records the stop-world latency, snapshots heap/young-list
// sizes, and resets the per-pause timing accumulators.
//
// start_time_sec: pause start timestamp (os::elapsedTime() domain)
// start_used:     bytes used in the heap at pause start
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Time between the stop-world request and the actual pause start.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  // Snapshot heap state at pause start for end-of-pause accounting.
  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

// NOTE(review): HotSpot debug-only code is conventionally guarded by
// ASSERT rather than DEBUG - confirm DEBUG is defined in debug builds.
#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_satb_filtering_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
    _par_last_gc_worker_other_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  // This is initialized to zero here and is set during
  // the evacuation pause if marking is in progress.
  _cur_satb_drain_time_ms = 0.0;
  // This is initialized to zero here and is set during the evacuation
  // pause if we actually waited for the root region scanning to finish.
  _root_region_scan_wait_time_ms = 0.0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
ysr@777 974
// Called at the end of the initial-mark work: flags that marking is
// now in progress, clears the during-initial-mark state, and records
// the initial mark's elapsed time as stop-world marking time.
void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                        mark_init_elapsed_time_ms) {
  _during_marking = true;
  // Cycle initiation must already have been acted upon and cleared.
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
ysr@777 982
// Marks the start of the remark pause and notes that the concurrent
// marking phase proper has ended.
void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}
ysr@777 987
ysr@777 988 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 989 double end_time_sec = os::elapsedTime();
ysr@777 990 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 991 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 992 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 993 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 994
ysr@777 995 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 996 }
ysr@777 997
// Records the start timestamp of the cleanup pause.
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
ysr@777 1001
// Called once concurrent cleanup has completed: clears the
// revert-to-young flag, marks that the next young GC follows a
// finished marking cycle (_last_young_gc), and closes the marking
// window.
void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _should_revert_to_young_gcs = false;
  _last_young_gc = true;
  _in_marking_window = false;
}
ysr@777 1007
ysr@777 1008 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 1009 if (_stop_world_start > 0.0) {
ysr@777 1010 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 1011 _all_yield_times_ms->add(yield_ms);
ysr@777 1012 }
ysr@777 1013 }
ysr@777 1014
// Intentionally empty: the yield time is accounted in
// record_concurrent_pause() above.
void G1CollectorPolicy::record_concurrent_pause_end() {
}
ysr@777 1017
// Sums n consecutive entries of a circular buffer of capacity N,
// starting at index start; indices wrap around modulo N.
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T total = (T)0;
  for (int k = 0; k < n; ++k) {
    total += sum_arr[(start + k) % N];
  }
  return total;
}
ysr@777 1027
tonyp@1966 1028 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1029 const char* str,
brutisso@2712 1030 double* data) {
ysr@777 1031 double min = data[0], max = data[0];
ysr@777 1032 double total = 0.0;
brutisso@2645 1033 LineBuffer buf(level);
brutisso@2645 1034 buf.append("[%s (ms):", str);
jmasa@3294 1035 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1036 double val = data[i];
ysr@777 1037 if (val < min)
ysr@777 1038 min = val;
ysr@777 1039 if (val > max)
ysr@777 1040 max = val;
ysr@777 1041 total += val;
brutisso@2645 1042 buf.append(" %3.1lf", val);
ysr@777 1043 }
brutisso@2712 1044 buf.append_and_print_cr("");
jmasa@3294 1045 double avg = total / (double) no_of_gc_threads();
brutisso@2712 1046 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
brutisso@2712 1047 avg, min, max, max - min);
ysr@777 1048 }
ysr@777 1049
tonyp@1966 1050 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1051 const char* str,
brutisso@2712 1052 double* data) {
ysr@777 1053 double min = data[0], max = data[0];
ysr@777 1054 double total = 0.0;
brutisso@2645 1055 LineBuffer buf(level);
brutisso@2645 1056 buf.append("[%s :", str);
jmasa@3294 1057 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1058 double val = data[i];
ysr@777 1059 if (val < min)
ysr@777 1060 min = val;
ysr@777 1061 if (val > max)
ysr@777 1062 max = val;
ysr@777 1063 total += val;
brutisso@2645 1064 buf.append(" %d", (int) val);
ysr@777 1065 }
brutisso@2712 1066 buf.append_and_print_cr("");
jmasa@3294 1067 double avg = total / (double) no_of_gc_threads();
brutisso@2712 1068 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
brutisso@2712 1069 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
ysr@777 1070 }
ysr@777 1071
johnc@3219 1072 void G1CollectorPolicy::print_stats(int level,
johnc@3219 1073 const char* str,
johnc@3219 1074 double value) {
brutisso@2645 1075 LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1076 }
ysr@777 1077
johnc@3219 1078 void G1CollectorPolicy::print_stats(int level,
johnc@3219 1079 const char* str,
johnc@3219 1080 int value) {
brutisso@2645 1081 LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
ysr@777 1082 }
ysr@777 1083
johnc@3219 1084 double G1CollectorPolicy::avg_value(double* data) {
jmasa@2188 1085 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1086 double ret = 0.0;
jmasa@3294 1087 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1088 ret += data[i];
johnc@3219 1089 }
jmasa@3294 1090 return ret / (double) no_of_gc_threads();
ysr@777 1091 } else {
ysr@777 1092 return data[0];
ysr@777 1093 }
ysr@777 1094 }
ysr@777 1095
johnc@3219 1096 double G1CollectorPolicy::max_value(double* data) {
jmasa@2188 1097 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1098 double ret = data[0];
jmasa@3294 1099 for (uint i = 1; i < no_of_gc_threads(); ++i) {
johnc@3219 1100 if (data[i] > ret) {
ysr@777 1101 ret = data[i];
johnc@3219 1102 }
johnc@3219 1103 }
ysr@777 1104 return ret;
ysr@777 1105 } else {
ysr@777 1106 return data[0];
ysr@777 1107 }
ysr@777 1108 }
ysr@777 1109
johnc@3219 1110 double G1CollectorPolicy::sum_of_values(double* data) {
jmasa@2188 1111 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1112 double sum = 0.0;
jmasa@3294 1113 for (uint i = 0; i < no_of_gc_threads(); i++) {
ysr@777 1114 sum += data[i];
johnc@3219 1115 }
ysr@777 1116 return sum;
ysr@777 1117 } else {
ysr@777 1118 return data[0];
ysr@777 1119 }
ysr@777 1120 }
ysr@777 1121
johnc@3219 1122 double G1CollectorPolicy::max_sum(double* data1, double* data2) {
ysr@777 1123 double ret = data1[0] + data2[0];
ysr@777 1124
jmasa@2188 1125 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1126 for (uint i = 1; i < no_of_gc_threads(); ++i) {
ysr@777 1127 double data = data1[i] + data2[i];
johnc@3219 1128 if (data > ret) {
ysr@777 1129 ret = data;
johnc@3219 1130 }
ysr@777 1131 }
ysr@777 1132 }
ysr@777 1133 return ret;
ysr@777 1134 }
ysr@777 1135
// Decides whether a new concurrent marking cycle should be requested.
// Returns true only when no cycle is currently running and the
// non-young occupancy plus the pending allocation (alloc_word_size,
// in HeapWords) exceeds the InitiatingHeapOccupancyPercent threshold
// while we are still doing young GCs; during mixed GCs the request is
// logged but suppressed. source tags the decision in the ergo log.
bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    // A marking cycle is already in progress; never start a second one.
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      // Over the threshold, but mixed GCs must finish first.
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}
brutisso@3456 1179
ysr@777 1180 // Anything below that is considered to be zero
ysr@777 1181 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1182
jmasa@3294 1183 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
ysr@777 1184 double end_time_sec = os::elapsedTime();
ysr@777 1185 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1186 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
tonyp@3289 1187 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
tonyp@3289 1188 "otherwise, the subtraction below does not make sense");
ysr@777 1189 size_t rs_size =
tonyp@3289 1190 _cur_collection_pause_used_regions_at_start - cset_region_length();
ysr@777 1191 size_t cur_used_bytes = _g1->used();
ysr@777 1192 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1193 bool last_pause_included_initial_mark = false;
tonyp@2062 1194 bool update_stats = !_g1->evacuation_failed();
jmasa@3294 1195 set_no_of_gc_threads(no_of_gc_threads);
ysr@777 1196
ysr@777 1197 #ifndef PRODUCT
ysr@777 1198 if (G1YoungSurvRateVerbose) {
ysr@777 1199 gclog_or_tty->print_cr("");
ysr@777 1200 _short_lived_surv_rate_group->print();
ysr@777 1201 // do that for any other surv rate groups too
ysr@777 1202 }
ysr@777 1203 #endif // PRODUCT
ysr@777 1204
brutisso@3065 1205 last_pause_included_initial_mark = during_initial_mark_pause();
brutisso@3456 1206 if (last_pause_included_initial_mark) {
brutisso@3065 1207 record_concurrent_mark_init_end(0.0);
ysr@777 1208 }
ysr@777 1209
brutisso@3456 1210 if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
brutisso@3456 1211 // Note: this might have already been set, if during the last
brutisso@3456 1212 // pause we decided to start a cycle but at the beginning of
brutisso@3456 1213 // this pause we decided to postpone it. That's OK.
brutisso@3456 1214 set_initiate_conc_mark_if_possible();
brutisso@3456 1215 }
brutisso@3065 1216
ysr@777 1217 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1218 end_time_sec, false);
ysr@777 1219
ysr@777 1220 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1221 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1222 // can lead to more memory being used during collection than was used
ysr@777 1223 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1224 // Pauses in which evacuation failed can also lead to negative
ysr@777 1225 // collections, since no space is reclaimed from a region containing an
ysr@777 1226 // object whose evacuation failed.
ysr@777 1227 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1228 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1229 // (DLD, 10/05.)
ysr@777 1230 assert((true || parallel) // Always using GC LABs now.
ysr@777 1231 || _g1->evacuation_failed()
ysr@777 1232 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1233 "Negative collection");
ysr@777 1234
ysr@777 1235 size_t freed_bytes =
ysr@777 1236 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1237 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1238
ysr@777 1239 double survival_fraction =
ysr@777 1240 (double)surviving_bytes/
ysr@777 1241 (double)_collection_set_bytes_used_before;
ysr@777 1242
johnc@3219 1243 // These values are used to update the summary information that is
johnc@3219 1244 // displayed when TraceGen0Time is enabled, and are output as part
johnc@3219 1245 // of the PrintGCDetails output, in the non-parallel case.
johnc@3219 1246
johnc@3021 1247 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
tonyp@3416 1248 double satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
johnc@3021 1249 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
johnc@3021 1250 double update_rs_processed_buffers =
johnc@3021 1251 sum_of_values(_par_last_update_rs_processed_buffers);
johnc@3021 1252 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
johnc@3021 1253 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
johnc@3021 1254 double termination_time = avg_value(_par_last_termination_times_ms);
johnc@3021 1255
johnc@3219 1256 double known_time = ext_root_scan_time +
tonyp@3416 1257 satb_filtering_time +
johnc@3219 1258 update_rs_time +
johnc@3219 1259 scan_rs_time +
johnc@3219 1260 obj_copy_time;
johnc@3219 1261
johnc@3219 1262 double other_time_ms = elapsed_ms;
johnc@3219 1263
johnc@3219 1264 // Subtract the SATB drain time. It's initialized to zero at the
johnc@3219 1265 // start of the pause and is updated during the pause if marking
johnc@3219 1266 // is in progress.
johnc@3219 1267 other_time_ms -= _cur_satb_drain_time_ms;
johnc@3219 1268
tonyp@3464 1269 // Subtract the root region scanning wait time. It's initialized to
tonyp@3464 1270 // zero at the start of the pause.
tonyp@3464 1271 other_time_ms -= _root_region_scan_wait_time_ms;
tonyp@3464 1272
johnc@3219 1273 if (parallel) {
johnc@3219 1274 other_time_ms -= _cur_collection_par_time_ms;
johnc@3219 1275 } else {
johnc@3219 1276 other_time_ms -= known_time;
johnc@3219 1277 }
johnc@3219 1278
johnc@3219 1279 // Subtract the time taken to clean the card table from the
johnc@3219 1280 // current value of "other time"
johnc@3219 1281 other_time_ms -= _cur_clear_ct_time_ms;
johnc@3219 1282
johnc@3296 1283 // Subtract the time spent completing marking in the collection
johnc@3296 1284 // set. Note if marking is not in progress during the pause
johnc@3296 1285 // the value of _mark_closure_time_ms will be zero.
johnc@3296 1286 other_time_ms -= _mark_closure_time_ms;
johnc@3296 1287
johnc@3219 1288 // TraceGen0Time and TraceGen1Time summary info updating.
johnc@3219 1289 _all_pause_times_ms->add(elapsed_ms);
johnc@3021 1290
tonyp@1030 1291 if (update_stats) {
johnc@3219 1292 _summary->record_total_time_ms(elapsed_ms);
johnc@3219 1293 _summary->record_other_time_ms(other_time_ms);
johnc@3219 1294
johnc@3219 1295 MainBodySummary* body_summary = _summary->main_body_summary();
johnc@3219 1296 assert(body_summary != NULL, "should not be null!");
johnc@3219 1297
johnc@3219 1298 // This will be non-zero iff marking is currently in progress (i.e.
johnc@3219 1299 // _g1->mark_in_progress() == true) and the currrent pause was not
johnc@3219 1300 // an initial mark pause. Since the body_summary items are NumberSeqs,
johnc@3219 1301 // however, they have to be consistent and updated in lock-step with
johnc@3219 1302 // each other. Therefore we unconditionally record the SATB drain
johnc@3219 1303 // time - even if it's zero.
johnc@3219 1304 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
tonyp@3464 1305 body_summary->record_root_region_scan_wait_time_ms(
tonyp@3464 1306 _root_region_scan_wait_time_ms);
johnc@3021 1307
johnc@3021 1308 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
tonyp@3416 1309 body_summary->record_satb_filtering_time_ms(satb_filtering_time);
johnc@3021 1310 body_summary->record_update_rs_time_ms(update_rs_time);
johnc@3021 1311 body_summary->record_scan_rs_time_ms(scan_rs_time);
johnc@3021 1312 body_summary->record_obj_copy_time_ms(obj_copy_time);
johnc@3219 1313
johnc@3021 1314 if (parallel) {
johnc@3021 1315 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
johnc@3021 1316 body_summary->record_termination_time_ms(termination_time);
johnc@3219 1317
johnc@3219 1318 double parallel_known_time = known_time + termination_time;
johnc@3219 1319 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
johnc@3021 1320 body_summary->record_parallel_other_time_ms(parallel_other_time);
johnc@3021 1321 }
johnc@3219 1322
johnc@3021 1323 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
johnc@3219 1324 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
johnc@3021 1325
ysr@777 1326 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1327 // fragmentation can produce negative collections. Same with evac
ysr@777 1328 // failure.
ysr@777 1329 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1330 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1331 // (DLD, 10/05.
ysr@777 1332 assert((true || parallel)
ysr@777 1333 || _g1->evacuation_failed()
ysr@777 1334 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1335 "Or else negative collection!");
johnc@3219 1336
ysr@777 1337 // this is where we update the allocation rate of the application
ysr@777 1338 double app_time_ms =
ysr@777 1339 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1340 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1341 // This usually happens due to the timer not having the required
ysr@777 1342 // granularity. Some Linuxes are the usual culprits.
ysr@777 1343 // We'll just set it to something (arbitrarily) small.
ysr@777 1344 app_time_ms = 1.0;
ysr@777 1345 }
tonyp@3289 1346 // We maintain the invariant that all objects allocated by mutator
tonyp@3289 1347 // threads will be allocated out of eden regions. So, we can use
tonyp@3289 1348 // the eden region number allocated since the previous GC to
tonyp@3289 1349 // calculate the application's allocate rate. The only exception
tonyp@3289 1350 // to that is humongous objects that are allocated separately. But
tonyp@3289 1351 // given that humongous object allocations do not really affect
tonyp@3289 1352 // either the pause's duration nor when the next pause will take
tonyp@3289 1353 // place we can safely ignore them here.
tonyp@3289 1354 size_t regions_allocated = eden_cset_region_length();
ysr@777 1355 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1356 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1357
ysr@777 1358 double interval_ms =
ysr@777 1359 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1360 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1361 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1362 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1363 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1364 #ifndef PRODUCT
ysr@1521 1365 // Dump info to allow post-facto debugging
ysr@1521 1366 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1367 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1368 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1369 _recent_gc_times_ms->dump();
ysr@1521 1370 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1371 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1372 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1373 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1374 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1375 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1376 #endif // !PRODUCT
ysr@1522 1377 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1378 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1379 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1380 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1381 } else {
ysr@1521 1382 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1383 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1384 }
ysr@1521 1385 }
ysr@777 1386 }
ysr@777 1387
johnc@3219 1388 for (int i = 0; i < _aux_num; ++i) {
johnc@3219 1389 if (_cur_aux_times_set[i]) {
johnc@3219 1390 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
johnc@3219 1391 }
johnc@3219 1392 }
johnc@3219 1393
johnc@3219 1394 // PrintGCDetails output
ysr@777 1395 if (PrintGCDetails) {
johnc@3219 1396 bool print_marking_info =
johnc@3219 1397 _g1->mark_in_progress() && !last_pause_included_initial_mark;
johnc@3219 1398
tonyp@2062 1399 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1400 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1401 elapsed_ms / 1000.0);
ysr@777 1402
tonyp@3464 1403 if (_root_region_scan_wait_time_ms > 0.0) {
tonyp@3464 1404 print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
tonyp@3464 1405 }
tonyp@2062 1406 if (parallel) {
tonyp@2062 1407 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
johnc@3219 1408 print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
johnc@3219 1409 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
johnc@3219 1410 if (print_marking_info) {
tonyp@3416 1411 print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
johnc@3219 1412 }
tonyp@2062 1413 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
brutisso@2712 1414 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
tonyp@2062 1415 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1416 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1417 print_par_stats(2, "Termination", _par_last_termination_times_ms);
brutisso@2712 1418 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
johnc@3219 1419 print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
brutisso@2712 1420
brutisso@2712 1421 for (int i = 0; i < _parallel_gc_threads; i++) {
brutisso@2712 1422 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
johnc@3219 1423
johnc@3219 1424 double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
tonyp@3416 1425 _par_last_satb_filtering_times_ms[i] +
johnc@3219 1426 _par_last_update_rs_times_ms[i] +
johnc@3219 1427 _par_last_scan_rs_times_ms[i] +
johnc@3219 1428 _par_last_obj_copy_times_ms[i] +
johnc@3219 1429 _par_last_termination_times_ms[i];
johnc@3219 1430
johnc@3219 1431 _par_last_gc_worker_other_times_ms[i] = _cur_collection_par_time_ms - worker_known_time;
brutisso@2712 1432 }
johnc@3219 1433 print_par_stats(2, "GC Worker", _par_last_gc_worker_times_ms);
johnc@3219 1434 print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
tonyp@2062 1435 } else {
johnc@3219 1436 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
johnc@3219 1437 if (print_marking_info) {
tonyp@3416 1438 print_stats(1, "SATB Filtering", satb_filtering_time);
johnc@3219 1439 }
tonyp@2062 1440 print_stats(1, "Update RS", update_rs_time);
johnc@3219 1441 print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
tonyp@2062 1442 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1443 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1444 }
johnc@3296 1445 if (print_marking_info) {
johnc@3296 1446 print_stats(1, "Complete CSet Marking", _mark_closure_time_ms);
johnc@3296 1447 }
johnc@3219 1448 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
johnc@1325 1449 #ifndef PRODUCT
johnc@1325 1450 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1451 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1452 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1453 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1454 if (_num_cc_clears > 0) {
johnc@1325 1455 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1456 }
johnc@1325 1457 #endif
ysr@777 1458 print_stats(1, "Other", other_time_ms);
johnc@3296 1459 print_stats(2, "Choose CSet",
johnc@3296 1460 (_recorded_young_cset_choice_time_ms +
johnc@3296 1461 _recorded_non_young_cset_choice_time_ms));
johnc@3175 1462 print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
johnc@3175 1463 print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
johnc@3296 1464 print_stats(2, "Free CSet",
johnc@3296 1465 (_recorded_young_free_cset_time_ms +
johnc@3296 1466 _recorded_non_young_free_cset_time_ms));
johnc@1829 1467
ysr@777 1468 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1469 if (_cur_aux_times_set[i]) {
ysr@777 1470 char buffer[96];
ysr@777 1471 sprintf(buffer, "Aux%d", i);
ysr@777 1472 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1473 }
ysr@777 1474 }
ysr@777 1475 }
ysr@777 1476
ysr@777 1477 // Update the efficiency-since-mark vars.
ysr@777 1478 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1479 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1480 // This usually happens due to the timer not having the required
ysr@777 1481 // granularity. Some Linuxes are the usual culprits.
ysr@777 1482 // We'll just set it to something (arbitrarily) small.
ysr@777 1483 proc_ms = 1.0;
ysr@777 1484 }
ysr@777 1485 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1486
ysr@777 1487 bool new_in_marking_window = _in_marking_window;
ysr@777 1488 bool new_in_marking_window_im = false;
tonyp@1794 1489 if (during_initial_mark_pause()) {
ysr@777 1490 new_in_marking_window = true;
ysr@777 1491 new_in_marking_window_im = true;
ysr@777 1492 }
ysr@777 1493
tonyp@3337 1494 if (_last_young_gc) {
johnc@3178 1495 if (!last_pause_included_initial_mark) {
tonyp@3337 1496 ergo_verbose2(ErgoMixedGCs,
tonyp@3337 1497 "start mixed GCs",
johnc@3178 1498 ergo_format_byte_perc("known garbage"),
johnc@3178 1499 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3337 1500 set_gcs_are_young(false);
johnc@3178 1501 } else {
tonyp@3337 1502 ergo_verbose0(ErgoMixedGCs,
tonyp@3337 1503 "do not start mixed GCs",
johnc@3178 1504 ergo_format_reason("concurrent cycle is about to start"));
johnc@3178 1505 }
tonyp@3337 1506 _last_young_gc = false;
brutisso@3065 1507 }
brutisso@3065 1508
tonyp@3337 1509 if (!_last_gc_was_young) {
tonyp@3337 1510 if (_should_revert_to_young_gcs) {
tonyp@3337 1511 ergo_verbose2(ErgoMixedGCs,
tonyp@3337 1512 "end mixed GCs",
tonyp@3337 1513 ergo_format_reason("mixed GCs end requested")
tonyp@3114 1514 ergo_format_byte_perc("known garbage"),
tonyp@3114 1515 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3337 1516 set_gcs_are_young(true);
tonyp@3114 1517 } else if (_known_garbage_ratio < 0.05) {
tonyp@3337 1518 ergo_verbose3(ErgoMixedGCs,
tonyp@3337 1519 "end mixed GCs",
tonyp@3114 1520 ergo_format_reason("known garbage percent lower than threshold")
tonyp@3114 1521 ergo_format_byte_perc("known garbage")
tonyp@3114 1522 ergo_format_perc("threshold"),
tonyp@3114 1523 _known_garbage_bytes, _known_garbage_ratio * 100.0,
tonyp@3114 1524 0.05 * 100.0);
tonyp@3337 1525 set_gcs_are_young(true);
tonyp@3114 1526 } else if (adaptive_young_list_length() &&
tonyp@3114 1527 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
tonyp@3337 1528 ergo_verbose5(ErgoMixedGCs,
tonyp@3337 1529 "end mixed GCs",
tonyp@3114 1530 ergo_format_reason("current GC efficiency lower than "
tonyp@3337 1531 "predicted young GC efficiency")
tonyp@3114 1532 ergo_format_double("GC efficiency factor")
tonyp@3114 1533 ergo_format_double("current GC efficiency")
tonyp@3337 1534 ergo_format_double("predicted young GC efficiency")
tonyp@3114 1535 ergo_format_byte_perc("known garbage"),
tonyp@3114 1536 get_gc_eff_factor(), cur_efficiency,
tonyp@3114 1537 predict_young_gc_eff(),
tonyp@3114 1538 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3337 1539 set_gcs_are_young(true);
ysr@777 1540 }
brutisso@3065 1541 }
tonyp@3337 1542 _should_revert_to_young_gcs = false;
tonyp@3337 1543
tonyp@3337 1544 if (_last_gc_was_young && !_during_marking) {
brutisso@3065 1545 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1546 }
ysr@777 1547
ysr@777 1548 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1549 // do that for any other surv rate groupsx
ysr@777 1550
apetrusenko@1112 1551 if (update_stats) {
ysr@777 1552 double pause_time_ms = elapsed_ms;
ysr@777 1553
ysr@777 1554 size_t diff = 0;
ysr@777 1555 if (_max_pending_cards >= _pending_cards)
ysr@777 1556 diff = _max_pending_cards - _pending_cards;
ysr@777 1557 _pending_card_diff_seq->add((double) diff);
ysr@777 1558
ysr@777 1559 double cost_per_card_ms = 0.0;
ysr@777 1560 if (_pending_cards > 0) {
ysr@777 1561 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1562 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1563 }
ysr@777 1564
ysr@777 1565 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1566
ysr@777 1567 double cost_per_entry_ms = 0.0;
ysr@777 1568 if (cards_scanned > 10) {
ysr@777 1569 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
tonyp@3337 1570 if (_last_gc_was_young) {
ysr@777 1571 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1572 } else {
tonyp@3337 1573 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1574 }
ysr@777 1575 }
ysr@777 1576
ysr@777 1577 if (_max_rs_lengths > 0) {
ysr@777 1578 double cards_per_entry_ratio =
ysr@777 1579 (double) cards_scanned / (double) _max_rs_lengths;
tonyp@3337 1580 if (_last_gc_was_young) {
tonyp@3337 1581 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1582 } else {
tonyp@3337 1583 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1584 }
ysr@777 1585 }
ysr@777 1586
tonyp@3356 1587 // This is defensive. For a while _max_rs_lengths could get
tonyp@3356 1588 // smaller than _recorded_rs_lengths which was causing
tonyp@3356 1589 // rs_length_diff to get very large and mess up the RSet length
tonyp@3356 1590 // predictions. The reason was unsafe concurrent updates to the
tonyp@3356 1591 // _inc_cset_recorded_rs_lengths field which the code below guards
tonyp@3356 1592 // against (see CR 7118202). This bug has now been fixed (see CR
tonyp@3356 1593 // 7119027). However, I'm still worried that
tonyp@3356 1594 // _inc_cset_recorded_rs_lengths might still end up somewhat
tonyp@3356 1595 // inaccurate. The concurrent refinement thread calculates an
tonyp@3356 1596 // RSet's length concurrently with other CR threads updating it
tonyp@3356 1597 // which might cause it to calculate the length incorrectly (if,
tonyp@3356 1598 // say, it's in mid-coarsening). So I'll leave in the defensive
tonyp@3356 1599 // conditional below just in case.
tonyp@3326 1600 size_t rs_length_diff = 0;
tonyp@3326 1601 if (_max_rs_lengths > _recorded_rs_lengths) {
tonyp@3326 1602 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
tonyp@3326 1603 }
tonyp@3326 1604 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1605
ysr@777 1606 size_t copied_bytes = surviving_bytes;
ysr@777 1607 double cost_per_byte_ms = 0.0;
ysr@777 1608 if (copied_bytes > 0) {
ysr@777 1609 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
tonyp@3337 1610 if (_in_marking_window) {
ysr@777 1611 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
tonyp@3337 1612 } else {
ysr@777 1613 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
tonyp@3337 1614 }
ysr@777 1615 }
ysr@777 1616
ysr@777 1617 double all_other_time_ms = pause_time_ms -
johnc@1829 1618 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1619 _mark_closure_time_ms + termination_time);
ysr@777 1620
ysr@777 1621 double young_other_time_ms = 0.0;
tonyp@3289 1622 if (young_cset_region_length() > 0) {
ysr@777 1623 young_other_time_ms =
ysr@777 1624 _recorded_young_cset_choice_time_ms +
ysr@777 1625 _recorded_young_free_cset_time_ms;
ysr@777 1626 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
tonyp@3289 1627 (double) young_cset_region_length());
ysr@777 1628 }
ysr@777 1629 double non_young_other_time_ms = 0.0;
tonyp@3289 1630 if (old_cset_region_length() > 0) {
ysr@777 1631 non_young_other_time_ms =
ysr@777 1632 _recorded_non_young_cset_choice_time_ms +
ysr@777 1633 _recorded_non_young_free_cset_time_ms;
ysr@777 1634
ysr@777 1635 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
tonyp@3289 1636 (double) old_cset_region_length());
ysr@777 1637 }
ysr@777 1638
ysr@777 1639 double constant_other_time_ms = all_other_time_ms -
ysr@777 1640 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1641 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1642
ysr@777 1643 double survival_ratio = 0.0;
ysr@777 1644 if (_bytes_in_collection_set_before_gc > 0) {
tonyp@3028 1645 survival_ratio = (double) _bytes_copied_during_gc /
tonyp@3028 1646 (double) _bytes_in_collection_set_before_gc;
ysr@777 1647 }
ysr@777 1648
ysr@777 1649 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1650 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1651
ysr@777 1652 double expensive_region_limit_ms =
johnc@1186 1653 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1654 if (expensive_region_limit_ms < 0.0) {
ysr@777 1655 // this means that the other time was predicted to be longer than
ysr@777 1656 // than the max pause time
johnc@1186 1657 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1658 }
ysr@777 1659 _expensive_region_limit_ms = expensive_region_limit_ms;
ysr@777 1660 }
ysr@777 1661
ysr@777 1662 _in_marking_window = new_in_marking_window;
ysr@777 1663 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1664 _free_regions_at_end_of_collection = _g1->free_regions();
tonyp@3119 1665 update_young_list_target_length();
ysr@777 1666
iveresov@1546 1667 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1668 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1669 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
tonyp@3209 1670
tonyp@3209 1671 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 1672 }
ysr@777 1673
// Helpers for printing byte sizes with a human-readable unit suffix.
// EXT_SIZE_PARAMS expands to the two arguments EXT_SIZE_FORMAT expects:
// the value scaled to a proper unit, and that unit's string ("B"/"K"/"M"/...).
#define EXT_SIZE_FORMAT "%d%s"
#define EXT_SIZE_PARAMS(bytes) \
  byte_size_in_proper_unit((bytes)), \
  proper_unit_for_byte_size((bytes))
tonyp@2961 1678
tonyp@2961 1679 void G1CollectorPolicy::print_heap_transition() {
tonyp@2961 1680 if (PrintGCDetails) {
tonyp@2961 1681 YoungList* young_list = _g1->young_list();
tonyp@2961 1682 size_t eden_bytes = young_list->eden_used_bytes();
tonyp@2961 1683 size_t survivor_bytes = young_list->survivor_used_bytes();
tonyp@2961 1684 size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
tonyp@2961 1685 size_t used = _g1->used();
tonyp@2961 1686 size_t capacity = _g1->capacity();
brutisso@3120 1687 size_t eden_capacity =
brutisso@3120 1688 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
tonyp@2961 1689
tonyp@2961 1690 gclog_or_tty->print_cr(
brutisso@3120 1691 " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
brutisso@3120 1692 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
brutisso@3120 1693 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
brutisso@3120 1694 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
brutisso@3120 1695 EXT_SIZE_PARAMS(_eden_bytes_before_gc),
brutisso@3120 1696 EXT_SIZE_PARAMS(_prev_eden_capacity),
brutisso@3120 1697 EXT_SIZE_PARAMS(eden_bytes),
brutisso@3120 1698 EXT_SIZE_PARAMS(eden_capacity),
brutisso@3120 1699 EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
brutisso@3120 1700 EXT_SIZE_PARAMS(survivor_bytes),
brutisso@3120 1701 EXT_SIZE_PARAMS(used_before_gc),
brutisso@3120 1702 EXT_SIZE_PARAMS(_capacity_before_gc),
brutisso@3120 1703 EXT_SIZE_PARAMS(used),
brutisso@3120 1704 EXT_SIZE_PARAMS(capacity));
brutisso@3120 1705
brutisso@3120 1706 _prev_eden_capacity = eden_capacity;
tonyp@2961 1707 } else if (PrintGC) {
tonyp@2961 1708 _g1->print_size_transition(gclog_or_tty,
tonyp@2961 1709 _cur_collection_pause_used_at_start_bytes,
tonyp@2961 1710 _g1->used(), _g1->capacity());
tonyp@2961 1711 }
tonyp@2961 1712 }
tonyp@2961 1713
iveresov@1546 1714 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1715 double update_rs_processed_buffers,
iveresov@1546 1716 double goal_ms) {
iveresov@1546 1717 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1718 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1719
tonyp@1717 1720 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1721 const int k_gy = 3, k_gr = 6;
iveresov@1546 1722 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1723
iveresov@1546 1724 int g = cg1r->green_zone();
iveresov@1546 1725 if (update_rs_time > goal_ms) {
iveresov@1546 1726 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
iveresov@1546 1727 } else {
iveresov@1546 1728 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1729 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1730 }
iveresov@1546 1731 }
iveresov@1546 1732 // Change the refinement threads params
iveresov@1546 1733 cg1r->set_green_zone(g);
iveresov@1546 1734 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1735 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1736 cg1r->reinitialize_threads();
iveresov@1546 1737
iveresov@1546 1738 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1739 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1740 cg1r->yellow_zone());
iveresov@1546 1741 // Change the barrier params
iveresov@1546 1742 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1743 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1744 }
iveresov@1546 1745
iveresov@1546 1746 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1747 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1748 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1749 } else {
iveresov@1546 1750 dcqs.set_completed_queue_padding(0);
iveresov@1546 1751 }
iveresov@1546 1752 dcqs.notify_if_necessary();
iveresov@1546 1753 }
iveresov@1546 1754
ysr@777 1755 double
ysr@777 1756 G1CollectorPolicy::
ysr@777 1757 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1758 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1759
ysr@777 1760 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1761 size_t young_num = g1h->young_list()->length();
ysr@777 1762 if (young_num == 0)
ysr@777 1763 return 0.0;
ysr@777 1764
ysr@777 1765 young_num += adjustment;
ysr@777 1766 size_t pending_cards = predict_pending_cards();
johnc@1829 1767 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1768 predict_rs_length_diff();
ysr@777 1769 size_t card_num;
tonyp@3337 1770 if (gcs_are_young()) {
ysr@777 1771 card_num = predict_young_card_num(rs_lengths);
tonyp@3337 1772 } else {
ysr@777 1773 card_num = predict_non_young_card_num(rs_lengths);
tonyp@3337 1774 }
ysr@777 1775 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1776 double accum_yg_surv_rate =
ysr@777 1777 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1778
ysr@777 1779 size_t bytes_to_copy =
ysr@777 1780 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1781
ysr@777 1782 return
ysr@777 1783 predict_rs_update_time_ms(pending_cards) +
ysr@777 1784 predict_rs_scan_time_ms(card_num) +
ysr@777 1785 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1786 predict_young_other_time_ms(young_num) +
ysr@777 1787 predict_constant_other_time_ms();
ysr@777 1788 }
ysr@777 1789
ysr@777 1790 double
ysr@777 1791 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1792 size_t rs_length = predict_rs_length_diff();
ysr@777 1793 size_t card_num;
tonyp@3337 1794 if (gcs_are_young()) {
ysr@777 1795 card_num = predict_young_card_num(rs_length);
tonyp@3337 1796 } else {
ysr@777 1797 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1798 }
ysr@777 1799 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1800 }
ysr@777 1801
ysr@777 1802 double
ysr@777 1803 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1804 size_t scanned_cards) {
ysr@777 1805 return
ysr@777 1806 predict_rs_update_time_ms(pending_cards) +
ysr@777 1807 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1808 predict_constant_other_time_ms();
ysr@777 1809 }
ysr@777 1810
ysr@777 1811 double
ysr@777 1812 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1813 bool young) {
ysr@777 1814 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1815 size_t card_num;
tonyp@3337 1816 if (gcs_are_young()) {
ysr@777 1817 card_num = predict_young_card_num(rs_length);
tonyp@3337 1818 } else {
ysr@777 1819 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1820 }
ysr@777 1821 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1822
ysr@777 1823 double region_elapsed_time_ms =
ysr@777 1824 predict_rs_scan_time_ms(card_num) +
ysr@777 1825 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1826
ysr@777 1827 if (young)
ysr@777 1828 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1829 else
ysr@777 1830 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1831
ysr@777 1832 return region_elapsed_time_ms;
ysr@777 1833 }
ysr@777 1834
ysr@777 1835 size_t
ysr@777 1836 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1837 size_t bytes_to_copy;
ysr@777 1838 if (hr->is_marked())
ysr@777 1839 bytes_to_copy = hr->max_live_bytes();
ysr@777 1840 else {
ysr@777 1841 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1842 "invariant" );
ysr@777 1843 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1844 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1845 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1846 }
ysr@777 1847
ysr@777 1848 return bytes_to_copy;
ysr@777 1849 }
ysr@777 1850
ysr@777 1851 void
tonyp@3289 1852 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
tonyp@3289 1853 size_t survivor_cset_region_length) {
tonyp@3289 1854 _eden_cset_region_length = eden_cset_region_length;
tonyp@3289 1855 _survivor_cset_region_length = survivor_cset_region_length;
tonyp@3289 1856 _old_cset_region_length = 0;
johnc@1829 1857 }
johnc@1829 1858
// Record the RSet lengths observed when the collection set was built;
// used later against _max_rs_lengths for prediction feedback.
void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}
johnc@1829 1862
ysr@777 1863 void G1CollectorPolicy::check_if_region_is_too_expensive(double
ysr@777 1864 predicted_time_ms) {
ysr@777 1865 // I don't think we need to do this when in young GC mode since
ysr@777 1866 // marking will be initiated next time we hit the soft limit anyway...
ysr@777 1867 if (predicted_time_ms > _expensive_region_limit_ms) {
tonyp@3337 1868 ergo_verbose2(ErgoMixedGCs,
tonyp@3337 1869 "request mixed GCs end",
tonyp@3114 1870 ergo_format_reason("predicted region time higher than threshold")
tonyp@3114 1871 ergo_format_ms("predicted region time")
tonyp@3114 1872 ergo_format_ms("threshold"),
tonyp@3114 1873 predicted_time_ms, _expensive_region_limit_ms);
tonyp@3337 1874 // no point in doing another mixed GC
tonyp@3337 1875 _should_revert_to_young_gcs = true;
ysr@777 1876 }
ysr@777 1877 }
ysr@777 1878
ysr@777 1879 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1880 double elapsed_ms) {
ysr@777 1881 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1882 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1883 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1884 }
ysr@777 1885
ysr@777 1886 size_t G1CollectorPolicy::expansion_amount() {
tonyp@3114 1887 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
tonyp@3114 1888 double threshold = _gc_overhead_perc;
tonyp@3114 1889 if (recent_gc_overhead > threshold) {
johnc@1186 1890 // We will double the existing space, or take
johnc@1186 1891 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 1892 // space, whichever is smaller, bounded below by a minimum
johnc@1186 1893 // expansion (unless that's all that's left.)
ysr@777 1894 const size_t min_expand_bytes = 1*M;
johnc@2504 1895 size_t reserved_bytes = _g1->max_capacity();
ysr@777 1896 size_t committed_bytes = _g1->capacity();
ysr@777 1897 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 1898 size_t expand_bytes;
ysr@777 1899 size_t expand_bytes_via_pct =
johnc@1186 1900 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 1901 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 1902 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 1903 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
tonyp@3114 1904
tonyp@3114 1905 ergo_verbose5(ErgoHeapSizing,
tonyp@3114 1906 "attempt heap expansion",
tonyp@3114 1907 ergo_format_reason("recent GC overhead higher than "
tonyp@3114 1908 "threshold after GC")
tonyp@3114 1909 ergo_format_perc("recent GC overhead")
tonyp@3114 1910 ergo_format_perc("threshold")
tonyp@3114 1911 ergo_format_byte("uncommitted")
tonyp@3114 1912 ergo_format_byte_perc("calculated expansion amount"),
tonyp@3114 1913 recent_gc_overhead, threshold,
tonyp@3114 1914 uncommitted_bytes,
tonyp@3114 1915 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
tonyp@3114 1916
ysr@777 1917 return expand_bytes;
ysr@777 1918 } else {
ysr@777 1919 return 0;
ysr@777 1920 }
ysr@777 1921 }
ysr@777 1922
// Heap-region closure that adds each visited region's used bytes to
// the policy's _bytes_in_collection_set_before_gc accumulator.
class CountCSClosure: public HeapRegionClosure {
  G1CollectorPolicy* _g1_policy;
public:
  CountCSClosure(G1CollectorPolicy* g1_policy) :
    _g1_policy(g1_policy) {}
  bool doHeapRegion(HeapRegion* r) {
    _g1_policy->_bytes_in_collection_set_before_gc += r->used();
    return false;  // never abort the iteration
  }
};
ysr@777 1933
// Sum the used bytes of every region in the current collection set
// into _bytes_in_collection_set_before_gc (via CountCSClosure above).
// Note: the accumulator is not reset here; callers are responsible
// for its initial value.
void G1CollectorPolicy::count_CS_bytes_used() {
  CountCSClosure cs_closure(this);
  _g1->collection_set_iterate(&cs_closure);
}
ysr@777 1938
johnc@3219 1939 void G1CollectorPolicy::print_summary(int level,
johnc@3219 1940 const char* str,
johnc@3219 1941 NumberSeq* seq) const {
ysr@777 1942 double sum = seq->sum();
brutisso@2645 1943 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 1944 str, sum / 1000.0, seq->avg());
ysr@777 1945 }
ysr@777 1946
johnc@3219 1947 void G1CollectorPolicy::print_summary_sd(int level,
johnc@3219 1948 const char* str,
johnc@3219 1949 NumberSeq* seq) const {
ysr@777 1950 print_summary(level, str, seq);
brutisso@2645 1951 LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 1952 seq->num(), seq->sd(), seq->maximum());
ysr@777 1953 }
ysr@777 1954
ysr@777 1955 void G1CollectorPolicy::check_other_times(int level,
ysr@777 1956 NumberSeq* other_times_ms,
ysr@777 1957 NumberSeq* calc_other_times_ms) const {
ysr@777 1958 bool should_print = false;
brutisso@2645 1959 LineBuffer buf(level + 2);
ysr@777 1960
ysr@777 1961 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 1962 fabs(calc_other_times_ms->sum()));
ysr@777 1963 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 1964 fabs(calc_other_times_ms->sum()));
ysr@777 1965 double sum_ratio = max_sum / min_sum;
ysr@777 1966 if (sum_ratio > 1.1) {
ysr@777 1967 should_print = true;
brutisso@2645 1968 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 1969 }
ysr@777 1970
ysr@777 1971 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 1972 fabs(calc_other_times_ms->avg()));
ysr@777 1973 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 1974 fabs(calc_other_times_ms->avg()));
ysr@777 1975 double avg_ratio = max_avg / min_avg;
ysr@777 1976 if (avg_ratio > 1.1) {
ysr@777 1977 should_print = true;
brutisso@2645 1978 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 1979 }
ysr@777 1980
ysr@777 1981 if (other_times_ms->sum() < -0.01) {
brutisso@2645 1982 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 1983 }
ysr@777 1984
ysr@777 1985 if (other_times_ms->avg() < -0.01) {
brutisso@2645 1986 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 1987 }
ysr@777 1988
ysr@777 1989 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 1990 should_print = true;
brutisso@2645 1991 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 1992 }
ysr@777 1993
ysr@777 1994 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 1995 should_print = true;
brutisso@2645 1996 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 1997 }
ysr@777 1998
ysr@777 1999 if (should_print)
ysr@777 2000 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 2001 }
ysr@777 2002
ysr@777 2003 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
jmasa@2188 2004 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 2005 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 2006 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 2007 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 2008 if (body_summary != NULL) {
tonyp@3464 2009 print_summary(1, "Root Region Scan Wait", body_summary->get_root_region_scan_wait_seq());
ysr@777 2010 if (parallel) {
ysr@777 2011 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
johnc@3219 2012 print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
tonyp@3416 2013 print_summary(2, "SATB Filtering", body_summary->get_satb_filtering_seq());
ysr@777 2014 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2015 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2016 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2017 print_summary(2, "Termination", body_summary->get_termination_seq());
johnc@3219 2018 print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
ysr@777 2019 {
ysr@777 2020 NumberSeq* other_parts[] = {
ysr@777 2021 body_summary->get_ext_root_scan_seq(),
tonyp@3416 2022 body_summary->get_satb_filtering_seq(),
johnc@3219 2023 body_summary->get_update_rs_seq(),
ysr@777 2024 body_summary->get_scan_rs_seq(),
ysr@777 2025 body_summary->get_obj_copy_seq(),
ysr@777 2026 body_summary->get_termination_seq()
ysr@777 2027 };
ysr@777 2028 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
johnc@2134 2029 6, other_parts);
ysr@777 2030 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 2031 &calc_other_times_ms);
ysr@777 2032 }
ysr@777 2033 } else {
johnc@3219 2034 print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
tonyp@3416 2035 print_summary(1, "SATB Filtering", body_summary->get_satb_filtering_seq());
ysr@777 2036 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2037 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2038 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2039 }
ysr@777 2040 }
johnc@3219 2041 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
johnc@3219 2042 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 2043 print_summary(1, "Other", summary->get_other_seq());
ysr@777 2044 {
johnc@2134 2045 if (body_summary != NULL) {
johnc@2134 2046 NumberSeq calc_other_times_ms;
johnc@2134 2047 if (parallel) {
johnc@2134 2048 // parallel
johnc@2134 2049 NumberSeq* other_parts[] = {
johnc@2134 2050 body_summary->get_satb_drain_seq(),
tonyp@3464 2051 body_summary->get_root_region_scan_wait_seq(),
johnc@2134 2052 body_summary->get_parallel_seq(),
johnc@2134 2053 body_summary->get_clear_ct_seq()
johnc@2134 2054 };
johnc@2134 2055 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
tonyp@3464 2056 4, other_parts);
johnc@2134 2057 } else {
johnc@2134 2058 // serial
johnc@2134 2059 NumberSeq* other_parts[] = {
johnc@2134 2060 body_summary->get_satb_drain_seq(),
tonyp@3464 2061 body_summary->get_root_region_scan_wait_seq(),
johnc@2134 2062 body_summary->get_update_rs_seq(),
johnc@2134 2063 body_summary->get_ext_root_scan_seq(),
tonyp@3416 2064 body_summary->get_satb_filtering_seq(),
johnc@2134 2065 body_summary->get_scan_rs_seq(),
johnc@2134 2066 body_summary->get_obj_copy_seq()
johnc@2134 2067 };
johnc@2134 2068 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
tonyp@3464 2069 7, other_parts);
johnc@2134 2070 }
johnc@2134 2071 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 2072 }
ysr@777 2073 }
ysr@777 2074 } else {
brutisso@2645 2075 LineBuffer(1).append_and_print_cr("none");
ysr@777 2076 }
brutisso@2645 2077 LineBuffer(0).append_and_print_cr("");
ysr@777 2078 }
ysr@777 2079
// Dumps the accumulated GC timing statistics. The young-gen
// (evacuation pause) section is gated on TraceGen0Time, the full-GC
// section on TraceGen1Time.
void G1CollectorPolicy::print_tracing_info() const {
  if (TraceGen0Time) {
    // Overall pause statistics, followed by the per-kind pause counts.
    gclog_or_tty->print_cr("ALL PAUSES");
    print_summary_sd(0, "Total", _all_pause_times_ms);
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
    gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
    gclog_or_tty->print_cr("");

    // Per-phase breakdown of the evacuation pauses.
    gclog_or_tty->print_cr("EVACUATION PAUSES");
    print_summary(_summary);

    // Miscellaneous timing sequences, including any auxiliary
    // sequences that actually recorded samples.
    gclog_or_tty->print_cr("MISC");
    print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
    print_summary_sd(0, "Yields", _all_yield_times_ms);
    for (int i = 0; i < _aux_num; ++i) {
      if (_all_aux_times_ms[i].num() > 0) {
        char buffer[96];
        sprintf(buffer, "Aux%d", i);
        print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
      }
    }
  }
  if (TraceGen1Time) {
    // Full GC statistics: count, total, average, std dev and max.
    if (_all_full_gc_times_ms->num() > 0) {
      gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                 _all_full_gc_times_ms->num(),
                 _all_full_gc_times_ms->sum() / 1000.0);
      gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
      gclog_or_tty->print_cr("     [std. dev = %8.2f ms, max = %8.2f ms]",
                    _all_full_gc_times_ms->sd(),
                    _all_full_gc_times_ms->maximum());
    }
  }
}
ysr@777 2116
// Prints the survivor rate summary for the short-lived surv rate
// group. Debug builds only; a no-op in product builds.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
ysr@777 2123
#ifndef PRODUCT
// for debugging, bit of a hack...
// Formats "length" regions as a human-readable MB string. Note that
// the result points into a shared static buffer, so it is overwritten
// by the next call and is not safe for concurrent use.
static char*
region_num_to_mbs(int length) {
  static char buffer[64];
  double bytes = (double) (length * HeapRegion::GrainBytes);
  double mbs = bytes / (double) (1024 * 1024);
  sprintf(buffer, "%7.2lfMB", mbs);
  return buffer;
}
#endif // PRODUCT
ysr@777 2135
apetrusenko@980 2136 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2137 switch (purpose) {
ysr@777 2138 case GCAllocForSurvived:
apetrusenko@980 2139 return _max_survivor_regions;
ysr@777 2140 case GCAllocForTenured:
apetrusenko@980 2141 return REGIONS_UNLIMITED;
ysr@777 2142 default:
apetrusenko@980 2143 ShouldNotReachHere();
apetrusenko@980 2144 return REGIONS_UNLIMITED;
ysr@777 2145 };
ysr@777 2146 }
ysr@777 2147
tonyp@3119 2148 void G1CollectorPolicy::update_max_gc_locker_expansion() {
tonyp@2333 2149 size_t expansion_region_num = 0;
tonyp@2333 2150 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 2151 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 2152 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 2153 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 2154 // less than 1.0) we'll get 1.
tonyp@2333 2155 expansion_region_num = (size_t) ceil(expansion_region_num_d);
tonyp@2333 2156 } else {
tonyp@2333 2157 assert(expansion_region_num == 0, "sanity");
tonyp@2333 2158 }
tonyp@2333 2159 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 2160 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 2161 }
tonyp@2333 2162
apetrusenko@980 2163 // Calculates survivor space parameters.
tonyp@3119 2164 void G1CollectorPolicy::update_survivors_policy() {
tonyp@3119 2165 double max_survivor_regions_d =
tonyp@3119 2166 (double) _young_list_target_length / (double) SurvivorRatio;
tonyp@3119 2167 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
tonyp@3119 2168 // smaller than 1.0) we'll get 1.
tonyp@3119 2169 _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
tonyp@3119 2170
tonyp@3066 2171 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2172 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2173 }
apetrusenko@980 2174
ysr@777 2175 #ifndef PRODUCT
ysr@777 2176 class HRSortIndexIsOKClosure: public HeapRegionClosure {
ysr@777 2177 CollectionSetChooser* _chooser;
ysr@777 2178 public:
ysr@777 2179 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
ysr@777 2180 _chooser(chooser) {}
ysr@777 2181
ysr@777 2182 bool doHeapRegion(HeapRegion* r) {
ysr@777 2183 if (!r->continuesHumongous()) {
ysr@777 2184 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
ysr@777 2185 }
ysr@777 2186 return false;
ysr@777 2187 }
ysr@777 2188 };
ysr@777 2189
tonyp@3209 2190 bool G1CollectorPolicy::assertMarkedBytesDataOK() {
ysr@777 2191 HRSortIndexIsOKClosure cl(_collectionSetChooser);
ysr@777 2192 _g1->heap_region_iterate(&cl);
ysr@777 2193 return true;
ysr@777 2194 }
ysr@777 2195 #endif
ysr@777 2196
tonyp@3114 2197 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
tonyp@3114 2198 GCCause::Cause gc_cause) {
tonyp@2011 2199 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2200 if (!during_cycle) {
tonyp@3114 2201 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2202 "request concurrent cycle initiation",
tonyp@3114 2203 ergo_format_reason("requested by GC cause")
tonyp@3114 2204 ergo_format_str("GC cause"),
tonyp@3114 2205 GCCause::to_string(gc_cause));
tonyp@2011 2206 set_initiate_conc_mark_if_possible();
tonyp@2011 2207 return true;
tonyp@2011 2208 } else {
tonyp@3114 2209 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2210 "do not request concurrent cycle initiation",
tonyp@3114 2211 ergo_format_reason("concurrent cycle already in progress")
tonyp@3114 2212 ergo_format_str("GC cause"),
tonyp@3114 2213 GCCause::to_string(gc_cause));
tonyp@2011 2214 return false;
tonyp@2011 2215 }
tonyp@2011 2216 }
tonyp@2011 2217
// Called when setting up a pause to decide whether it should be an
// initial-mark pause, i.e., whether it should kick off a new
// concurrent marking cycle (assuming one was previously requested via
// initiate_conc_mark_if_possible()).
void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();
      // We do not allow mixed GCs during marking.
      if (!gcs_are_young()) {
        set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                  "initiate concurrent cycle",
                  ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      ergo_verbose0(ErgoConcCycles,
                    "do not initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle already in progress"));
    }
  }
}
tonyp@1794 2275
ysr@777 2276 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2277 CollectionSetChooser* _hrSorted;
ysr@777 2278
ysr@777 2279 public:
ysr@777 2280 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2281 _hrSorted(hrSorted)
ysr@777 2282 {}
ysr@777 2283
ysr@777 2284 bool doHeapRegion(HeapRegion* r) {
ysr@777 2285 // We only include humongous regions in collection
ysr@777 2286 // sets when concurrent mark shows that their contained object is
ysr@777 2287 // unreachable.
ysr@777 2288
ysr@777 2289 // Do we have any marking information for this region?
ysr@777 2290 if (r->is_marked()) {
ysr@777 2291 // We don't include humongous regions in collection
ysr@777 2292 // sets because we collect them immediately at the end of a marking
ysr@777 2293 // cycle. We also don't include young regions because we *must*
ysr@777 2294 // include them in the next collection pause.
ysr@777 2295 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2296 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2297 }
ysr@777 2298 }
ysr@777 2299 return false;
ysr@777 2300 }
ysr@777 2301 };
ysr@777 2302
ysr@777 2303 class ParKnownGarbageHRClosure: public HeapRegionClosure {
ysr@777 2304 CollectionSetChooser* _hrSorted;
ysr@777 2305 jint _marked_regions_added;
ysr@777 2306 jint _chunk_size;
ysr@777 2307 jint _cur_chunk_idx;
ysr@777 2308 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
ysr@777 2309 int _worker;
ysr@777 2310 int _invokes;
ysr@777 2311
ysr@777 2312 void get_new_chunk() {
ysr@777 2313 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
ysr@777 2314 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
ysr@777 2315 }
ysr@777 2316 void add_region(HeapRegion* r) {
ysr@777 2317 if (_cur_chunk_idx == _cur_chunk_end) {
ysr@777 2318 get_new_chunk();
ysr@777 2319 }
ysr@777 2320 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
ysr@777 2321 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
ysr@777 2322 _marked_regions_added++;
ysr@777 2323 _cur_chunk_idx++;
ysr@777 2324 }
ysr@777 2325
ysr@777 2326 public:
ysr@777 2327 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
ysr@777 2328 jint chunk_size,
ysr@777 2329 int worker) :
ysr@777 2330 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
ysr@777 2331 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
ysr@777 2332 _invokes(0)
ysr@777 2333 {}
ysr@777 2334
ysr@777 2335 bool doHeapRegion(HeapRegion* r) {
ysr@777 2336 // We only include humongous regions in collection
ysr@777 2337 // sets when concurrent mark shows that their contained object is
ysr@777 2338 // unreachable.
ysr@777 2339 _invokes++;
ysr@777 2340
ysr@777 2341 // Do we have any marking information for this region?
ysr@777 2342 if (r->is_marked()) {
ysr@777 2343 // We don't include humongous regions in collection
ysr@777 2344 // sets because we collect them immediately at the end of a marking
ysr@777 2345 // cycle.
ysr@777 2346 // We also do not include young regions in collection sets
ysr@777 2347 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2348 add_region(r);
ysr@777 2349 }
ysr@777 2350 }
ysr@777 2351 return false;
ysr@777 2352 }
ysr@777 2353 jint marked_regions_added() { return _marked_regions_added; }
ysr@777 2354 int invokes() { return _invokes; }
ysr@777 2355 };
ysr@777 2356
ysr@777 2357 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 2358 CollectionSetChooser* _hrSorted;
ysr@777 2359 jint _chunk_size;
ysr@777 2360 G1CollectedHeap* _g1;
ysr@777 2361 public:
ysr@777 2362 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ysr@777 2363 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 2364 _hrSorted(hrSorted), _chunk_size(chunk_size),
ysr@777 2365 _g1(G1CollectedHeap::heap())
ysr@777 2366 {}
ysr@777 2367
jmasa@3357 2368 void work(uint worker_id) {
jmasa@3357 2369 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
jmasa@3357 2370 _chunk_size,
jmasa@3357 2371 worker_id);
ysr@777 2372 // Back to zero for the claim value.
jmasa@3357 2373 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
jmasa@3294 2374 _g1->workers()->active_workers(),
tonyp@790 2375 HeapRegion::InitialClaimValue);
ysr@777 2376 jint regions_added = parKnownGarbageCl.marked_regions_added();
ysr@777 2377 _hrSorted->incNumMarkedHeapRegions(regions_added);
ysr@777 2378 if (G1PrintParCleanupStats) {
brutisso@2645 2379 gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
jmasa@3357 2380 worker_id, parKnownGarbageCl.invokes(), regions_added);
ysr@777 2381 }
ysr@777 2382 }
ysr@777 2383 };
ysr@777 2384
// Called at the end of the concurrent mark cleanup pause. Rebuilds
// the collection set chooser's candidate list (in parallel if the
// parallel GC threads are in use) and records the pause's timing
// information. "no_of_gc_threads" is the number of active workers.
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  // The timing locals below are only read under the same
  // G1PrintParCleanupStats guard that initializes them.
  double start_sec;
  if (G1PrintParCleanupStats) {
    start_sec = os::elapsedTime();
  }

  _collectionSetChooser->clearMarkedHeapRegions();
  double clear_marked_end_sec;
  if (G1PrintParCleanupStats) {
    clear_marked_end_sec = os::elapsedTime();
    gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.",
                           (clear_marked_end_sec - start_sec) * 1000.0);
  }

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const size_t OverpartitionFactor = 4;
    size_t WorkUnit;
    // The use of MinChunkSize = 8 in the original code
    // causes some assertion failures when the total number of
    // region is less than 8.  The code here tries to fix that.
    // Should the original code also be fixed?
    if (no_of_gc_threads > 0) {
      const size_t MinWorkUnit =
        MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
      WorkUnit =
        MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
             MinWorkUnit);
    } else {
      // This branch fires the assert in debug builds (a zero worker
      // count is a bug); in product builds it falls back to sizing
      // the work units off ParallelGCThreads instead.
      assert(no_of_gc_threads > 0,
        "The active gc workers should be greater than 0");
      // In a product build do something reasonable to avoid a crash.
      const size_t MinWorkUnit =
        MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
      WorkUnit =
        MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
             MinWorkUnit);
    }
    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
                                                             WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    // The workers iterate with InitialClaimValue, so the claim values
    // should be back to their initial state afterwards.
    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }
  double known_garbage_end_sec;
  if (G1PrintParCleanupStats) {
    known_garbage_end_sec = os::elapsedTime();
    gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
                      (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  }

  _collectionSetChooser->sortMarkedHeapRegions();
  double end_sec = os::elapsedTime();
  if (G1PrintParCleanupStats) {
    gclog_or_tty->print_cr(" sorting: %8.3f ms.",
                           (end_sec - known_garbage_end_sec) * 1000.0);
  }

  // Record the pause's duration in the cleanup statistics and keep
  // the MMU tracker and pause-end timestamp up to date.
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
}
ysr@777 2455
johnc@1829 2456 // Add the heap region at the head of the non-incremental collection set
tonyp@3289 2457 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
johnc@1829 2458 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2459 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2460
johnc@1829 2461 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2462 hr->set_in_collection_set(true);
ysr@777 2463 hr->set_next_in_collection_set(_collection_set);
ysr@777 2464 _collection_set = hr;
ysr@777 2465 _collection_set_bytes_used_before += hr->used();
tonyp@961 2466 _g1->register_region_with_in_cset_fast_test(hr);
tonyp@3289 2467 size_t rs_length = hr->rem_set()->occupied();
tonyp@3289 2468 _recorded_rs_lengths += rs_length;
tonyp@3289 2469 _old_cset_region_length += 1;
ysr@777 2470 }
ysr@777 2471
johnc@1829 2472 // Initialize the per-collection-set information
johnc@1829 2473 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2474 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2475
johnc@1829 2476 _inc_cset_head = NULL;
johnc@1829 2477 _inc_cset_tail = NULL;
johnc@1829 2478 _inc_cset_bytes_used_before = 0;
johnc@1829 2479
johnc@1829 2480 _inc_cset_max_finger = 0;
johnc@1829 2481 _inc_cset_recorded_rs_lengths = 0;
tonyp@3356 2482 _inc_cset_recorded_rs_lengths_diffs = 0;
tonyp@3356 2483 _inc_cset_predicted_elapsed_time_ms = 0.0;
tonyp@3356 2484 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
johnc@1829 2485 _inc_cset_build_state = Active;
johnc@1829 2486 }
johnc@1829 2487
// Folds the deltas accumulated by the concurrent RSet-length sampling
// (the *_diffs fields) into the "main" incremental CSet totals.
// Called at the start of a GC, at a safepoint, so no sampling updates
// can race with us here.
void G1CollectorPolicy::finalize_incremental_cset_building() {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");

  // The two "main" fields, _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
  // that adds a new region to the CSet. Further updates by the
  // concurrent refinement thread that samples the young RSet lengths
  // are accumulated in the *_diffs fields. Here we add the diffs to
  // the "main" fields.

  if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
    _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
  } else {
    // This is defensive. The diff should in theory be always positive
    // as RSets can only grow between GCs. However, given that we
    // sample their size concurrently with other threads updating them
    // it's possible that we might get the wrong size back, which
    // could make the calculations somewhat inaccurate.
    size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
    if (_inc_cset_recorded_rs_lengths >= diffs) {
      _inc_cset_recorded_rs_lengths -= diffs;
    } else {
      // Clamp at zero rather than underflowing the unsigned total.
      _inc_cset_recorded_rs_lengths = 0;
    }
  }
  _inc_cset_predicted_elapsed_time_ms +=
                                     _inc_cset_predicted_elapsed_time_ms_diffs;

  // The deltas have been consumed; reset them for the next cycle.
  _inc_cset_recorded_rs_lengths_diffs = 0;
  _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
}
tonyp@3356 2520
johnc@1829 2521 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2522 // This routine is used when:
johnc@1829 2523 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2524 // evacuation pause,
johnc@1829 2525 // * adding the current allocation region to the incremental cset
johnc@1829 2526 // when it is retired, and
johnc@1829 2527 // * updating existing policy information for a region in the
johnc@1829 2528 // incremental cset via young list RSet sampling.
johnc@1829 2529 // Therefore this routine may be called at a safepoint by the
johnc@1829 2530 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2531 // retiring the current allocation region) or a concurrent
johnc@1829 2532 // refine thread (RSet sampling).
johnc@1829 2533
johnc@1829 2534 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2535 size_t used_bytes = hr->used();
johnc@1829 2536 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2537 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2538 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2539
johnc@1829 2540 // Cache the values we have added to the aggregated informtion
johnc@1829 2541 // in the heap region in case we have to remove this region from
johnc@1829 2542 // the incremental collection set, or it is updated by the
johnc@1829 2543 // rset sampling code
johnc@1829 2544 hr->set_recorded_rs_length(rs_length);
johnc@1829 2545 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2546 }
johnc@1829 2547
// Re-records the policy information for "hr" after the RSet sampling
// code observed a new RSet length ("new_rs_length") for it. The
// deltas relative to the previously cached values are accumulated in
// the *_diffs fields and folded into the main totals at the start of
// the next GC by finalize_incremental_cset_building().
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
                                                     size_t new_rs_length) {
  // Update the CSet information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");
  assert(!SafepointSynchronize::is_at_safepoint(),
                                               "should not be at a safepoint");

  // We could have updated _inc_cset_recorded_rs_lengths and
  // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
  // that atomically, as this code is executed by a concurrent
  // refinement thread, potentially concurrently with a mutator thread
  // allocating a new region and also updating the same fields. To
  // avoid the atomic operations we accumulate these updates on two
  // separate fields (*_diffs) and we'll just add them to the "main"
  // fields at the start of a GC.

  ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
  ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
  _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;

  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
  double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
  _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;

  // Cache the new values so the next sampling computes its delta
  // against them.
  hr->set_recorded_rs_length(new_rs_length);
  hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
}
johnc@1829 2576
johnc@1829 2577 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
tonyp@3289 2578 assert(hr->is_young(), "invariant");
tonyp@3289 2579 assert(hr->young_index_in_cset() > -1, "should have already been set");
johnc@1829 2580 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2581
johnc@1829 2582 // We need to clear and set the cached recorded/cached collection set
johnc@1829 2583 // information in the heap region here (before the region gets added
johnc@1829 2584 // to the collection set). An individual heap region's cached values
johnc@1829 2585 // are calculated, aggregated with the policy collection set info,
johnc@1829 2586 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2587 // by the Young List sampling code.
johnc@1829 2588
johnc@1829 2589 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2590 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2591
johnc@1829 2592 HeapWord* hr_end = hr->end();
johnc@1829 2593 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2594
johnc@1829 2595 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2596 hr->set_in_collection_set(true);
johnc@1829 2597 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2598
johnc@1829 2599 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2600 }
johnc@1829 2601
johnc@1829 2602 // Add the region at the RHS of the incremental cset
johnc@1829 2603 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2604 // We should only ever be appending survivors at the end of a pause
johnc@1829 2605 assert( hr->is_survivor(), "Logic");
johnc@1829 2606
johnc@1829 2607 // Do the 'common' stuff
johnc@1829 2608 add_region_to_incremental_cset_common(hr);
johnc@1829 2609
johnc@1829 2610 // Now add the region at the right hand side
johnc@1829 2611 if (_inc_cset_tail == NULL) {
johnc@1829 2612 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2613 _inc_cset_head = hr;
johnc@1829 2614 } else {
johnc@1829 2615 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2616 }
johnc@1829 2617 _inc_cset_tail = hr;
johnc@1829 2618 }
johnc@1829 2619
johnc@1829 2620 // Add the region to the LHS of the incremental cset
johnc@1829 2621 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2622 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2623 assert(!hr->is_survivor(), "Logic");
johnc@1829 2624
johnc@1829 2625 // Do the 'common' stuff
johnc@1829 2626 add_region_to_incremental_cset_common(hr);
johnc@1829 2627
johnc@1829 2628 // Add the region at the left hand side
johnc@1829 2629 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2630 if (_inc_cset_head == NULL) {
johnc@1829 2631 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2632 _inc_cset_tail = hr;
johnc@1829 2633 }
johnc@1829 2634 _inc_cset_head = hr;
johnc@1829 2635 }
johnc@1829 2636
johnc@1829 2637 #ifndef PRODUCT
johnc@1829 2638 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2639 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2640
johnc@1829 2641 st->print_cr("\nCollection_set:");
johnc@1829 2642 HeapRegion* csr = list_head;
johnc@1829 2643 while (csr != NULL) {
johnc@1829 2644 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2645 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2646 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2647 "age: %4d, y: %d, surv: %d",
johnc@1829 2648 csr->bottom(), csr->end(),
johnc@1829 2649 csr->top(),
johnc@1829 2650 csr->prev_top_at_mark_start(),
johnc@1829 2651 csr->next_top_at_mark_start(),
johnc@1829 2652 csr->top_at_conc_mark_count(),
johnc@1829 2653 csr->age_in_surv_rate_group_cond(),
johnc@1829 2654 csr->is_young(),
johnc@1829 2655 csr->is_survivor());
johnc@1829 2656 csr = next;
johnc@1829 2657 }
johnc@1829 2658 }
johnc@1829 2659 #endif // !PRODUCT
johnc@1829 2660
tonyp@3209 2661 void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
johnc@1829 2662 // Set this here - in case we're not doing young collections.
johnc@1829 2663 double non_young_start_time_sec = os::elapsedTime();
johnc@1829 2664
tonyp@3114 2665 YoungList* young_list = _g1->young_list();
tonyp@3356 2666 finalize_incremental_cset_building();
tonyp@3114 2667
tonyp@2011 2668 guarantee(target_pause_time_ms > 0.0,
tonyp@2011 2669 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2011 2670 target_pause_time_ms));
tonyp@2011 2671 guarantee(_collection_set == NULL, "Precondition");
ysr@777 2672
ysr@777 2673 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
ysr@777 2674 double predicted_pause_time_ms = base_time_ms;
ysr@777 2675
tonyp@2011 2676 double time_remaining_ms = target_pause_time_ms - base_time_ms;
ysr@777 2677
tonyp@3114 2678 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2679 "start choosing CSet",
tonyp@3114 2680 ergo_format_ms("predicted base time")
tonyp@3114 2681 ergo_format_ms("remaining time")
tonyp@3114 2682 ergo_format_ms("target pause time"),
tonyp@3114 2683 base_time_ms, time_remaining_ms, target_pause_time_ms);
tonyp@3114 2684
ysr@777 2685 // the 10% and 50% values are arbitrary...
tonyp@3114 2686 double threshold = 0.10 * target_pause_time_ms;
tonyp@3114 2687 if (time_remaining_ms < threshold) {
tonyp@3114 2688 double prev_time_remaining_ms = time_remaining_ms;
tonyp@2011 2689 time_remaining_ms = 0.50 * target_pause_time_ms;
tonyp@3114 2690 ergo_verbose3(ErgoCSetConstruction,
tonyp@3114 2691 "adjust remaining time",
tonyp@3114 2692 ergo_format_reason("remaining time lower than threshold")
tonyp@3114 2693 ergo_format_ms("remaining time")
tonyp@3114 2694 ergo_format_ms("threshold")
tonyp@3114 2695 ergo_format_ms("adjusted remaining time"),
tonyp@3114 2696 prev_time_remaining_ms, threshold, time_remaining_ms);
ysr@777 2697 }
ysr@777 2698
tonyp@3114 2699 size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
tonyp@3114 2700
tonyp@3114 2701 HeapRegion* hr;
tonyp@3114 2702 double young_start_time_sec = os::elapsedTime();
ysr@777 2703
apetrusenko@1112 2704 _collection_set_bytes_used_before = 0;
tonyp@3337 2705 _last_gc_was_young = gcs_are_young() ? true : false;
tonyp@3337 2706
tonyp@3337 2707 if (_last_gc_was_young) {
tonyp@3337 2708 ++_young_pause_num;
tonyp@3114 2709 } else {
tonyp@3337 2710 ++_mixed_pause_num;
tonyp@3114 2711 }
brutisso@3065 2712
brutisso@3065 2713 // The young list is laid with the survivor regions from the previous
brutisso@3065 2714 // pause are appended to the RHS of the young list, i.e.
brutisso@3065 2715 // [Newly Young Regions ++ Survivors from last pause].
brutisso@3065 2716
tonyp@3289 2717 size_t survivor_region_length = young_list->survivor_length();
tonyp@3289 2718 size_t eden_region_length = young_list->length() - survivor_region_length;
tonyp@3289 2719 init_cset_region_lengths(eden_region_length, survivor_region_length);
tonyp@3114 2720 hr = young_list->first_survivor_region();
brutisso@3065 2721 while (hr != NULL) {
brutisso@3065 2722 assert(hr->is_survivor(), "badly formed young list");
brutisso@3065 2723 hr->set_young();
brutisso@3065 2724 hr = hr->get_next_young_region();
brutisso@3065 2725 }
brutisso@3065 2726
tonyp@3114 2727 // Clear the fields that point to the survivor list - they are all young now.
tonyp@3114 2728 young_list->clear_survivors();
brutisso@3065 2729
brutisso@3065 2730 _collection_set = _inc_cset_head;
brutisso@3065 2731 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
brutisso@3065 2732 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
brutisso@3065 2733 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
brutisso@3065 2734
tonyp@3114 2735 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2736 "add young regions to CSet",
tonyp@3114 2737 ergo_format_region("eden")
tonyp@3114 2738 ergo_format_region("survivors")
tonyp@3114 2739 ergo_format_ms("predicted young region time"),
tonyp@3289 2740 eden_region_length, survivor_region_length,
tonyp@3114 2741 _inc_cset_predicted_elapsed_time_ms);
tonyp@3114 2742
brutisso@3065 2743 // The number of recorded young regions is the incremental
brutisso@3065 2744 // collection set's current size
brutisso@3065 2745 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
brutisso@3065 2746
brutisso@3065 2747 double young_end_time_sec = os::elapsedTime();
brutisso@3065 2748 _recorded_young_cset_choice_time_ms =
brutisso@3065 2749 (young_end_time_sec - young_start_time_sec) * 1000.0;
brutisso@3065 2750
brutisso@3065 2751 // We are doing young collections so reset this.
brutisso@3065 2752 non_young_start_time_sec = young_end_time_sec;
brutisso@3065 2753
tonyp@3337 2754 if (!gcs_are_young()) {
ysr@777 2755 bool should_continue = true;
ysr@777 2756 NumberSeq seq;
ysr@777 2757 double avg_prediction = 100000000000000000.0; // something very large
johnc@1829 2758
tonyp@3114 2759 double prev_predicted_pause_time_ms = predicted_pause_time_ms;
ysr@777 2760 do {
tonyp@3289 2761 // Note that add_old_region_to_cset() increments the
tonyp@3289 2762 // _old_cset_region_length field and cset_region_length() returns the
tonyp@3289 2763 // sum of _eden_cset_region_length, _survivor_cset_region_length, and
tonyp@3289 2764 // _old_cset_region_length. So, as old regions are added to the
tonyp@3289 2765 // CSet, _old_cset_region_length will be incremented and
tonyp@3289 2766 // cset_region_length(), which is used below, will always reflect
tonyp@3289 2767 // the the total number of regions added up to this point to the CSet.
tonyp@3289 2768
ysr@777 2769 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
ysr@777 2770 avg_prediction);
apetrusenko@1112 2771 if (hr != NULL) {
tonyp@3268 2772 _g1->old_set_remove(hr);
ysr@777 2773 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
ysr@777 2774 time_remaining_ms -= predicted_time_ms;
ysr@777 2775 predicted_pause_time_ms += predicted_time_ms;
tonyp@3289 2776 add_old_region_to_cset(hr);
ysr@777 2777 seq.add(predicted_time_ms);
ysr@777 2778 avg_prediction = seq.avg() + seq.sd();
ysr@777 2779 }
tonyp@3114 2780
tonyp@3114 2781 should_continue = true;
tonyp@3114 2782 if (hr == NULL) {
tonyp@3114 2783 // No need for an ergo verbose message here,
tonyp@3114 2784 // getNextMarkRegion() does this when it returns NULL.
tonyp@3114 2785 should_continue = false;
tonyp@3114 2786 } else {
tonyp@3114 2787 if (adaptive_young_list_length()) {
tonyp@3114 2788 if (time_remaining_ms < 0.0) {
tonyp@3114 2789 ergo_verbose1(ErgoCSetConstruction,
tonyp@3114 2790 "stop adding old regions to CSet",
tonyp@3114 2791 ergo_format_reason("remaining time is lower than 0")
tonyp@3114 2792 ergo_format_ms("remaining time"),
tonyp@3114 2793 time_remaining_ms);
tonyp@3114 2794 should_continue = false;
tonyp@3114 2795 }
tonyp@3114 2796 } else {
tonyp@3289 2797 if (cset_region_length() >= _young_list_fixed_length) {
tonyp@3114 2798 ergo_verbose2(ErgoCSetConstruction,
tonyp@3114 2799 "stop adding old regions to CSet",
tonyp@3126 2800 ergo_format_reason("CSet length reached target")
tonyp@3114 2801 ergo_format_region("CSet")
tonyp@3114 2802 ergo_format_region("young target"),
tonyp@3289 2803 cset_region_length(), _young_list_fixed_length);
tonyp@3114 2804 should_continue = false;
tonyp@3114 2805 }
tonyp@3114 2806 }
tonyp@3114 2807 }
ysr@777 2808 } while (should_continue);
ysr@777 2809
ysr@777 2810 if (!adaptive_young_list_length() &&
tonyp@3337 2811 cset_region_length() < _young_list_fixed_length) {
tonyp@3114 2812 ergo_verbose2(ErgoCSetConstruction,
tonyp@3337 2813 "request mixed GCs end",
tonyp@3114 2814 ergo_format_reason("CSet length lower than target")
tonyp@3114 2815 ergo_format_region("CSet")
tonyp@3114 2816 ergo_format_region("young target"),
tonyp@3289 2817 cset_region_length(), _young_list_fixed_length);
tonyp@3337 2818 _should_revert_to_young_gcs = true;
tonyp@3114 2819 }
tonyp@3114 2820
tonyp@3114 2821 ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2822 "add old regions to CSet",
tonyp@3114 2823 ergo_format_region("old")
tonyp@3114 2824 ergo_format_ms("predicted old region time"),
tonyp@3289 2825 old_cset_region_length(),
tonyp@3114 2826 predicted_pause_time_ms - prev_predicted_pause_time_ms);
ysr@777 2827 }
ysr@777 2828
johnc@1829 2829 stop_incremental_cset_building();
johnc@1829 2830
ysr@777 2831 count_CS_bytes_used();
ysr@777 2832
tonyp@3114 2833 ergo_verbose5(ErgoCSetConstruction,
tonyp@3114 2834 "finish choosing CSet",
tonyp@3114 2835 ergo_format_region("eden")
tonyp@3114 2836 ergo_format_region("survivors")
tonyp@3114 2837 ergo_format_region("old")
tonyp@3114 2838 ergo_format_ms("predicted pause time")
tonyp@3114 2839 ergo_format_ms("target pause time"),
tonyp@3289 2840 eden_region_length, survivor_region_length,
tonyp@3289 2841 old_cset_region_length(),
tonyp@3114 2842 predicted_pause_time_ms, target_pause_time_ms);
tonyp@3114 2843
ysr@777 2844 double non_young_end_time_sec = os::elapsedTime();
ysr@777 2845 _recorded_non_young_cset_choice_time_ms =
ysr@777 2846 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
ysr@777 2847 }

mercurial