src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Fri, 16 Dec 2011 02:14:27 -0500

author
tonyp
date
Fri, 16 Dec 2011 02:14:27 -0500
changeset 3337
41406797186b
parent 3326
d23d2b18183e
child 3356
67fdcb391461
permissions
-rw-r--r--

7113012: G1: rename not-fully-young GCs as "mixed"
Summary: Renamed partially-young GCs as mixed and fully-young GCs as young. Change all external output that includes those terms (GC log and GC ergo log) as well as any comments, fields, methods, etc. The changeset also includes very minor code tidying up (added some curly brackets).
Reviewed-by: johnc, brutisso

ysr@777 1 /*
tonyp@2472 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
stefank@2314 32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 33 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 34 #include "runtime/arguments.hpp"
stefank@2314 35 #include "runtime/java.hpp"
stefank@2314 36 #include "runtime/mutexLocker.hpp"
stefank@2314 37 #include "utilities/debug.hpp"
ysr@777 38
// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// Each array below has 8 entries; the G1CollectorPolicy constructor
// selects one entry with an index derived from ParallelGCThreads
// (0 threads -> entry 0, N in [1,8] -> entry N-1, more than 8 -> entry 7)
// and uses it to seed the corresponding prediction sequence.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

// Predicted cost (ms) of processing one card during RS update.
static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

// Predicted cost (ms) of scanning one RS entry.
static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

// Predicted cost (ms) of copying one byte during evacuation.
static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

// Per-region fixed overhead (ms) for young and non-young regions.
static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};
ysr@777 78
brutisso@2645 79 // Help class for avoiding interleaved logging
brutisso@2645 80 class LineBuffer: public StackObj {
brutisso@2645 81
brutisso@2645 82 private:
brutisso@2645 83 static const int BUFFER_LEN = 1024;
brutisso@2645 84 static const int INDENT_CHARS = 3;
brutisso@2645 85 char _buffer[BUFFER_LEN];
brutisso@2645 86 int _indent_level;
brutisso@2645 87 int _cur;
brutisso@2645 88
brutisso@2645 89 void vappend(const char* format, va_list ap) {
brutisso@2645 90 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
brutisso@2645 91 if (res != -1) {
brutisso@2645 92 _cur += res;
brutisso@2645 93 } else {
brutisso@2645 94 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
brutisso@2645 95 _buffer[BUFFER_LEN -1] = 0;
brutisso@2645 96 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
brutisso@2645 97 }
brutisso@2645 98 }
brutisso@2645 99
brutisso@2645 100 public:
brutisso@2645 101 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
brutisso@2645 102 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
brutisso@2645 103 _buffer[_cur] = ' ';
brutisso@2645 104 }
brutisso@2645 105 }
brutisso@2645 106
brutisso@2645 107 #ifndef PRODUCT
brutisso@2645 108 ~LineBuffer() {
brutisso@2645 109 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
brutisso@2645 110 }
brutisso@2645 111 #endif
brutisso@2645 112
brutisso@2645 113 void append(const char* format, ...) {
brutisso@2645 114 va_list ap;
brutisso@2645 115 va_start(ap, format);
brutisso@2645 116 vappend(format, ap);
brutisso@2645 117 va_end(ap);
brutisso@2645 118 }
brutisso@2645 119
brutisso@2645 120 void append_and_print_cr(const char* format, ...) {
brutisso@2645 121 va_list ap;
brutisso@2645 122 va_start(ap, format);
brutisso@2645 123 vappend(format, ap);
brutisso@2645 124 va_end(ap);
brutisso@2645 125 gclog_or_tty->print_cr("%s", _buffer);
brutisso@2645 126 _cur = _indent_level * INDENT_CHARS;
brutisso@2645 127 }
brutisso@2645 128 };
brutisso@2645 129
// Construct the policy object. Note that the policy is created before
// the heap (see the region-size setup below), so nothing here may
// touch G1CollectedHeap state beyond the static query for parallel
// GC thread usage.
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),
  _using_new_ratio_calculations(false),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),
  _mark_closure_time_ms(0.0),

  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  // Card-count clearing statistics are only tracked in debug builds;
  // -1.0 marks min/max as "not yet recorded".
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  // Auxiliary timing slots (10 of them), used for ad-hoc measurements.
  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // Prediction sequences: recent samples feed the pause-time model.
  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // We start off doing young-only GCs; mixed GCs are enabled later.
  _gcs_are_young(true),
  _young_pause_num(0),
  _mixed_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _all_full_gc_times_ms(new NumberSeq()),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_young_gcs(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  // Per-worker timing arrays, one slot per parallel GC thread.
  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // Select the defaults entry for this thread count: entry 0 for the
  // serial case, entry N-1 for N in [1,8] threads, entry 7 beyond that.
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  // Seed the prediction sequences with the tuned defaults.
  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                             young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                         non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
}
ysr@777 411
// Advance "i" to the next index, wrapping back to 0 when it reaches "len".
static void inc_mod(int& i, int len) {
  i += 1;
  if (i == len) {
    i = 0;
  }
}
ysr@777 416
// Validate and set up the heap alignment flags for G1. Since G1 manages
// the heap as regions, the minimum alignment is one region
// (HeapRegion::GrainBytes).
void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  // A SurvivorRatio below 1 is meaningless; bail out of VM startup.
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  // Let the shared CollectorPolicy logic finish flag processing.
  CollectorPolicy::initialize_flags();
}
ysr@777 425
tonyp@1720 426 // The easiest way to deal with the parsing of the NewSize /
tonyp@1720 427 // MaxNewSize / etc. parameteres is to re-use the code in the
tonyp@1720 428 // TwoGenerationCollectorPolicy class. This is similar to what
tonyp@1720 429 // ParallelScavenge does with its GenerationSizer class (see
tonyp@1720 430 // ParallelScavengeHeap::initialize()). We might change this in the
tonyp@1720 431 // future, but it's a good start.
tonyp@1720 432 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
tonyp@3172 433 private:
tonyp@3172 434 size_t size_to_region_num(size_t byte_size) {
tonyp@3172 435 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
tonyp@3172 436 }
tonyp@1720 437
tonyp@1720 438 public:
tonyp@1720 439 G1YoungGenSizer() {
tonyp@1720 440 initialize_flags();
tonyp@1720 441 initialize_size_info();
tonyp@1720 442 }
tonyp@1720 443 size_t min_young_region_num() {
tonyp@1720 444 return size_to_region_num(_min_gen0_size);
tonyp@1720 445 }
tonyp@1720 446 size_t initial_young_region_num() {
tonyp@1720 447 return size_to_region_num(_initial_gen0_size);
tonyp@1720 448 }
tonyp@1720 449 size_t max_young_region_num() {
tonyp@1720 450 return size_to_region_num(_max_gen0_size);
tonyp@1720 451 }
tonyp@1720 452 };
tonyp@1720 453
brutisso@3120 454 void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
brutisso@3120 455 assert(number_of_heap_regions > 0, "Heap must be initialized");
brutisso@3120 456 size_t young_size = number_of_heap_regions / (NewRatio + 1);
brutisso@3120 457 _min_desired_young_length = young_size;
brutisso@3120 458 _max_desired_young_length = young_size;
brutisso@3120 459 }
brutisso@3120 460
// Second-stage initialization, called once the heap exists (unlike the
// constructor, which runs before heap creation). Caller must hold the
// Heap_lock. Establishes the young gen sizing mode and the initial
// young list target.
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  // Use the TwoGenerationCollectorPolicy machinery (via G1YoungGenSizer)
  // to parse NewSize / MaxNewSize into region counts.
  G1YoungGenSizer sizer;
  _min_desired_young_length = sizer.min_young_region_num();
  _max_desired_young_length = sizer.max_young_region_num();

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      // Explicit sizes take precedence over the ratio.
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
      update_young_list_size_using_newratio(_g1->n_regions());
      _using_new_ratio_calculations = true;
    }
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");

  // A strict min < max range means the young list length is adapted at
  // runtime; min == max means a user-fixed young gen size.
  set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    assert(_min_desired_young_length == _max_desired_young_length, "Min and max young size differ");
    _young_list_fixed_length = _min_desired_young_length;
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}
ysr@777 500
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  // Counter group is published under the name "GarbageFirst"; the
  // numeric arguments are defined by the GCPolicyCounters constructor.
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
apetrusenko@980 505
tonyp@3119 506 bool G1CollectorPolicy::predict_will_fit(size_t young_length,
tonyp@3119 507 double base_time_ms,
tonyp@3119 508 size_t base_free_regions,
tonyp@3119 509 double target_pause_time_ms) {
tonyp@3119 510 if (young_length >= base_free_regions) {
tonyp@3119 511 // end condition 1: not enough space for the young regions
tonyp@3119 512 return false;
ysr@777 513 }
tonyp@3119 514
tonyp@3119 515 double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
tonyp@3119 516 size_t bytes_to_copy =
tonyp@3119 517 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
tonyp@3119 518 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
tonyp@3119 519 double young_other_time_ms = predict_young_other_time_ms(young_length);
tonyp@3119 520 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
tonyp@3119 521 if (pause_time_ms > target_pause_time_ms) {
tonyp@3119 522 // end condition 2: prediction is over the target pause time
tonyp@3119 523 return false;
tonyp@3119 524 }
tonyp@3119 525
tonyp@3119 526 size_t free_bytes =
tonyp@3119 527 (base_free_regions - young_length) * HeapRegion::GrainBytes;
tonyp@3119 528 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
tonyp@3119 529 // end condition 3: out-of-space (conservatively!)
tonyp@3119 530 return false;
tonyp@3119 531 }
tonyp@3119 532
tonyp@3119 533 // success!
tonyp@3119 534 return true;
ysr@777 535 }
ysr@777 536
brutisso@3120 537 void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
brutisso@3120 538 // re-calculate the necessary reserve
brutisso@3120 539 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
tonyp@3119 540 // We use ceiling so that if reserve_regions_d is > 0.0 (but
tonyp@3119 541 // smaller than 1.0) we'll get 1.
tonyp@3119 542 _reserve_regions = (size_t) ceil(reserve_regions_d);
brutisso@3120 543
brutisso@3120 544 if (_using_new_ratio_calculations) {
brutisso@3120 545 // -XX:NewRatio was specified so we need to update the
brutisso@3120 546 // young gen length when the heap size has changed.
brutisso@3120 547 update_young_list_size_using_newratio(new_number_of_regions);
brutisso@3120 548 }
tonyp@3119 549 }
tonyp@3119 550
tonyp@3119 551 size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
tonyp@3119 552 size_t base_min_length) {
tonyp@3119 553 size_t desired_min_length = 0;
ysr@777 554 if (adaptive_young_list_length()) {
tonyp@3119 555 if (_alloc_rate_ms_seq->num() > 3) {
tonyp@3119 556 double now_sec = os::elapsedTime();
tonyp@3119 557 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
tonyp@3119 558 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@3119 559 desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@3119 560 } else {
tonyp@3119 561 // otherwise we don't have enough info to make the prediction
tonyp@3119 562 }
ysr@777 563 }
brutisso@3120 564 desired_min_length += base_min_length;
brutisso@3120 565 // make sure we don't go below any user-defined minimum bound
brutisso@3120 566 return MAX2(_min_desired_young_length, desired_min_length);
ysr@777 567 }
ysr@777 568
// Return the desired maximum young list length (in regions).
size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _max_desired_young_length;
}
tonyp@3119 575
tonyp@3119 576 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
tonyp@3119 577 if (rs_lengths == (size_t) -1) {
tonyp@3119 578 // if it's set to the default value (-1), we should predict it;
tonyp@3119 579 // otherwise, use the given value.
tonyp@3119 580 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
tonyp@3119 581 }
tonyp@3119 582
tonyp@3119 583 // Calculate the absolute and desired min bounds.
tonyp@3119 584
tonyp@3119 585 // This is how many young regions we already have (currently: the survivors).
tonyp@3119 586 size_t base_min_length = recorded_survivor_regions();
tonyp@3119 587 // This is the absolute minimum young length, which ensures that we
tonyp@3119 588 // can allocate one eden region in the worst-case.
tonyp@3119 589 size_t absolute_min_length = base_min_length + 1;
tonyp@3119 590 size_t desired_min_length =
tonyp@3119 591 calculate_young_list_desired_min_length(base_min_length);
tonyp@3119 592 if (desired_min_length < absolute_min_length) {
tonyp@3119 593 desired_min_length = absolute_min_length;
tonyp@3119 594 }
tonyp@3119 595
tonyp@3119 596 // Calculate the absolute and desired max bounds.
tonyp@3119 597
tonyp@3119 598 // We will try our best not to "eat" into the reserve.
tonyp@3119 599 size_t absolute_max_length = 0;
tonyp@3119 600 if (_free_regions_at_end_of_collection > _reserve_regions) {
tonyp@3119 601 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
tonyp@3119 602 }
tonyp@3119 603 size_t desired_max_length = calculate_young_list_desired_max_length();
tonyp@3119 604 if (desired_max_length > absolute_max_length) {
tonyp@3119 605 desired_max_length = absolute_max_length;
tonyp@3119 606 }
tonyp@3119 607
tonyp@3119 608 size_t young_list_target_length = 0;
tonyp@3119 609 if (adaptive_young_list_length()) {
tonyp@3337 610 if (gcs_are_young()) {
tonyp@3119 611 young_list_target_length =
tonyp@3119 612 calculate_young_list_target_length(rs_lengths,
tonyp@3119 613 base_min_length,
tonyp@3119 614 desired_min_length,
tonyp@3119 615 desired_max_length);
tonyp@3119 616 _rs_lengths_prediction = rs_lengths;
tonyp@3119 617 } else {
tonyp@3119 618 // Don't calculate anything and let the code below bound it to
tonyp@3119 619 // the desired_min_length, i.e., do the next GC as soon as
tonyp@3119 620 // possible to maximize how many old regions we can add to it.
ysr@777 621 }
ysr@777 622 } else {
tonyp@3337 623 if (gcs_are_young()) {
tonyp@3119 624 young_list_target_length = _young_list_fixed_length;
tonyp@3119 625 } else {
tonyp@3337 626 // A bit arbitrary: during mixed GCs we allocate half
tonyp@3119 627 // the young regions to try to add old regions to the CSet.
tonyp@3119 628 young_list_target_length = _young_list_fixed_length / 2;
tonyp@3119 629 // We choose to accept that we might go under the desired min
tonyp@3119 630 // length given that we intentionally ask for a smaller young gen.
tonyp@3119 631 desired_min_length = absolute_min_length;
tonyp@3119 632 }
ysr@777 633 }
ysr@777 634
tonyp@3119 635 // Make sure we don't go over the desired max length, nor under the
tonyp@3119 636 // desired min length. In case they clash, desired_min_length wins
tonyp@3119 637 // which is why that test is second.
tonyp@3119 638 if (young_list_target_length > desired_max_length) {
tonyp@3119 639 young_list_target_length = desired_max_length;
tonyp@3119 640 }
tonyp@3119 641 if (young_list_target_length < desired_min_length) {
tonyp@3119 642 young_list_target_length = desired_min_length;
tonyp@3119 643 }
tonyp@3119 644
tonyp@3119 645 assert(young_list_target_length > recorded_survivor_regions(),
tonyp@3119 646 "we should be able to allocate at least one eden region");
tonyp@3119 647 assert(young_list_target_length >= absolute_min_length, "post-condition");
tonyp@3119 648 _young_list_target_length = young_list_target_length;
tonyp@3119 649
tonyp@3119 650 update_max_gc_locker_expansion();
ysr@777 651 }
ysr@777 652
tonyp@3119 653 size_t
tonyp@3119 654 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
tonyp@3119 655 size_t base_min_length,
tonyp@3119 656 size_t desired_min_length,
tonyp@3119 657 size_t desired_max_length) {
tonyp@3119 658 assert(adaptive_young_list_length(), "pre-condition");
tonyp@3337 659 assert(gcs_are_young(), "only call this for young GCs");
tonyp@3119 660
tonyp@3119 661 // In case some edge-condition makes the desired max length too small...
tonyp@3119 662 if (desired_max_length <= desired_min_length) {
tonyp@3119 663 return desired_min_length;
tonyp@3119 664 }
tonyp@3119 665
tonyp@3119 666 // We'll adjust min_young_length and max_young_length not to include
tonyp@3119 667 // the already allocated young regions (i.e., so they reflect the
tonyp@3119 668 // min and max eden regions we'll allocate). The base_min_length
tonyp@3119 669 // will be reflected in the predictions by the
tonyp@3119 670 // survivor_regions_evac_time prediction.
tonyp@3119 671 assert(desired_min_length > base_min_length, "invariant");
tonyp@3119 672 size_t min_young_length = desired_min_length - base_min_length;
tonyp@3119 673 assert(desired_max_length > base_min_length, "invariant");
tonyp@3119 674 size_t max_young_length = desired_max_length - base_min_length;
tonyp@3119 675
tonyp@3119 676 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
tonyp@3119 677 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
tonyp@3119 678 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
tonyp@3119 679 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
tonyp@3119 680 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
tonyp@3119 681 double base_time_ms =
tonyp@3119 682 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
tonyp@3119 683 survivor_regions_evac_time;
tonyp@3119 684 size_t available_free_regions = _free_regions_at_end_of_collection;
tonyp@3119 685 size_t base_free_regions = 0;
tonyp@3119 686 if (available_free_regions > _reserve_regions) {
tonyp@3119 687 base_free_regions = available_free_regions - _reserve_regions;
tonyp@3119 688 }
tonyp@3119 689
tonyp@3119 690 // Here, we will make sure that the shortest young length that
tonyp@3119 691 // makes sense fits within the target pause time.
tonyp@3119 692
tonyp@3119 693 if (predict_will_fit(min_young_length, base_time_ms,
tonyp@3119 694 base_free_regions, target_pause_time_ms)) {
tonyp@3119 695 // The shortest young length will fit into the target pause time;
tonyp@3119 696 // we'll now check whether the absolute maximum number of young
tonyp@3119 697 // regions will fit in the target pause time. If not, we'll do
tonyp@3119 698 // a binary search between min_young_length and max_young_length.
tonyp@3119 699 if (predict_will_fit(max_young_length, base_time_ms,
tonyp@3119 700 base_free_regions, target_pause_time_ms)) {
tonyp@3119 701 // The maximum young length will fit into the target pause time.
tonyp@3119 702 // We are done so set min young length to the maximum length (as
tonyp@3119 703 // the result is assumed to be returned in min_young_length).
tonyp@3119 704 min_young_length = max_young_length;
tonyp@3119 705 } else {
tonyp@3119 706 // The maximum possible number of young regions will not fit within
tonyp@3119 707 // the target pause time so we'll search for the optimal
tonyp@3119 708 // length. The loop invariants are:
tonyp@3119 709 //
tonyp@3119 710 // min_young_length < max_young_length
tonyp@3119 711 // min_young_length is known to fit into the target pause time
tonyp@3119 712 // max_young_length is known not to fit into the target pause time
tonyp@3119 713 //
tonyp@3119 714 // Going into the loop we know the above hold as we've just
tonyp@3119 715 // checked them. Every time around the loop we check whether
tonyp@3119 716 // the middle value between min_young_length and
tonyp@3119 717 // max_young_length fits into the target pause time. If it
tonyp@3119 718 // does, it becomes the new min. If it doesn't, it becomes
tonyp@3119 719 // the new max. This way we maintain the loop invariants.
tonyp@3119 720
tonyp@3119 721 assert(min_young_length < max_young_length, "invariant");
tonyp@3119 722 size_t diff = (max_young_length - min_young_length) / 2;
tonyp@3119 723 while (diff > 0) {
tonyp@3119 724 size_t young_length = min_young_length + diff;
tonyp@3119 725 if (predict_will_fit(young_length, base_time_ms,
tonyp@3119 726 base_free_regions, target_pause_time_ms)) {
tonyp@3119 727 min_young_length = young_length;
tonyp@3119 728 } else {
tonyp@3119 729 max_young_length = young_length;
tonyp@3119 730 }
tonyp@3119 731 assert(min_young_length < max_young_length, "invariant");
tonyp@3119 732 diff = (max_young_length - min_young_length) / 2;
tonyp@3119 733 }
tonyp@3119 734 // The results is min_young_length which, according to the
tonyp@3119 735 // loop invariants, should fit within the target pause time.
tonyp@3119 736
tonyp@3119 737 // These are the post-conditions of the binary search above:
tonyp@3119 738 assert(min_young_length < max_young_length,
tonyp@3119 739 "otherwise we should have discovered that max_young_length "
tonyp@3119 740 "fits into the pause target and not done the binary search");
tonyp@3119 741 assert(predict_will_fit(min_young_length, base_time_ms,
tonyp@3119 742 base_free_regions, target_pause_time_ms),
tonyp@3119 743 "min_young_length, the result of the binary search, should "
tonyp@3119 744 "fit into the pause target");
tonyp@3119 745 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
tonyp@3119 746 base_free_regions, target_pause_time_ms),
tonyp@3119 747 "min_young_length, the result of the binary search, should be "
tonyp@3119 748 "optimal, so no larger length should fit into the pause target");
tonyp@3119 749 }
tonyp@3119 750 } else {
tonyp@3119 751 // Even the minimum length doesn't fit into the pause time
tonyp@3119 752 // target, return it as the result nevertheless.
tonyp@3119 753 }
tonyp@3119 754 return base_min_length + min_young_length;
ysr@777 755 }
ysr@777 756
apetrusenko@980 757 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 758 double survivor_regions_evac_time = 0.0;
apetrusenko@980 759 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 760 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 761 r = r->get_next_young_region()) {
apetrusenko@980 762 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 763 }
apetrusenko@980 764 return survivor_regions_evac_time;
apetrusenko@980 765 }
apetrusenko@980 766
tonyp@3119 767 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
ysr@777 768 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 769
johnc@1829 770 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 771 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 772 // add 10% to avoid having to recalculate often
ysr@777 773 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
tonyp@3119 774 update_young_list_target_length(rs_lengths_prediction);
ysr@777 775 }
ysr@777 776 }
ysr@777 777
tonyp@3119 778
tonyp@3119 779
ysr@777 780 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
ysr@777 781 bool is_tlab,
ysr@777 782 bool* gc_overhead_limit_was_exceeded) {
ysr@777 783 guarantee(false, "Not using this policy feature yet.");
ysr@777 784 return NULL;
ysr@777 785 }
ysr@777 786
ysr@777 787 // This method controls how a collector handles one or more
ysr@777 788 // of its generations being fully allocated.
ysr@777 789 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
ysr@777 790 bool is_tlab) {
ysr@777 791 guarantee(false, "Not using this policy feature yet.");
ysr@777 792 return NULL;
ysr@777 793 }
ysr@777 794
ysr@777 795
ysr@777 796 #ifndef PRODUCT
ysr@777 797 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 798 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 799 return
ysr@777 800 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 801 // also call verify_young_ages on any additional surv rate groups
ysr@777 802 }
ysr@777 803
ysr@777 804 bool
ysr@777 805 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 806 SurvRateGroup *surv_rate_group) {
ysr@777 807 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 808
ysr@777 809 const char* name = surv_rate_group->name();
ysr@777 810 bool ret = true;
ysr@777 811 int prev_age = -1;
ysr@777 812
ysr@777 813 for (HeapRegion* curr = head;
ysr@777 814 curr != NULL;
ysr@777 815 curr = curr->get_next_young_region()) {
ysr@777 816 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 817 if (group == NULL && !curr->is_survivor()) {
ysr@777 818 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 819 ret = false;
ysr@777 820 }
ysr@777 821
ysr@777 822 if (surv_rate_group == group) {
ysr@777 823 int age = curr->age_in_surv_rate_group();
ysr@777 824
ysr@777 825 if (age < 0) {
ysr@777 826 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 827 ret = false;
ysr@777 828 }
ysr@777 829
ysr@777 830 if (age <= prev_age) {
ysr@777 831 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 832 "(%d, %d)", name, age, prev_age);
ysr@777 833 ret = false;
ysr@777 834 }
ysr@777 835 prev_age = age;
ysr@777 836 }
ysr@777 837 }
ysr@777 838
ysr@777 839 return ret;
ysr@777 840 }
ysr@777 841 #endif // PRODUCT
ysr@777 842
ysr@777 843 void G1CollectorPolicy::record_full_collection_start() {
ysr@777 844 _cur_collection_start_sec = os::elapsedTime();
ysr@777 845 // Release the future to-space so that it is available for compaction into.
ysr@777 846 _g1->set_full_collection();
ysr@777 847 }
ysr@777 848
ysr@777 849 void G1CollectorPolicy::record_full_collection_end() {
ysr@777 850 // Consider this like a collection pause for the purposes of allocation
ysr@777 851 // since last pause.
ysr@777 852 double end_sec = os::elapsedTime();
ysr@777 853 double full_gc_time_sec = end_sec - _cur_collection_start_sec;
ysr@777 854 double full_gc_time_ms = full_gc_time_sec * 1000.0;
ysr@777 855
ysr@777 856 _all_full_gc_times_ms->add(full_gc_time_ms);
ysr@777 857
tonyp@1030 858 update_recent_gc_times(end_sec, full_gc_time_ms);
ysr@777 859
ysr@777 860 _g1->clear_full_collection();
ysr@777 861
tonyp@3337 862 // "Nuke" the heuristics that control the young/mixed GC
tonyp@3337 863 // transitions and make sure we start with young GCs after the Full GC.
tonyp@3337 864 set_gcs_are_young(true);
tonyp@3337 865 _last_young_gc = false;
tonyp@3337 866 _should_revert_to_young_gcs = false;
tonyp@1794 867 clear_initiate_conc_mark_if_possible();
tonyp@1794 868 clear_during_initial_mark_pause();
ysr@777 869 _known_garbage_bytes = 0;
ysr@777 870 _known_garbage_ratio = 0.0;
ysr@777 871 _in_marking_window = false;
ysr@777 872 _in_marking_window_im = false;
ysr@777 873
ysr@777 874 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 875 // also call this on any additional surv rate groups
ysr@777 876
apetrusenko@980 877 record_survivor_regions(0, NULL, NULL);
apetrusenko@980 878
ysr@777 879 _free_regions_at_end_of_collection = _g1->free_regions();
apetrusenko@980 880 // Reset survivors SurvRateGroup.
apetrusenko@980 881 _survivor_surv_rate_group->reset();
tonyp@3119 882 update_young_list_target_length();
tonyp@3209 883 _collectionSetChooser->updateAfterFullCollection();
tonyp@2315 884 }
ysr@777 885
ysr@777 886 void G1CollectorPolicy::record_stop_world_start() {
ysr@777 887 _stop_world_start = os::elapsedTime();
ysr@777 888 }
ysr@777 889
ysr@777 890 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
ysr@777 891 size_t start_used) {
ysr@777 892 if (PrintGCDetails) {
ysr@777 893 gclog_or_tty->stamp(PrintGCTimeStamps);
ysr@777 894 gclog_or_tty->print("[GC pause");
tonyp@3337 895 gclog_or_tty->print(" (%s)", gcs_are_young() ? "young" : "mixed");
ysr@777 896 }
ysr@777 897
tonyp@3119 898 // We only need to do this here as the policy will only be applied
tonyp@3119 899 // to the GC we're about to start. so, no point is calculating this
tonyp@3119 900 // every time we calculate / recalculate the target young length.
tonyp@3119 901 update_survivors_policy();
tonyp@3119 902
tonyp@2315 903 assert(_g1->used() == _g1->recalculate_used(),
tonyp@2315 904 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
tonyp@2315 905 _g1->used(), _g1->recalculate_used()));
ysr@777 906
ysr@777 907 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
ysr@777 908 _all_stop_world_times_ms->add(s_w_t_ms);
ysr@777 909 _stop_world_start = 0.0;
ysr@777 910
ysr@777 911 _cur_collection_start_sec = start_time_sec;
ysr@777 912 _cur_collection_pause_used_at_start_bytes = start_used;
ysr@777 913 _cur_collection_pause_used_regions_at_start = _g1->used_regions();
ysr@777 914 _pending_cards = _g1->pending_card_num();
ysr@777 915 _max_pending_cards = _g1->max_pending_card_num();
ysr@777 916
ysr@777 917 _bytes_in_collection_set_before_gc = 0;
tonyp@3028 918 _bytes_copied_during_gc = 0;
ysr@777 919
tonyp@2961 920 YoungList* young_list = _g1->young_list();
tonyp@2961 921 _eden_bytes_before_gc = young_list->eden_used_bytes();
tonyp@2961 922 _survivor_bytes_before_gc = young_list->survivor_used_bytes();
tonyp@2961 923 _capacity_before_gc = _g1->capacity();
tonyp@2961 924
ysr@777 925 #ifdef DEBUG
ysr@777 926 // initialise these to something well known so that we can spot
ysr@777 927 // if they are not set properly
ysr@777 928
ysr@777 929 for (int i = 0; i < _parallel_gc_threads; ++i) {
tonyp@1966 930 _par_last_gc_worker_start_times_ms[i] = -1234.0;
tonyp@1966 931 _par_last_ext_root_scan_times_ms[i] = -1234.0;
tonyp@1966 932 _par_last_mark_stack_scan_times_ms[i] = -1234.0;
tonyp@1966 933 _par_last_update_rs_times_ms[i] = -1234.0;
tonyp@1966 934 _par_last_update_rs_processed_buffers[i] = -1234.0;
tonyp@1966 935 _par_last_scan_rs_times_ms[i] = -1234.0;
tonyp@1966 936 _par_last_obj_copy_times_ms[i] = -1234.0;
tonyp@1966 937 _par_last_termination_times_ms[i] = -1234.0;
tonyp@1966 938 _par_last_termination_attempts[i] = -1234.0;
tonyp@1966 939 _par_last_gc_worker_end_times_ms[i] = -1234.0;
brutisso@2712 940 _par_last_gc_worker_times_ms[i] = -1234.0;
johnc@3219 941 _par_last_gc_worker_other_times_ms[i] = -1234.0;
ysr@777 942 }
ysr@777 943 #endif
ysr@777 944
ysr@777 945 for (int i = 0; i < _aux_num; ++i) {
ysr@777 946 _cur_aux_times_ms[i] = 0.0;
ysr@777 947 _cur_aux_times_set[i] = false;
ysr@777 948 }
ysr@777 949
johnc@3295 950 // This is initialized to zero here and is set during
johnc@3219 951 // the evacuation pause if marking is in progress.
johnc@3219 952 _cur_satb_drain_time_ms = 0.0;
ysr@777 953
tonyp@3337 954 _last_gc_was_young = false;
ysr@777 955
ysr@777 956 // do that for any other surv rate groups
ysr@777 957 _short_lived_surv_rate_group->stop_adding_regions();
tonyp@1717 958 _survivors_age_table.clear();
apetrusenko@980 959
ysr@777 960 assert( verify_young_ages(), "region age verification" );
ysr@777 961 }
ysr@777 962
brutisso@3065 963 void G1CollectorPolicy::record_concurrent_mark_init_end(double
ysr@777 964 mark_init_elapsed_time_ms) {
ysr@777 965 _during_marking = true;
tonyp@1794 966 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
tonyp@1794 967 clear_during_initial_mark_pause();
ysr@777 968 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
ysr@777 969 }
ysr@777 970
ysr@777 971 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
ysr@777 972 _mark_remark_start_sec = os::elapsedTime();
ysr@777 973 _during_marking = false;
ysr@777 974 }
ysr@777 975
ysr@777 976 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 977 double end_time_sec = os::elapsedTime();
ysr@777 978 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 979 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 980 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 981 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 982
ysr@777 983 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 984 }
ysr@777 985
ysr@777 986 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
ysr@777 987 _mark_cleanup_start_sec = os::elapsedTime();
ysr@777 988 }
ysr@777 989
tonyp@3209 990 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
tonyp@3337 991 _should_revert_to_young_gcs = false;
tonyp@3337 992 _last_young_gc = true;
brutisso@3065 993 _in_marking_window = false;
ysr@777 994 }
ysr@777 995
ysr@777 996 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 997 if (_stop_world_start > 0.0) {
ysr@777 998 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 999 _all_yield_times_ms->add(yield_ms);
ysr@777 1000 }
ysr@777 1001 }
ysr@777 1002
ysr@777 1003 void G1CollectorPolicy::record_concurrent_pause_end() {
ysr@777 1004 }
ysr@777 1005
ysr@777 1006 template<class T>
ysr@777 1007 T sum_of(T* sum_arr, int start, int n, int N) {
ysr@777 1008 T sum = (T)0;
ysr@777 1009 for (int i = 0; i < n; i++) {
ysr@777 1010 int j = (start + i) % N;
ysr@777 1011 sum += sum_arr[j];
ysr@777 1012 }
ysr@777 1013 return sum;
ysr@777 1014 }
ysr@777 1015
tonyp@1966 1016 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1017 const char* str,
brutisso@2712 1018 double* data) {
ysr@777 1019 double min = data[0], max = data[0];
ysr@777 1020 double total = 0.0;
brutisso@2645 1021 LineBuffer buf(level);
brutisso@2645 1022 buf.append("[%s (ms):", str);
jmasa@3294 1023 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1024 double val = data[i];
ysr@777 1025 if (val < min)
ysr@777 1026 min = val;
ysr@777 1027 if (val > max)
ysr@777 1028 max = val;
ysr@777 1029 total += val;
brutisso@2645 1030 buf.append(" %3.1lf", val);
ysr@777 1031 }
brutisso@2712 1032 buf.append_and_print_cr("");
jmasa@3294 1033 double avg = total / (double) no_of_gc_threads();
brutisso@2712 1034 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
brutisso@2712 1035 avg, min, max, max - min);
ysr@777 1036 }
ysr@777 1037
tonyp@1966 1038 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1039 const char* str,
brutisso@2712 1040 double* data) {
ysr@777 1041 double min = data[0], max = data[0];
ysr@777 1042 double total = 0.0;
brutisso@2645 1043 LineBuffer buf(level);
brutisso@2645 1044 buf.append("[%s :", str);
jmasa@3294 1045 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1046 double val = data[i];
ysr@777 1047 if (val < min)
ysr@777 1048 min = val;
ysr@777 1049 if (val > max)
ysr@777 1050 max = val;
ysr@777 1051 total += val;
brutisso@2645 1052 buf.append(" %d", (int) val);
ysr@777 1053 }
brutisso@2712 1054 buf.append_and_print_cr("");
jmasa@3294 1055 double avg = total / (double) no_of_gc_threads();
brutisso@2712 1056 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
brutisso@2712 1057 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
ysr@777 1058 }
ysr@777 1059
johnc@3219 1060 void G1CollectorPolicy::print_stats(int level,
johnc@3219 1061 const char* str,
johnc@3219 1062 double value) {
brutisso@2645 1063 LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1064 }
ysr@777 1065
johnc@3219 1066 void G1CollectorPolicy::print_stats(int level,
johnc@3219 1067 const char* str,
johnc@3219 1068 int value) {
brutisso@2645 1069 LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
ysr@777 1070 }
ysr@777 1071
johnc@3219 1072 double G1CollectorPolicy::avg_value(double* data) {
jmasa@2188 1073 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1074 double ret = 0.0;
jmasa@3294 1075 for (uint i = 0; i < no_of_gc_threads(); ++i) {
ysr@777 1076 ret += data[i];
johnc@3219 1077 }
jmasa@3294 1078 return ret / (double) no_of_gc_threads();
ysr@777 1079 } else {
ysr@777 1080 return data[0];
ysr@777 1081 }
ysr@777 1082 }
ysr@777 1083
johnc@3219 1084 double G1CollectorPolicy::max_value(double* data) {
jmasa@2188 1085 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1086 double ret = data[0];
jmasa@3294 1087 for (uint i = 1; i < no_of_gc_threads(); ++i) {
johnc@3219 1088 if (data[i] > ret) {
ysr@777 1089 ret = data[i];
johnc@3219 1090 }
johnc@3219 1091 }
ysr@777 1092 return ret;
ysr@777 1093 } else {
ysr@777 1094 return data[0];
ysr@777 1095 }
ysr@777 1096 }
ysr@777 1097
johnc@3219 1098 double G1CollectorPolicy::sum_of_values(double* data) {
jmasa@2188 1099 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1100 double sum = 0.0;
jmasa@3294 1101 for (uint i = 0; i < no_of_gc_threads(); i++) {
ysr@777 1102 sum += data[i];
johnc@3219 1103 }
ysr@777 1104 return sum;
ysr@777 1105 } else {
ysr@777 1106 return data[0];
ysr@777 1107 }
ysr@777 1108 }
ysr@777 1109
johnc@3219 1110 double G1CollectorPolicy::max_sum(double* data1, double* data2) {
ysr@777 1111 double ret = data1[0] + data2[0];
ysr@777 1112
jmasa@2188 1113 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 1114 for (uint i = 1; i < no_of_gc_threads(); ++i) {
ysr@777 1115 double data = data1[i] + data2[i];
johnc@3219 1116 if (data > ret) {
ysr@777 1117 ret = data;
johnc@3219 1118 }
ysr@777 1119 }
ysr@777 1120 }
ysr@777 1121 return ret;
ysr@777 1122 }
ysr@777 1123
ysr@777 1124 // Anything below that is considered to be zero
ysr@777 1125 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1126
jmasa@3294 1127 void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) {
ysr@777 1128 double end_time_sec = os::elapsedTime();
ysr@777 1129 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1130 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
tonyp@3289 1131 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
tonyp@3289 1132 "otherwise, the subtraction below does not make sense");
ysr@777 1133 size_t rs_size =
tonyp@3289 1134 _cur_collection_pause_used_regions_at_start - cset_region_length();
ysr@777 1135 size_t cur_used_bytes = _g1->used();
ysr@777 1136 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1137 bool last_pause_included_initial_mark = false;
tonyp@2062 1138 bool update_stats = !_g1->evacuation_failed();
jmasa@3294 1139 set_no_of_gc_threads(no_of_gc_threads);
ysr@777 1140
ysr@777 1141 #ifndef PRODUCT
ysr@777 1142 if (G1YoungSurvRateVerbose) {
ysr@777 1143 gclog_or_tty->print_cr("");
ysr@777 1144 _short_lived_surv_rate_group->print();
ysr@777 1145 // do that for any other surv rate groups too
ysr@777 1146 }
ysr@777 1147 #endif // PRODUCT
ysr@777 1148
brutisso@3065 1149 last_pause_included_initial_mark = during_initial_mark_pause();
brutisso@3065 1150 if (last_pause_included_initial_mark)
brutisso@3065 1151 record_concurrent_mark_init_end(0.0);
brutisso@3065 1152
tonyp@3114 1153 size_t marking_initiating_used_threshold =
brutisso@3065 1154 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
brutisso@3065 1155
tonyp@3337 1156 if (!_g1->mark_in_progress() && !_last_young_gc) {
brutisso@3065 1157 assert(!last_pause_included_initial_mark, "invariant");
tonyp@3114 1158 if (cur_used_bytes > marking_initiating_used_threshold) {
tonyp@3114 1159 if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
tonyp@1794 1160 assert(!during_initial_mark_pause(), "we should not see this here");
tonyp@1794 1161
tonyp@3114 1162 ergo_verbose3(ErgoConcCycles,
tonyp@3114 1163 "request concurrent cycle initiation",
tonyp@3114 1164 ergo_format_reason("occupancy higher than threshold")
tonyp@3114 1165 ergo_format_byte("occupancy")
tonyp@3114 1166 ergo_format_byte_perc("threshold"),
tonyp@3114 1167 cur_used_bytes,
tonyp@3114 1168 marking_initiating_used_threshold,
tonyp@3114 1169 (double) InitiatingHeapOccupancyPercent);
tonyp@3114 1170
tonyp@1794 1171 // Note: this might have already been set, if during the last
tonyp@1794 1172 // pause we decided to start a cycle but at the beginning of
tonyp@1794 1173 // this pause we decided to postpone it. That's OK.
tonyp@1794 1174 set_initiate_conc_mark_if_possible();
tonyp@3114 1175 } else {
tonyp@3114 1176 ergo_verbose2(ErgoConcCycles,
tonyp@3114 1177 "do not request concurrent cycle initiation",
tonyp@3114 1178 ergo_format_reason("occupancy lower than previous occupancy")
tonyp@3114 1179 ergo_format_byte("occupancy")
tonyp@3114 1180 ergo_format_byte("previous occupancy"),
tonyp@3114 1181 cur_used_bytes,
tonyp@3114 1182 _prev_collection_pause_used_at_end_bytes);
tonyp@3114 1183 }
ysr@777 1184 }
ysr@777 1185 }
ysr@777 1186
brutisso@3065 1187 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
brutisso@3065 1188
ysr@777 1189 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1190 end_time_sec, false);
ysr@777 1191
ysr@777 1192 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1193 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1194 // can lead to more memory being used during collection than was used
ysr@777 1195 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1196 // Pauses in which evacuation failed can also lead to negative
ysr@777 1197 // collections, since no space is reclaimed from a region containing an
ysr@777 1198 // object whose evacuation failed.
ysr@777 1199 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1200 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1201 // (DLD, 10/05.)
ysr@777 1202 assert((true || parallel) // Always using GC LABs now.
ysr@777 1203 || _g1->evacuation_failed()
ysr@777 1204 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1205 "Negative collection");
ysr@777 1206
ysr@777 1207 size_t freed_bytes =
ysr@777 1208 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1209 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1210
ysr@777 1211 double survival_fraction =
ysr@777 1212 (double)surviving_bytes/
ysr@777 1213 (double)_collection_set_bytes_used_before;
ysr@777 1214
johnc@3219 1215 // These values are used to update the summary information that is
johnc@3219 1216 // displayed when TraceGen0Time is enabled, and are output as part
johnc@3219 1217 // of the PrintGCDetails output, in the non-parallel case.
johnc@3219 1218
johnc@3021 1219 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
johnc@3021 1220 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
johnc@3021 1221 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
johnc@3021 1222 double update_rs_processed_buffers =
johnc@3021 1223 sum_of_values(_par_last_update_rs_processed_buffers);
johnc@3021 1224 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
johnc@3021 1225 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
johnc@3021 1226 double termination_time = avg_value(_par_last_termination_times_ms);
johnc@3021 1227
johnc@3219 1228 double known_time = ext_root_scan_time +
johnc@3219 1229 mark_stack_scan_time +
johnc@3219 1230 update_rs_time +
johnc@3219 1231 scan_rs_time +
johnc@3219 1232 obj_copy_time;
johnc@3219 1233
johnc@3219 1234 double other_time_ms = elapsed_ms;
johnc@3219 1235
johnc@3219 1236 // Subtract the SATB drain time. It's initialized to zero at the
johnc@3219 1237 // start of the pause and is updated during the pause if marking
johnc@3219 1238 // is in progress.
johnc@3219 1239 other_time_ms -= _cur_satb_drain_time_ms;
johnc@3219 1240
johnc@3219 1241 if (parallel) {
johnc@3219 1242 other_time_ms -= _cur_collection_par_time_ms;
johnc@3219 1243 } else {
johnc@3219 1244 other_time_ms -= known_time;
johnc@3219 1245 }
johnc@3219 1246
johnc@3219 1247 // Subtract the time taken to clean the card table from the
johnc@3219 1248 // current value of "other time"
johnc@3219 1249 other_time_ms -= _cur_clear_ct_time_ms;
johnc@3219 1250
johnc@3296 1251 // Subtract the time spent completing marking in the collection
johnc@3296 1252 // set. Note if marking is not in progress during the pause
johnc@3296 1253 // the value of _mark_closure_time_ms will be zero.
johnc@3296 1254 other_time_ms -= _mark_closure_time_ms;
johnc@3296 1255
johnc@3219 1256 // TraceGen0Time and TraceGen1Time summary info updating.
johnc@3219 1257 _all_pause_times_ms->add(elapsed_ms);
johnc@3021 1258
tonyp@1030 1259 if (update_stats) {
johnc@3219 1260 _summary->record_total_time_ms(elapsed_ms);
johnc@3219 1261 _summary->record_other_time_ms(other_time_ms);
johnc@3219 1262
johnc@3219 1263 MainBodySummary* body_summary = _summary->main_body_summary();
johnc@3219 1264 assert(body_summary != NULL, "should not be null!");
johnc@3219 1265
johnc@3219 1266 // This will be non-zero iff marking is currently in progress (i.e.
johnc@3219 1267 // _g1->mark_in_progress() == true) and the currrent pause was not
johnc@3219 1268 // an initial mark pause. Since the body_summary items are NumberSeqs,
johnc@3219 1269 // however, they have to be consistent and updated in lock-step with
johnc@3219 1270 // each other. Therefore we unconditionally record the SATB drain
johnc@3219 1271 // time - even if it's zero.
johnc@3219 1272 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
johnc@3021 1273
johnc@3021 1274 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
johnc@3021 1275 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
johnc@3021 1276 body_summary->record_update_rs_time_ms(update_rs_time);
johnc@3021 1277 body_summary->record_scan_rs_time_ms(scan_rs_time);
johnc@3021 1278 body_summary->record_obj_copy_time_ms(obj_copy_time);
johnc@3219 1279
johnc@3021 1280 if (parallel) {
johnc@3021 1281 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
johnc@3021 1282 body_summary->record_termination_time_ms(termination_time);
johnc@3219 1283
johnc@3219 1284 double parallel_known_time = known_time + termination_time;
johnc@3219 1285 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
johnc@3021 1286 body_summary->record_parallel_other_time_ms(parallel_other_time);
johnc@3021 1287 }
johnc@3219 1288
johnc@3021 1289 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
johnc@3219 1290 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
johnc@3021 1291
ysr@777 1292 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1293 // fragmentation can produce negative collections. Same with evac
ysr@777 1294 // failure.
ysr@777 1295 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1296 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1297 // (DLD, 10/05.
ysr@777 1298 assert((true || parallel)
ysr@777 1299 || _g1->evacuation_failed()
ysr@777 1300 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1301 "Or else negative collection!");
johnc@3219 1302
ysr@777 1303 // this is where we update the allocation rate of the application
ysr@777 1304 double app_time_ms =
ysr@777 1305 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1306 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1307 // This usually happens due to the timer not having the required
ysr@777 1308 // granularity. Some Linuxes are the usual culprits.
ysr@777 1309 // We'll just set it to something (arbitrarily) small.
ysr@777 1310 app_time_ms = 1.0;
ysr@777 1311 }
tonyp@3289 1312 // We maintain the invariant that all objects allocated by mutator
tonyp@3289 1313 // threads will be allocated out of eden regions. So, we can use
tonyp@3289 1314 // the eden region number allocated since the previous GC to
tonyp@3289 1315 // calculate the application's allocate rate. The only exception
tonyp@3289 1316 // to that is humongous objects that are allocated separately. But
tonyp@3289 1317 // given that humongous object allocations do not really affect
tonyp@3289 1318 // either the pause's duration nor when the next pause will take
tonyp@3289 1319 // place we can safely ignore them here.
tonyp@3289 1320 size_t regions_allocated = eden_cset_region_length();
ysr@777 1321 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1322 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1323
ysr@777 1324 double interval_ms =
ysr@777 1325 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1326 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1327 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1328 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1329 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1330 #ifndef PRODUCT
ysr@1521 1331 // Dump info to allow post-facto debugging
ysr@1521 1332 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1333 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1334 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1335 _recent_gc_times_ms->dump();
ysr@1521 1336 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1337 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1338 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1339 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1340 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1341 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1342 #endif // !PRODUCT
ysr@1522 1343 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1344 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1345 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1346 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1347 } else {
ysr@1521 1348 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1349 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1350 }
ysr@1521 1351 }
ysr@777 1352 }
ysr@777 1353
johnc@3219 1354 for (int i = 0; i < _aux_num; ++i) {
johnc@3219 1355 if (_cur_aux_times_set[i]) {
johnc@3219 1356 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
johnc@3219 1357 }
johnc@3219 1358 }
johnc@3219 1359
johnc@3219 1360 // PrintGCDetails output
ysr@777 1361 if (PrintGCDetails) {
johnc@3219 1362 bool print_marking_info =
johnc@3219 1363 _g1->mark_in_progress() && !last_pause_included_initial_mark;
johnc@3219 1364
tonyp@2062 1365 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1366 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1367 elapsed_ms / 1000.0);
ysr@777 1368
johnc@3219 1369 if (print_marking_info) {
tonyp@2062 1370 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
tonyp@2062 1371 }
johnc@3219 1372
tonyp@2062 1373 if (parallel) {
tonyp@2062 1374 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
johnc@3219 1375 print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
johnc@3219 1376 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
johnc@3219 1377 if (print_marking_info) {
johnc@3219 1378 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
johnc@3219 1379 }
tonyp@2062 1380 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
brutisso@2712 1381 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
tonyp@2062 1382 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1383 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1384 print_par_stats(2, "Termination", _par_last_termination_times_ms);
brutisso@2712 1385 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
johnc@3219 1386 print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
brutisso@2712 1387
brutisso@2712 1388 for (int i = 0; i < _parallel_gc_threads; i++) {
brutisso@2712 1389 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
johnc@3219 1390
johnc@3219 1391 double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
johnc@3219 1392 _par_last_mark_stack_scan_times_ms[i] +
johnc@3219 1393 _par_last_update_rs_times_ms[i] +
johnc@3219 1394 _par_last_scan_rs_times_ms[i] +
johnc@3219 1395 _par_last_obj_copy_times_ms[i] +
johnc@3219 1396 _par_last_termination_times_ms[i];
johnc@3219 1397
johnc@3219 1398 _par_last_gc_worker_other_times_ms[i] = _cur_collection_par_time_ms - worker_known_time;
brutisso@2712 1399 }
johnc@3219 1400 print_par_stats(2, "GC Worker", _par_last_gc_worker_times_ms);
johnc@3219 1401 print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
tonyp@2062 1402 } else {
johnc@3219 1403 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
johnc@3219 1404 if (print_marking_info) {
johnc@3219 1405 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
johnc@3219 1406 }
tonyp@2062 1407 print_stats(1, "Update RS", update_rs_time);
johnc@3219 1408 print_stats(2, "Processed Buffers", (int)update_rs_processed_buffers);
tonyp@2062 1409 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1410 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1411 }
johnc@3296 1412 if (print_marking_info) {
johnc@3296 1413 print_stats(1, "Complete CSet Marking", _mark_closure_time_ms);
johnc@3296 1414 }
johnc@3219 1415 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
johnc@1325 1416 #ifndef PRODUCT
johnc@1325 1417 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1418 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1419 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1420 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1421 if (_num_cc_clears > 0) {
johnc@1325 1422 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1423 }
johnc@1325 1424 #endif
ysr@777 1425 print_stats(1, "Other", other_time_ms);
johnc@3296 1426 print_stats(2, "Choose CSet",
johnc@3296 1427 (_recorded_young_cset_choice_time_ms +
johnc@3296 1428 _recorded_non_young_cset_choice_time_ms));
johnc@3175 1429 print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
johnc@3175 1430 print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
johnc@3296 1431 print_stats(2, "Free CSet",
johnc@3296 1432 (_recorded_young_free_cset_time_ms +
johnc@3296 1433 _recorded_non_young_free_cset_time_ms));
johnc@1829 1434
ysr@777 1435 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1436 if (_cur_aux_times_set[i]) {
ysr@777 1437 char buffer[96];
ysr@777 1438 sprintf(buffer, "Aux%d", i);
ysr@777 1439 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1440 }
ysr@777 1441 }
ysr@777 1442 }
ysr@777 1443
ysr@777 1444 // Update the efficiency-since-mark vars.
ysr@777 1445 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1446 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1447 // This usually happens due to the timer not having the required
ysr@777 1448 // granularity. Some Linuxes are the usual culprits.
ysr@777 1449 // We'll just set it to something (arbitrarily) small.
ysr@777 1450 proc_ms = 1.0;
ysr@777 1451 }
ysr@777 1452 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1453
ysr@777 1454 bool new_in_marking_window = _in_marking_window;
ysr@777 1455 bool new_in_marking_window_im = false;
tonyp@1794 1456 if (during_initial_mark_pause()) {
ysr@777 1457 new_in_marking_window = true;
ysr@777 1458 new_in_marking_window_im = true;
ysr@777 1459 }
ysr@777 1460
tonyp@3337 1461 if (_last_young_gc) {
johnc@3178 1462 if (!last_pause_included_initial_mark) {
tonyp@3337 1463 ergo_verbose2(ErgoMixedGCs,
tonyp@3337 1464 "start mixed GCs",
johnc@3178 1465 ergo_format_byte_perc("known garbage"),
johnc@3178 1466 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3337 1467 set_gcs_are_young(false);
johnc@3178 1468 } else {
tonyp@3337 1469 ergo_verbose0(ErgoMixedGCs,
tonyp@3337 1470 "do not start mixed GCs",
johnc@3178 1471 ergo_format_reason("concurrent cycle is about to start"));
johnc@3178 1472 }
tonyp@3337 1473 _last_young_gc = false;
brutisso@3065 1474 }
brutisso@3065 1475
tonyp@3337 1476 if (!_last_gc_was_young) {
tonyp@3337 1477 if (_should_revert_to_young_gcs) {
tonyp@3337 1478 ergo_verbose2(ErgoMixedGCs,
tonyp@3337 1479 "end mixed GCs",
tonyp@3337 1480 ergo_format_reason("mixed GCs end requested")
tonyp@3114 1481 ergo_format_byte_perc("known garbage"),
tonyp@3114 1482 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3337 1483 set_gcs_are_young(true);
tonyp@3114 1484 } else if (_known_garbage_ratio < 0.05) {
tonyp@3337 1485 ergo_verbose3(ErgoMixedGCs,
tonyp@3337 1486 "end mixed GCs",
tonyp@3114 1487 ergo_format_reason("known garbage percent lower than threshold")
tonyp@3114 1488 ergo_format_byte_perc("known garbage")
tonyp@3114 1489 ergo_format_perc("threshold"),
tonyp@3114 1490 _known_garbage_bytes, _known_garbage_ratio * 100.0,
tonyp@3114 1491 0.05 * 100.0);
tonyp@3337 1492 set_gcs_are_young(true);
tonyp@3114 1493 } else if (adaptive_young_list_length() &&
tonyp@3114 1494 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
tonyp@3337 1495 ergo_verbose5(ErgoMixedGCs,
tonyp@3337 1496 "end mixed GCs",
tonyp@3114 1497 ergo_format_reason("current GC efficiency lower than "
tonyp@3337 1498 "predicted young GC efficiency")
tonyp@3114 1499 ergo_format_double("GC efficiency factor")
tonyp@3114 1500 ergo_format_double("current GC efficiency")
tonyp@3337 1501 ergo_format_double("predicted young GC efficiency")
tonyp@3114 1502 ergo_format_byte_perc("known garbage"),
tonyp@3114 1503 get_gc_eff_factor(), cur_efficiency,
tonyp@3114 1504 predict_young_gc_eff(),
tonyp@3114 1505 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3337 1506 set_gcs_are_young(true);
ysr@777 1507 }
brutisso@3065 1508 }
tonyp@3337 1509 _should_revert_to_young_gcs = false;
tonyp@3337 1510
tonyp@3337 1511 if (_last_gc_was_young && !_during_marking) {
brutisso@3065 1512 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1513 }
ysr@777 1514
ysr@777 1515 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1516 // do that for any other surv rate groupsx
ysr@777 1517
apetrusenko@1112 1518 if (update_stats) {
ysr@777 1519 double pause_time_ms = elapsed_ms;
ysr@777 1520
ysr@777 1521 size_t diff = 0;
ysr@777 1522 if (_max_pending_cards >= _pending_cards)
ysr@777 1523 diff = _max_pending_cards - _pending_cards;
ysr@777 1524 _pending_card_diff_seq->add((double) diff);
ysr@777 1525
ysr@777 1526 double cost_per_card_ms = 0.0;
ysr@777 1527 if (_pending_cards > 0) {
ysr@777 1528 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1529 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1530 }
ysr@777 1531
ysr@777 1532 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1533
ysr@777 1534 double cost_per_entry_ms = 0.0;
ysr@777 1535 if (cards_scanned > 10) {
ysr@777 1536 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
tonyp@3337 1537 if (_last_gc_was_young) {
ysr@777 1538 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1539 } else {
tonyp@3337 1540 _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
tonyp@3337 1541 }
ysr@777 1542 }
ysr@777 1543
ysr@777 1544 if (_max_rs_lengths > 0) {
ysr@777 1545 double cards_per_entry_ratio =
ysr@777 1546 (double) cards_scanned / (double) _max_rs_lengths;
tonyp@3337 1547 if (_last_gc_was_young) {
tonyp@3337 1548 _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1549 } else {
tonyp@3337 1550 _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
tonyp@3337 1551 }
ysr@777 1552 }
ysr@777 1553
tonyp@3326 1554 // It turns out that, sometimes, _max_rs_lengths can get smaller
tonyp@3326 1555 // than _recorded_rs_lengths which causes rs_length_diff to get
tonyp@3326 1556 // very large and mess up the RSet length predictions. We'll be
tonyp@3326 1557 // defensive until we work out why this happens.
tonyp@3326 1558 size_t rs_length_diff = 0;
tonyp@3326 1559 if (_max_rs_lengths > _recorded_rs_lengths) {
tonyp@3326 1560 rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
tonyp@3326 1561 }
tonyp@3326 1562 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1563
ysr@777 1564 size_t copied_bytes = surviving_bytes;
ysr@777 1565 double cost_per_byte_ms = 0.0;
ysr@777 1566 if (copied_bytes > 0) {
ysr@777 1567 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
tonyp@3337 1568 if (_in_marking_window) {
ysr@777 1569 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
tonyp@3337 1570 } else {
ysr@777 1571 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
tonyp@3337 1572 }
ysr@777 1573 }
ysr@777 1574
ysr@777 1575 double all_other_time_ms = pause_time_ms -
johnc@1829 1576 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1577 _mark_closure_time_ms + termination_time);
ysr@777 1578
ysr@777 1579 double young_other_time_ms = 0.0;
tonyp@3289 1580 if (young_cset_region_length() > 0) {
ysr@777 1581 young_other_time_ms =
ysr@777 1582 _recorded_young_cset_choice_time_ms +
ysr@777 1583 _recorded_young_free_cset_time_ms;
ysr@777 1584 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
tonyp@3289 1585 (double) young_cset_region_length());
ysr@777 1586 }
ysr@777 1587 double non_young_other_time_ms = 0.0;
tonyp@3289 1588 if (old_cset_region_length() > 0) {
ysr@777 1589 non_young_other_time_ms =
ysr@777 1590 _recorded_non_young_cset_choice_time_ms +
ysr@777 1591 _recorded_non_young_free_cset_time_ms;
ysr@777 1592
ysr@777 1593 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
tonyp@3289 1594 (double) old_cset_region_length());
ysr@777 1595 }
ysr@777 1596
ysr@777 1597 double constant_other_time_ms = all_other_time_ms -
ysr@777 1598 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1599 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1600
ysr@777 1601 double survival_ratio = 0.0;
ysr@777 1602 if (_bytes_in_collection_set_before_gc > 0) {
tonyp@3028 1603 survival_ratio = (double) _bytes_copied_during_gc /
tonyp@3028 1604 (double) _bytes_in_collection_set_before_gc;
ysr@777 1605 }
ysr@777 1606
ysr@777 1607 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1608 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1609
ysr@777 1610 double expensive_region_limit_ms =
johnc@1186 1611 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1612 if (expensive_region_limit_ms < 0.0) {
ysr@777 1613 // this means that the other time was predicted to be longer than
ysr@777 1614 // than the max pause time
johnc@1186 1615 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1616 }
ysr@777 1617 _expensive_region_limit_ms = expensive_region_limit_ms;
ysr@777 1618 }
ysr@777 1619
ysr@777 1620 _in_marking_window = new_in_marking_window;
ysr@777 1621 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1622 _free_regions_at_end_of_collection = _g1->free_regions();
tonyp@3119 1623 update_young_list_target_length();
ysr@777 1624
iveresov@1546 1625 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1626 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1627 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
tonyp@3209 1628
tonyp@3209 1629 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 1630 }
ysr@777 1631
tonyp@2961 1632 #define EXT_SIZE_FORMAT "%d%s"
tonyp@2961 1633 #define EXT_SIZE_PARAMS(bytes) \
tonyp@2961 1634 byte_size_in_proper_unit((bytes)), \
tonyp@2961 1635 proper_unit_for_byte_size((bytes))
tonyp@2961 1636
tonyp@2961 1637 void G1CollectorPolicy::print_heap_transition() {
tonyp@2961 1638 if (PrintGCDetails) {
tonyp@2961 1639 YoungList* young_list = _g1->young_list();
tonyp@2961 1640 size_t eden_bytes = young_list->eden_used_bytes();
tonyp@2961 1641 size_t survivor_bytes = young_list->survivor_used_bytes();
tonyp@2961 1642 size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
tonyp@2961 1643 size_t used = _g1->used();
tonyp@2961 1644 size_t capacity = _g1->capacity();
brutisso@3120 1645 size_t eden_capacity =
brutisso@3120 1646 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
tonyp@2961 1647
tonyp@2961 1648 gclog_or_tty->print_cr(
brutisso@3120 1649 " [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
brutisso@3120 1650 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
brutisso@3120 1651 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
brutisso@3120 1652 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
brutisso@3120 1653 EXT_SIZE_PARAMS(_eden_bytes_before_gc),
brutisso@3120 1654 EXT_SIZE_PARAMS(_prev_eden_capacity),
brutisso@3120 1655 EXT_SIZE_PARAMS(eden_bytes),
brutisso@3120 1656 EXT_SIZE_PARAMS(eden_capacity),
brutisso@3120 1657 EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
brutisso@3120 1658 EXT_SIZE_PARAMS(survivor_bytes),
brutisso@3120 1659 EXT_SIZE_PARAMS(used_before_gc),
brutisso@3120 1660 EXT_SIZE_PARAMS(_capacity_before_gc),
brutisso@3120 1661 EXT_SIZE_PARAMS(used),
brutisso@3120 1662 EXT_SIZE_PARAMS(capacity));
brutisso@3120 1663
brutisso@3120 1664 _prev_eden_capacity = eden_capacity;
tonyp@2961 1665 } else if (PrintGC) {
tonyp@2961 1666 _g1->print_size_transition(gclog_or_tty,
tonyp@2961 1667 _cur_collection_pause_used_at_start_bytes,
tonyp@2961 1668 _g1->used(), _g1->capacity());
tonyp@2961 1669 }
tonyp@2961 1670 }
tonyp@2961 1671
iveresov@1546 1672 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1673 double update_rs_processed_buffers,
iveresov@1546 1674 double goal_ms) {
iveresov@1546 1675 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1676 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1677
tonyp@1717 1678 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1679 const int k_gy = 3, k_gr = 6;
iveresov@1546 1680 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1681
iveresov@1546 1682 int g = cg1r->green_zone();
iveresov@1546 1683 if (update_rs_time > goal_ms) {
iveresov@1546 1684 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
iveresov@1546 1685 } else {
iveresov@1546 1686 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1687 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1688 }
iveresov@1546 1689 }
iveresov@1546 1690 // Change the refinement threads params
iveresov@1546 1691 cg1r->set_green_zone(g);
iveresov@1546 1692 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1693 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1694 cg1r->reinitialize_threads();
iveresov@1546 1695
iveresov@1546 1696 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1697 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1698 cg1r->yellow_zone());
iveresov@1546 1699 // Change the barrier params
iveresov@1546 1700 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1701 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1702 }
iveresov@1546 1703
iveresov@1546 1704 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1705 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1706 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1707 } else {
iveresov@1546 1708 dcqs.set_completed_queue_padding(0);
iveresov@1546 1709 }
iveresov@1546 1710 dcqs.notify_if_necessary();
iveresov@1546 1711 }
iveresov@1546 1712
ysr@777 1713 double
ysr@777 1714 G1CollectorPolicy::
ysr@777 1715 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1716 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1717
ysr@777 1718 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1719 size_t young_num = g1h->young_list()->length();
ysr@777 1720 if (young_num == 0)
ysr@777 1721 return 0.0;
ysr@777 1722
ysr@777 1723 young_num += adjustment;
ysr@777 1724 size_t pending_cards = predict_pending_cards();
johnc@1829 1725 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1726 predict_rs_length_diff();
ysr@777 1727 size_t card_num;
tonyp@3337 1728 if (gcs_are_young()) {
ysr@777 1729 card_num = predict_young_card_num(rs_lengths);
tonyp@3337 1730 } else {
ysr@777 1731 card_num = predict_non_young_card_num(rs_lengths);
tonyp@3337 1732 }
ysr@777 1733 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1734 double accum_yg_surv_rate =
ysr@777 1735 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1736
ysr@777 1737 size_t bytes_to_copy =
ysr@777 1738 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1739
ysr@777 1740 return
ysr@777 1741 predict_rs_update_time_ms(pending_cards) +
ysr@777 1742 predict_rs_scan_time_ms(card_num) +
ysr@777 1743 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1744 predict_young_other_time_ms(young_num) +
ysr@777 1745 predict_constant_other_time_ms();
ysr@777 1746 }
ysr@777 1747
ysr@777 1748 double
ysr@777 1749 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1750 size_t rs_length = predict_rs_length_diff();
ysr@777 1751 size_t card_num;
tonyp@3337 1752 if (gcs_are_young()) {
ysr@777 1753 card_num = predict_young_card_num(rs_length);
tonyp@3337 1754 } else {
ysr@777 1755 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1756 }
ysr@777 1757 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1758 }
ysr@777 1759
ysr@777 1760 double
ysr@777 1761 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1762 size_t scanned_cards) {
ysr@777 1763 return
ysr@777 1764 predict_rs_update_time_ms(pending_cards) +
ysr@777 1765 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1766 predict_constant_other_time_ms();
ysr@777 1767 }
ysr@777 1768
ysr@777 1769 double
ysr@777 1770 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1771 bool young) {
ysr@777 1772 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1773 size_t card_num;
tonyp@3337 1774 if (gcs_are_young()) {
ysr@777 1775 card_num = predict_young_card_num(rs_length);
tonyp@3337 1776 } else {
ysr@777 1777 card_num = predict_non_young_card_num(rs_length);
tonyp@3337 1778 }
ysr@777 1779 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1780
ysr@777 1781 double region_elapsed_time_ms =
ysr@777 1782 predict_rs_scan_time_ms(card_num) +
ysr@777 1783 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1784
ysr@777 1785 if (young)
ysr@777 1786 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1787 else
ysr@777 1788 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1789
ysr@777 1790 return region_elapsed_time_ms;
ysr@777 1791 }
ysr@777 1792
ysr@777 1793 size_t
ysr@777 1794 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1795 size_t bytes_to_copy;
ysr@777 1796 if (hr->is_marked())
ysr@777 1797 bytes_to_copy = hr->max_live_bytes();
ysr@777 1798 else {
ysr@777 1799 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1800 "invariant" );
ysr@777 1801 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1802 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1803 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1804 }
ysr@777 1805
ysr@777 1806 return bytes_to_copy;
ysr@777 1807 }
ysr@777 1808
ysr@777 1809 void
tonyp@3289 1810 G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
tonyp@3289 1811 size_t survivor_cset_region_length) {
tonyp@3289 1812 _eden_cset_region_length = eden_cset_region_length;
tonyp@3289 1813 _survivor_cset_region_length = survivor_cset_region_length;
tonyp@3289 1814 _old_cset_region_length = 0;
johnc@1829 1815 }
johnc@1829 1816
johnc@1829 1817 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
johnc@1829 1818 _recorded_rs_lengths = rs_lengths;
johnc@1829 1819 }
johnc@1829 1820
ysr@777 1821 void G1CollectorPolicy::check_if_region_is_too_expensive(double
ysr@777 1822 predicted_time_ms) {
ysr@777 1823 // I don't think we need to do this when in young GC mode since
ysr@777 1824 // marking will be initiated next time we hit the soft limit anyway...
ysr@777 1825 if (predicted_time_ms > _expensive_region_limit_ms) {
tonyp@3337 1826 ergo_verbose2(ErgoMixedGCs,
tonyp@3337 1827 "request mixed GCs end",
tonyp@3114 1828 ergo_format_reason("predicted region time higher than threshold")
tonyp@3114 1829 ergo_format_ms("predicted region time")
tonyp@3114 1830 ergo_format_ms("threshold"),
tonyp@3114 1831 predicted_time_ms, _expensive_region_limit_ms);
tonyp@3337 1832 // no point in doing another mixed GC
tonyp@3337 1833 _should_revert_to_young_gcs = true;
ysr@777 1834 }
ysr@777 1835 }
ysr@777 1836
ysr@777 1837 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1838 double elapsed_ms) {
ysr@777 1839 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1840 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1841 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1842 }
ysr@777 1843
ysr@777 1844 size_t G1CollectorPolicy::expansion_amount() {
tonyp@3114 1845 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
tonyp@3114 1846 double threshold = _gc_overhead_perc;
tonyp@3114 1847 if (recent_gc_overhead > threshold) {
johnc@1186 1848 // We will double the existing space, or take
johnc@1186 1849 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 1850 // space, whichever is smaller, bounded below by a minimum
johnc@1186 1851 // expansion (unless that's all that's left.)
ysr@777 1852 const size_t min_expand_bytes = 1*M;
johnc@2504 1853 size_t reserved_bytes = _g1->max_capacity();
ysr@777 1854 size_t committed_bytes = _g1->capacity();
ysr@777 1855 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 1856 size_t expand_bytes;
ysr@777 1857 size_t expand_bytes_via_pct =
johnc@1186 1858 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 1859 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 1860 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 1861 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
tonyp@3114 1862
tonyp@3114 1863 ergo_verbose5(ErgoHeapSizing,
tonyp@3114 1864 "attempt heap expansion",
tonyp@3114 1865 ergo_format_reason("recent GC overhead higher than "
tonyp@3114 1866 "threshold after GC")
tonyp@3114 1867 ergo_format_perc("recent GC overhead")
tonyp@3114 1868 ergo_format_perc("threshold")
tonyp@3114 1869 ergo_format_byte("uncommitted")
tonyp@3114 1870 ergo_format_byte_perc("calculated expansion amount"),
tonyp@3114 1871 recent_gc_overhead, threshold,
tonyp@3114 1872 uncommitted_bytes,
tonyp@3114 1873 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
tonyp@3114 1874
ysr@777 1875 return expand_bytes;
ysr@777 1876 } else {
ysr@777 1877 return 0;
ysr@777 1878 }
ysr@777 1879 }
ysr@777 1880
ysr@777 1881 class CountCSClosure: public HeapRegionClosure {
ysr@777 1882 G1CollectorPolicy* _g1_policy;
ysr@777 1883 public:
ysr@777 1884 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 1885 _g1_policy(g1_policy) {}
ysr@777 1886 bool doHeapRegion(HeapRegion* r) {
ysr@777 1887 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 1888 return false;
ysr@777 1889 }
ysr@777 1890 };
ysr@777 1891
ysr@777 1892 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 1893 CountCSClosure cs_closure(this);
ysr@777 1894 _g1->collection_set_iterate(&cs_closure);
ysr@777 1895 }
ysr@777 1896
johnc@3219 1897 void G1CollectorPolicy::print_summary(int level,
johnc@3219 1898 const char* str,
johnc@3219 1899 NumberSeq* seq) const {
ysr@777 1900 double sum = seq->sum();
brutisso@2645 1901 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 1902 str, sum / 1000.0, seq->avg());
ysr@777 1903 }
ysr@777 1904
johnc@3219 1905 void G1CollectorPolicy::print_summary_sd(int level,
johnc@3219 1906 const char* str,
johnc@3219 1907 NumberSeq* seq) const {
ysr@777 1908 print_summary(level, str, seq);
brutisso@2645 1909 LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 1910 seq->num(), seq->sd(), seq->maximum());
ysr@777 1911 }
ysr@777 1912
ysr@777 1913 void G1CollectorPolicy::check_other_times(int level,
ysr@777 1914 NumberSeq* other_times_ms,
ysr@777 1915 NumberSeq* calc_other_times_ms) const {
ysr@777 1916 bool should_print = false;
brutisso@2645 1917 LineBuffer buf(level + 2);
ysr@777 1918
ysr@777 1919 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 1920 fabs(calc_other_times_ms->sum()));
ysr@777 1921 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 1922 fabs(calc_other_times_ms->sum()));
ysr@777 1923 double sum_ratio = max_sum / min_sum;
ysr@777 1924 if (sum_ratio > 1.1) {
ysr@777 1925 should_print = true;
brutisso@2645 1926 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 1927 }
ysr@777 1928
ysr@777 1929 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 1930 fabs(calc_other_times_ms->avg()));
ysr@777 1931 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 1932 fabs(calc_other_times_ms->avg()));
ysr@777 1933 double avg_ratio = max_avg / min_avg;
ysr@777 1934 if (avg_ratio > 1.1) {
ysr@777 1935 should_print = true;
brutisso@2645 1936 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 1937 }
ysr@777 1938
ysr@777 1939 if (other_times_ms->sum() < -0.01) {
brutisso@2645 1940 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 1941 }
ysr@777 1942
ysr@777 1943 if (other_times_ms->avg() < -0.01) {
brutisso@2645 1944 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 1945 }
ysr@777 1946
ysr@777 1947 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 1948 should_print = true;
brutisso@2645 1949 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 1950 }
ysr@777 1951
ysr@777 1952 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 1953 should_print = true;
brutisso@2645 1954 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 1955 }
ysr@777 1956
ysr@777 1957 if (should_print)
ysr@777 1958 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 1959 }
ysr@777 1960
ysr@777 1961 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
jmasa@2188 1962 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 1963 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 1964 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 1965 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 1966 if (body_summary != NULL) {
ysr@777 1967 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
ysr@777 1968 if (parallel) {
ysr@777 1969 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
johnc@3219 1970 print_summary(2, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
johnc@3219 1971 print_summary(2, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
ysr@777 1972 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 1973 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 1974 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 1975 print_summary(2, "Termination", body_summary->get_termination_seq());
johnc@3219 1976 print_summary(2, "Parallel Other", body_summary->get_parallel_other_seq());
ysr@777 1977 {
ysr@777 1978 NumberSeq* other_parts[] = {
ysr@777 1979 body_summary->get_ext_root_scan_seq(),
ysr@777 1980 body_summary->get_mark_stack_scan_seq(),
johnc@3219 1981 body_summary->get_update_rs_seq(),
ysr@777 1982 body_summary->get_scan_rs_seq(),
ysr@777 1983 body_summary->get_obj_copy_seq(),
ysr@777 1984 body_summary->get_termination_seq()
ysr@777 1985 };
ysr@777 1986 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
johnc@2134 1987 6, other_parts);
ysr@777 1988 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 1989 &calc_other_times_ms);
ysr@777 1990 }
ysr@777 1991 } else {
johnc@3219 1992 print_summary(1, "Ext Root Scanning", body_summary->get_ext_root_scan_seq());
johnc@3219 1993 print_summary(1, "Mark Stack Scanning", body_summary->get_mark_stack_scan_seq());
ysr@777 1994 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 1995 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 1996 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 1997 }
ysr@777 1998 }
johnc@3219 1999 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
johnc@3219 2000 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 2001 print_summary(1, "Other", summary->get_other_seq());
ysr@777 2002 {
johnc@2134 2003 if (body_summary != NULL) {
johnc@2134 2004 NumberSeq calc_other_times_ms;
johnc@2134 2005 if (parallel) {
johnc@2134 2006 // parallel
johnc@2134 2007 NumberSeq* other_parts[] = {
johnc@2134 2008 body_summary->get_satb_drain_seq(),
johnc@2134 2009 body_summary->get_parallel_seq(),
johnc@2134 2010 body_summary->get_clear_ct_seq()
johnc@2134 2011 };
johnc@2134 2012 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
johnc@2134 2013 3, other_parts);
johnc@2134 2014 } else {
johnc@2134 2015 // serial
johnc@2134 2016 NumberSeq* other_parts[] = {
johnc@2134 2017 body_summary->get_satb_drain_seq(),
johnc@2134 2018 body_summary->get_update_rs_seq(),
johnc@2134 2019 body_summary->get_ext_root_scan_seq(),
johnc@2134 2020 body_summary->get_mark_stack_scan_seq(),
johnc@2134 2021 body_summary->get_scan_rs_seq(),
johnc@2134 2022 body_summary->get_obj_copy_seq()
johnc@2134 2023 };
johnc@2134 2024 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
johnc@2134 2025 6, other_parts);
johnc@2134 2026 }
johnc@2134 2027 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 2028 }
ysr@777 2029 }
ysr@777 2030 } else {
brutisso@2645 2031 LineBuffer(1).append_and_print_cr("none");
ysr@777 2032 }
brutisso@2645 2033 LineBuffer(0).append_and_print_cr("");
ysr@777 2034 }
ysr@777 2035
// Dumps the accumulated GC timing statistics to the GC log. Young-gen
// (pause) statistics are printed under -XX:+TraceGen0Time; full-GC
// statistics under -XX:+TraceGen1Time.
void G1CollectorPolicy::print_tracing_info() const {
  if (TraceGen0Time) {
    gclog_or_tty->print_cr("ALL PAUSES");
    print_summary_sd(0, "Total", _all_pause_times_ms);
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("");
    // Counts of fully-young vs mixed collection pauses.
    gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
    gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
    gclog_or_tty->print_cr("");

    gclog_or_tty->print_cr("EVACUATION PAUSES");
    print_summary(_summary);

    gclog_or_tty->print_cr("MISC");
    print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
    print_summary_sd(0, "Yields", _all_yield_times_ms);
    // Auxiliary (user-defined) timers; only print the ones that fired.
    for (int i = 0; i < _aux_num; ++i) {
      if (_all_aux_times_ms[i].num() > 0) {
        char buffer[96];
        sprintf(buffer, "Aux%d", i);
        print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
      }
    }
  }
  if (TraceGen1Time) {
    if (_all_full_gc_times_ms->num() > 0) {
      gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                          _all_full_gc_times_ms->num(),
                          _all_full_gc_times_ms->sum() / 1000.0);
      gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
      gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
                             _all_full_gc_times_ms->sd(),
                             _all_full_gc_times_ms->maximum());
    }
  }
}
ysr@777 2072
// Prints survival-rate summaries for the young-gen survivor rate
// groups (debug builds only; a no-op in product builds).
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
ysr@777 2079
ysr@777 2080 #ifndef PRODUCT
ysr@777 2081 // for debugging, bit of a hack...
ysr@777 2082 static char*
ysr@777 2083 region_num_to_mbs(int length) {
ysr@777 2084 static char buffer[64];
ysr@777 2085 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2086 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2087 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2088 return buffer;
ysr@777 2089 }
ysr@777 2090 #endif // PRODUCT
ysr@777 2091
apetrusenko@980 2092 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2093 switch (purpose) {
ysr@777 2094 case GCAllocForSurvived:
apetrusenko@980 2095 return _max_survivor_regions;
ysr@777 2096 case GCAllocForTenured:
apetrusenko@980 2097 return REGIONS_UNLIMITED;
ysr@777 2098 default:
apetrusenko@980 2099 ShouldNotReachHere();
apetrusenko@980 2100 return REGIONS_UNLIMITED;
ysr@777 2101 };
ysr@777 2102 }
ysr@777 2103
tonyp@3119 2104 void G1CollectorPolicy::update_max_gc_locker_expansion() {
tonyp@2333 2105 size_t expansion_region_num = 0;
tonyp@2333 2106 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 2107 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 2108 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 2109 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 2110 // less than 1.0) we'll get 1.
tonyp@2333 2111 expansion_region_num = (size_t) ceil(expansion_region_num_d);
tonyp@2333 2112 } else {
tonyp@2333 2113 assert(expansion_region_num == 0, "sanity");
tonyp@2333 2114 }
tonyp@2333 2115 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 2116 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 2117 }
tonyp@2333 2118
apetrusenko@980 2119 // Calculates survivor space parameters.
tonyp@3119 2120 void G1CollectorPolicy::update_survivors_policy() {
tonyp@3119 2121 double max_survivor_regions_d =
tonyp@3119 2122 (double) _young_list_target_length / (double) SurvivorRatio;
tonyp@3119 2123 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
tonyp@3119 2124 // smaller than 1.0) we'll get 1.
tonyp@3119 2125 _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
tonyp@3119 2126
tonyp@3066 2127 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2128 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2129 }
apetrusenko@980 2130
ysr@777 2131 #ifndef PRODUCT
// Debug-only closure: asserts that every region the collection set
// chooser knows about is properly ordered (continues-humongous regions
// are skipped since they are never chosen individually).
class HRSortIndexIsOKClosure: public HeapRegionClosure {
  CollectionSetChooser* _chooser;
public:
  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
    _chooser(chooser) {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
    }
    // Returning false means: never abort the iteration.
    return false;
  }
};
ysr@777 2145
// Debug-only check: iterates the whole heap asserting (via the closure
// above) that the collection set chooser's ordering is consistent.
// Always returns true so it can be used inside an assert().
bool G1CollectorPolicy::assertMarkedBytesDataOK() {
  HRSortIndexIsOKClosure cl(_collectionSetChooser);
  _g1->heap_region_iterate(&cl);
  return true;
}
ysr@777 2151 #endif
ysr@777 2152
tonyp@3114 2153 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
tonyp@3114 2154 GCCause::Cause gc_cause) {
tonyp@2011 2155 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2156 if (!during_cycle) {
tonyp@3114 2157 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2158 "request concurrent cycle initiation",
tonyp@3114 2159 ergo_format_reason("requested by GC cause")
tonyp@3114 2160 ergo_format_str("GC cause"),
tonyp@3114 2161 GCCause::to_string(gc_cause));
tonyp@2011 2162 set_initiate_conc_mark_if_possible();
tonyp@2011 2163 return true;
tonyp@2011 2164 } else {
tonyp@3114 2165 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2166 "do not request concurrent cycle initiation",
tonyp@3114 2167 ergo_format_reason("concurrent cycle already in progress")
tonyp@3114 2168 ergo_format_str("GC cause"),
tonyp@3114 2169 GCCause::to_string(gc_cause));
tonyp@2011 2170 return false;
tonyp@2011 2171 }
tonyp@2011 2172 }
tonyp@2011 2173
// Decides, at the start of a pause, whether this pause should be an
// initial-mark pause (i.e., whether it should kick off a new
// concurrent marking cycle).
void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();
      // We do not allow mixed GCs during marking.
      if (!gcs_are_young()) {
        set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                  "initiate concurrent cycle",
                  ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      ergo_verbose0(ErgoConcCycles,
                    "do not initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle already in progress"));
    }
  }
}
tonyp@1794 2231
ysr@777 2232 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2233 CollectionSetChooser* _hrSorted;
ysr@777 2234
ysr@777 2235 public:
ysr@777 2236 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2237 _hrSorted(hrSorted)
ysr@777 2238 {}
ysr@777 2239
ysr@777 2240 bool doHeapRegion(HeapRegion* r) {
ysr@777 2241 // We only include humongous regions in collection
ysr@777 2242 // sets when concurrent mark shows that their contained object is
ysr@777 2243 // unreachable.
ysr@777 2244
ysr@777 2245 // Do we have any marking information for this region?
ysr@777 2246 if (r->is_marked()) {
ysr@777 2247 // We don't include humongous regions in collection
ysr@777 2248 // sets because we collect them immediately at the end of a marking
ysr@777 2249 // cycle. We also don't include young regions because we *must*
ysr@777 2250 // include them in the next collection pause.
ysr@777 2251 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2252 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2253 }
ysr@777 2254 }
ysr@777 2255 return false;
ysr@777 2256 }
ysr@777 2257 };
ysr@777 2258
// Parallel counterpart of KnownGarbageClosure: each worker claims
// fixed-size chunks of the chooser's region array and fills them with
// marked, non-humongous, non-young candidate regions.
class ParKnownGarbageHRClosure: public HeapRegionClosure {
  CollectionSetChooser* _hrSorted;
  jint _marked_regions_added;   // number of regions this worker added
  jint _chunk_size;             // slots claimed from the chooser at a time
  jint _cur_chunk_idx;          // next free slot in the current chunk
  jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  int _worker;                  // worker id (kept for debugging)
  int _invokes;                 // number of times doHeapRegion was called

  // Atomically claims the next chunk of slots from the chooser.
  void get_new_chunk() {
    _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
    _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  }
  // Stores the region into the current chunk, claiming a new chunk
  // first if the current one is exhausted.
  void add_region(HeapRegion* r) {
    if (_cur_chunk_idx == _cur_chunk_end) {
      get_new_chunk();
    }
    assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
    _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
    _marked_regions_added++;
    _cur_chunk_idx++;
  }

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           jint chunk_size,
                           int worker) :
    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
    _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
    _invokes(0)
  {}

  bool doHeapRegion(HeapRegion* r) {
    // We only include humongous regions in collection
    // sets when concurrent mark shows that their contained object is
    // unreachable.
    _invokes++;

    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We don't include humongous regions in collection
      // sets because we collect them immediately at the end of a marking
      // cycle.
      // We also do not include young regions in collection sets
      if (!r->isHumongous() && !r->is_young()) {
        add_region(r);
      }
    }
    return false;
  }
  jint marked_regions_added() { return _marked_regions_added; }
  int invokes() { return _invokes; }
};
ysr@777 2312
// Gang task that runs ParKnownGarbageHRClosure over the heap in
// parallel, then publishes each worker's region count back to the
// collection set chooser.
class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  jint _chunk_size;
  G1CollectedHeap* _g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap())
  {}

  // Called once per worker; i is the worker id.
  void work(int i) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
    // Back to zero for the claim value.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
                                         _g1->workers()->active_workers(),
                                         HeapRegion::InitialClaimValue);
    jint regions_added = parKnownGarbageCl.marked_regions_added();
    _hrSorted->incNumMarkedHeapRegions(regions_added);
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
                             i, parKnownGarbageCl.invokes(), regions_added);
    }
  }
};
ysr@777 2338
// Finishes the concurrent-mark cleanup phase: rebuilds the collection
// set chooser's list of candidate (old) regions — in parallel when GC
// worker threads are available — sorts it, and records the cleanup
// pause timing with the MMU tracker.
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
  double start_sec;
  if (G1PrintParCleanupStats) {
    start_sec = os::elapsedTime();
  }

  _collectionSetChooser->clearMarkedHeapRegions();
  double clear_marked_end_sec;
  if (G1PrintParCleanupStats) {
    clear_marked_end_sec = os::elapsedTime();
    gclog_or_tty->print_cr(" clear marked regions: %8.3f ms.",
                           (clear_marked_end_sec - start_sec) * 1000.0);
  }

  if (G1CollectedHeap::use_parallel_gc_threads()) {
    const size_t OverpartitionFactor = 4;
    size_t WorkUnit;
    // The use of MinChunkSize = 8 in the original code
    // causes some assertion failures when the total number of
    // region is less than 8. The code here tries to fix that.
    // Should the original code also be fixed?
    if (no_of_gc_threads > 0) {
      const size_t MinWorkUnit =
        MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
      WorkUnit =
        MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
             MinWorkUnit);
    } else {
      // This branch is only reachable when the caller passed a bogus
      // thread count: the assert fires in debug builds; in product
      // builds we fall back to ParallelGCThreads to avoid a crash.
      assert(no_of_gc_threads > 0,
        "The active gc workers should be greater than 0");
      // In a product build do something reasonable to avoid a crash.
      const size_t MinWorkUnit =
        MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
      WorkUnit =
        MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
             MinWorkUnit);
    }
    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
                                                             WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }
  double known_garbage_end_sec;
  if (G1PrintParCleanupStats) {
    known_garbage_end_sec = os::elapsedTime();
    gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
                           (known_garbage_end_sec - clear_marked_end_sec) * 1000.0);
  }

  _collectionSetChooser->sortMarkedHeapRegions();
  double end_sec = os::elapsedTime();
  if (G1PrintParCleanupStats) {
    gclog_or_tty->print_cr(" sorting: %8.3f ms.",
                           (end_sec - known_garbage_end_sec) * 1000.0);
  }

  // Account the whole cleanup as a stop-world pause for MMU purposes.
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;
  _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
}
ysr@777 2409
johnc@1829 2410 // Add the heap region at the head of the non-incremental collection set
tonyp@3289 2411 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
johnc@1829 2412 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2413 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2414
ysr@777 2415 if (_g1->mark_in_progress())
ysr@777 2416 _g1->concurrent_mark()->registerCSetRegion(hr);
ysr@777 2417
johnc@1829 2418 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2419 hr->set_in_collection_set(true);
ysr@777 2420 hr->set_next_in_collection_set(_collection_set);
ysr@777 2421 _collection_set = hr;
ysr@777 2422 _collection_set_bytes_used_before += hr->used();
tonyp@961 2423 _g1->register_region_with_in_cset_fast_test(hr);
tonyp@3289 2424 size_t rs_length = hr->rem_set()->occupied();
tonyp@3289 2425 _recorded_rs_lengths += rs_length;
tonyp@3289 2426 _old_cset_region_length += 1;
ysr@777 2427 }
ysr@777 2428
// Initialize the per-collection-set information
// Resets the incremental collection set to empty and marks it active,
// ready to accumulate regions for the next pause.
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  // Empty list and zeroed byte accounting.
  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_bytes_used_before = 0;

  // Reset the aggregated prediction state.
  _inc_cset_max_finger = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_predicted_elapsed_time_ms = 0;
  _inc_cset_build_state = Active;
}
johnc@1829 2442
johnc@1829 2443 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2444 // This routine is used when:
johnc@1829 2445 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2446 // evacuation pause,
johnc@1829 2447 // * adding the current allocation region to the incremental cset
johnc@1829 2448 // when it is retired, and
johnc@1829 2449 // * updating existing policy information for a region in the
johnc@1829 2450 // incremental cset via young list RSet sampling.
johnc@1829 2451 // Therefore this routine may be called at a safepoint by the
johnc@1829 2452 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2453 // retiring the current allocation region) or a concurrent
johnc@1829 2454 // refine thread (RSet sampling).
johnc@1829 2455
johnc@1829 2456 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2457 size_t used_bytes = hr->used();
johnc@1829 2458
johnc@1829 2459 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2460 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2461
johnc@1829 2462 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2463
johnc@1829 2464 // Cache the values we have added to the aggregated informtion
johnc@1829 2465 // in the heap region in case we have to remove this region from
johnc@1829 2466 // the incremental collection set, or it is updated by the
johnc@1829 2467 // rset sampling code
johnc@1829 2468 hr->set_recorded_rs_length(rs_length);
johnc@1829 2469 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2470 }
johnc@1829 2471
johnc@1829 2472 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
johnc@1829 2473 // This routine is currently only called as part of the updating of
johnc@1829 2474 // existing policy information for regions in the incremental cset that
johnc@1829 2475 // is performed by the concurrent refine thread(s) as part of young list
johnc@1829 2476 // RSet sampling. Therefore we should not be at a safepoint.
johnc@1829 2477
johnc@1829 2478 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
johnc@1829 2479 assert(hr->is_young(), "it should be");
johnc@1829 2480
johnc@1829 2481 size_t used_bytes = hr->used();
johnc@1829 2482 size_t old_rs_length = hr->recorded_rs_length();
johnc@1829 2483 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
johnc@1829 2484
johnc@1829 2485 // Subtract the old recorded/predicted policy information for
johnc@1829 2486 // the given heap region from the collection set info.
johnc@1829 2487 _inc_cset_recorded_rs_lengths -= old_rs_length;
johnc@1829 2488 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
johnc@1829 2489
johnc@1829 2490 _inc_cset_bytes_used_before -= used_bytes;
johnc@1829 2491
johnc@1829 2492 // Clear the values cached in the heap region
johnc@1829 2493 hr->set_recorded_rs_length(0);
johnc@1829 2494 hr->set_predicted_elapsed_time_ms(0);
johnc@1829 2495 }
johnc@1829 2496
// Re-registers a young region with the incremental cset after its
// remembered set length changed (RSet sampling): the old cached
// contribution is removed, then re-added with the new length.
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  // Update the collection set information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");

  remove_from_incremental_cset_info(hr);
  add_to_incremental_cset_info(hr, new_rs_length);
}
johnc@1829 2504
// Shared bookkeeping for adding a young region to the incremental
// collection set (used by both the LHS and RHS add paths); the caller
// is responsible for linking the region into the cset list.
void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert(hr->is_young(), "invariant");
  assert(hr->young_index_in_cset() > -1, "should have already been set");
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  // Track the highest region end seen, for the concurrent marker.
  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert( hr->next_in_collection_set() == NULL, "invariant");

  // Keep the fast in-cset lookup table in sync.
  _g1->register_region_with_in_cset_fast_test(hr);
}
johnc@1829 2529
johnc@1829 2530 // Add the region at the RHS of the incremental cset
johnc@1829 2531 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2532 // We should only ever be appending survivors at the end of a pause
johnc@1829 2533 assert( hr->is_survivor(), "Logic");
johnc@1829 2534
johnc@1829 2535 // Do the 'common' stuff
johnc@1829 2536 add_region_to_incremental_cset_common(hr);
johnc@1829 2537
johnc@1829 2538 // Now add the region at the right hand side
johnc@1829 2539 if (_inc_cset_tail == NULL) {
johnc@1829 2540 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2541 _inc_cset_head = hr;
johnc@1829 2542 } else {
johnc@1829 2543 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2544 }
johnc@1829 2545 _inc_cset_tail = hr;
johnc@1829 2546 }
johnc@1829 2547
johnc@1829 2548 // Add the region to the LHS of the incremental cset
johnc@1829 2549 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2550 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2551 assert(!hr->is_survivor(), "Logic");
johnc@1829 2552
johnc@1829 2553 // Do the 'common' stuff
johnc@1829 2554 add_region_to_incremental_cset_common(hr);
johnc@1829 2555
johnc@1829 2556 // Add the region at the left hand side
johnc@1829 2557 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2558 if (_inc_cset_head == NULL) {
johnc@1829 2559 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2560 _inc_cset_tail = hr;
johnc@1829 2561 }
johnc@1829 2562 _inc_cset_head = hr;
johnc@1829 2563 }
johnc@1829 2564
#ifndef PRODUCT
// Debug-only dump of a collection set list (either the incremental or
// the non-incremental one) with per-region marking/age details.
void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
  assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");

  st->print_cr("\nCollection_set:");
  HeapRegion* csr = list_head;
  while (csr != NULL) {
    // Grab the next pointer first, in case printing has side effects.
    HeapRegion* next = csr->next_in_collection_set();
    assert(csr->in_collection_set(), "bad CS");
    st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
                 "age: %4d, y: %d, surv: %d",
                 csr->bottom(), csr->end(),
                 csr->top(),
                 csr->prev_top_at_mark_start(),
                 csr->next_top_at_mark_start(),
                 csr->top_at_conc_mark_count(),
                 csr->age_in_surv_rate_group_cond(),
                 csr->is_young(),
                 csr->is_survivor());
    csr = next;
  }
}
#endif // !PRODUCT
johnc@1829 2588
tonyp@3209 2589 void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
johnc@1829 2590 // Set this here - in case we're not doing young collections.
johnc@1829 2591 double non_young_start_time_sec = os::elapsedTime();
johnc@1829 2592
tonyp@3114 2593 YoungList* young_list = _g1->young_list();
tonyp@3114 2594
tonyp@2011 2595 guarantee(target_pause_time_ms > 0.0,
tonyp@2011 2596 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2011 2597 target_pause_time_ms));
tonyp@2011 2598 guarantee(_collection_set == NULL, "Precondition");
ysr@777 2599
ysr@777 2600 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
ysr@777 2601 double predicted_pause_time_ms = base_time_ms;
ysr@777 2602
tonyp@2011 2603 double time_remaining_ms = target_pause_time_ms - base_time_ms;
ysr@777 2604
tonyp@3114 2605 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2606 "start choosing CSet",
tonyp@3114 2607 ergo_format_ms("predicted base time")
tonyp@3114 2608 ergo_format_ms("remaining time")
tonyp@3114 2609 ergo_format_ms("target pause time"),
tonyp@3114 2610 base_time_ms, time_remaining_ms, target_pause_time_ms);
tonyp@3114 2611
ysr@777 2612 // the 10% and 50% values are arbitrary...
tonyp@3114 2613 double threshold = 0.10 * target_pause_time_ms;
tonyp@3114 2614 if (time_remaining_ms < threshold) {
tonyp@3114 2615 double prev_time_remaining_ms = time_remaining_ms;
tonyp@2011 2616 time_remaining_ms = 0.50 * target_pause_time_ms;
tonyp@3114 2617 ergo_verbose3(ErgoCSetConstruction,
tonyp@3114 2618 "adjust remaining time",
tonyp@3114 2619 ergo_format_reason("remaining time lower than threshold")
tonyp@3114 2620 ergo_format_ms("remaining time")
tonyp@3114 2621 ergo_format_ms("threshold")
tonyp@3114 2622 ergo_format_ms("adjusted remaining time"),
tonyp@3114 2623 prev_time_remaining_ms, threshold, time_remaining_ms);
ysr@777 2624 }
ysr@777 2625
tonyp@3114 2626 size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
tonyp@3114 2627
tonyp@3114 2628 HeapRegion* hr;
tonyp@3114 2629 double young_start_time_sec = os::elapsedTime();
ysr@777 2630
apetrusenko@1112 2631 _collection_set_bytes_used_before = 0;
tonyp@3337 2632 _last_gc_was_young = gcs_are_young() ? true : false;
tonyp@3337 2633
tonyp@3337 2634 if (_last_gc_was_young) {
tonyp@3337 2635 ++_young_pause_num;
tonyp@3114 2636 } else {
tonyp@3337 2637 ++_mixed_pause_num;
tonyp@3114 2638 }
brutisso@3065 2639
brutisso@3065 2640 // The young list is laid with the survivor regions from the previous
brutisso@3065 2641 // pause are appended to the RHS of the young list, i.e.
brutisso@3065 2642 // [Newly Young Regions ++ Survivors from last pause].
brutisso@3065 2643
tonyp@3289 2644 size_t survivor_region_length = young_list->survivor_length();
tonyp@3289 2645 size_t eden_region_length = young_list->length() - survivor_region_length;
tonyp@3289 2646 init_cset_region_lengths(eden_region_length, survivor_region_length);
tonyp@3114 2647 hr = young_list->first_survivor_region();
brutisso@3065 2648 while (hr != NULL) {
brutisso@3065 2649 assert(hr->is_survivor(), "badly formed young list");
brutisso@3065 2650 hr->set_young();
brutisso@3065 2651 hr = hr->get_next_young_region();
brutisso@3065 2652 }
brutisso@3065 2653
tonyp@3114 2654 // Clear the fields that point to the survivor list - they are all young now.
tonyp@3114 2655 young_list->clear_survivors();
brutisso@3065 2656
brutisso@3065 2657 if (_g1->mark_in_progress())
brutisso@3065 2658 _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
brutisso@3065 2659
brutisso@3065 2660 _collection_set = _inc_cset_head;
brutisso@3065 2661 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
brutisso@3065 2662 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
brutisso@3065 2663 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
brutisso@3065 2664
tonyp@3114 2665 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2666 "add young regions to CSet",
tonyp@3114 2667 ergo_format_region("eden")
tonyp@3114 2668 ergo_format_region("survivors")
tonyp@3114 2669 ergo_format_ms("predicted young region time"),
tonyp@3289 2670 eden_region_length, survivor_region_length,
tonyp@3114 2671 _inc_cset_predicted_elapsed_time_ms);
tonyp@3114 2672
brutisso@3065 2673 // The number of recorded young regions is the incremental
brutisso@3065 2674 // collection set's current size
brutisso@3065 2675 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
brutisso@3065 2676
brutisso@3065 2677 double young_end_time_sec = os::elapsedTime();
brutisso@3065 2678 _recorded_young_cset_choice_time_ms =
brutisso@3065 2679 (young_end_time_sec - young_start_time_sec) * 1000.0;
brutisso@3065 2680
brutisso@3065 2681 // We are doing young collections so reset this.
brutisso@3065 2682 non_young_start_time_sec = young_end_time_sec;
brutisso@3065 2683
tonyp@3337 2684 if (!gcs_are_young()) {
ysr@777 2685 bool should_continue = true;
ysr@777 2686 NumberSeq seq;
ysr@777 2687 double avg_prediction = 100000000000000000.0; // something very large
johnc@1829 2688
tonyp@3114 2689 double prev_predicted_pause_time_ms = predicted_pause_time_ms;
ysr@777 2690 do {
tonyp@3289 2691 // Note that add_old_region_to_cset() increments the
tonyp@3289 2692 // _old_cset_region_length field and cset_region_length() returns the
tonyp@3289 2693 // sum of _eden_cset_region_length, _survivor_cset_region_length, and
tonyp@3289 2694 // _old_cset_region_length. So, as old regions are added to the
tonyp@3289 2695 // CSet, _old_cset_region_length will be incremented and
tonyp@3289 2696 // cset_region_length(), which is used below, will always reflect
tonyp@3289 2697 // the the total number of regions added up to this point to the CSet.
tonyp@3289 2698
ysr@777 2699 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
ysr@777 2700 avg_prediction);
apetrusenko@1112 2701 if (hr != NULL) {
tonyp@3268 2702 _g1->old_set_remove(hr);
ysr@777 2703 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
ysr@777 2704 time_remaining_ms -= predicted_time_ms;
ysr@777 2705 predicted_pause_time_ms += predicted_time_ms;
tonyp@3289 2706 add_old_region_to_cset(hr);
ysr@777 2707 seq.add(predicted_time_ms);
ysr@777 2708 avg_prediction = seq.avg() + seq.sd();
ysr@777 2709 }
tonyp@3114 2710
tonyp@3114 2711 should_continue = true;
tonyp@3114 2712 if (hr == NULL) {
tonyp@3114 2713 // No need for an ergo verbose message here,
tonyp@3114 2714 // getNextMarkRegion() does this when it returns NULL.
tonyp@3114 2715 should_continue = false;
tonyp@3114 2716 } else {
tonyp@3114 2717 if (adaptive_young_list_length()) {
tonyp@3114 2718 if (time_remaining_ms < 0.0) {
tonyp@3114 2719 ergo_verbose1(ErgoCSetConstruction,
tonyp@3114 2720 "stop adding old regions to CSet",
tonyp@3114 2721 ergo_format_reason("remaining time is lower than 0")
tonyp@3114 2722 ergo_format_ms("remaining time"),
tonyp@3114 2723 time_remaining_ms);
tonyp@3114 2724 should_continue = false;
tonyp@3114 2725 }
tonyp@3114 2726 } else {
tonyp@3289 2727 if (cset_region_length() >= _young_list_fixed_length) {
tonyp@3114 2728 ergo_verbose2(ErgoCSetConstruction,
tonyp@3114 2729 "stop adding old regions to CSet",
tonyp@3126 2730 ergo_format_reason("CSet length reached target")
tonyp@3114 2731 ergo_format_region("CSet")
tonyp@3114 2732 ergo_format_region("young target"),
tonyp@3289 2733 cset_region_length(), _young_list_fixed_length);
tonyp@3114 2734 should_continue = false;
tonyp@3114 2735 }
tonyp@3114 2736 }
tonyp@3114 2737 }
ysr@777 2738 } while (should_continue);
ysr@777 2739
ysr@777 2740 if (!adaptive_young_list_length() &&
tonyp@3337 2741 cset_region_length() < _young_list_fixed_length) {
tonyp@3114 2742 ergo_verbose2(ErgoCSetConstruction,
tonyp@3337 2743 "request mixed GCs end",
tonyp@3114 2744 ergo_format_reason("CSet length lower than target")
tonyp@3114 2745 ergo_format_region("CSet")
tonyp@3114 2746 ergo_format_region("young target"),
tonyp@3289 2747 cset_region_length(), _young_list_fixed_length);
tonyp@3337 2748 _should_revert_to_young_gcs = true;
tonyp@3114 2749 }
tonyp@3114 2750
tonyp@3114 2751 ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2752 "add old regions to CSet",
tonyp@3114 2753 ergo_format_region("old")
tonyp@3114 2754 ergo_format_ms("predicted old region time"),
tonyp@3289 2755 old_cset_region_length(),
tonyp@3114 2756 predicted_pause_time_ms - prev_predicted_pause_time_ms);
ysr@777 2757 }
ysr@777 2758
johnc@1829 2759 stop_incremental_cset_building();
johnc@1829 2760
ysr@777 2761 count_CS_bytes_used();
ysr@777 2762
tonyp@3114 2763 ergo_verbose5(ErgoCSetConstruction,
tonyp@3114 2764 "finish choosing CSet",
tonyp@3114 2765 ergo_format_region("eden")
tonyp@3114 2766 ergo_format_region("survivors")
tonyp@3114 2767 ergo_format_region("old")
tonyp@3114 2768 ergo_format_ms("predicted pause time")
tonyp@3114 2769 ergo_format_ms("target pause time"),
tonyp@3289 2770 eden_region_length, survivor_region_length,
tonyp@3289 2771 old_cset_region_length(),
tonyp@3114 2772 predicted_pause_time_ms, target_pause_time_ms);
tonyp@3114 2773
ysr@777 2774 double non_young_end_time_sec = os::elapsedTime();
ysr@777 2775 _recorded_non_young_cset_choice_time_ms =
ysr@777 2776 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
ysr@777 2777 }

mercurial