src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Thu, 08 Sep 2011 05:16:49 -0400

author
tonyp
date
Thu, 08 Sep 2011 05:16:49 -0400
changeset 3119
4f41766176cf
parent 3114
20213c8a3c40
child 3120
af2ab04e0038
permissions
-rw-r--r--

7084509: G1: fix inconsistencies and mistakes in the young list target length calculations
Summary: Fixed inconsistencies and mistakes in the young list target length calculations so that a) the calculated target length is optimal (before, it was not), b) other parameters like max survivor size and max gc locker eden expansion are always consistent with the calculated target length (before, they were not always), and c) the resulting target length was always bound by desired min and max values (before, it was not).
Reviewed-by: brutisso, johnc

ysr@777 1 /*
tonyp@2472 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
stefank@2314 32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 33 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 34 #include "runtime/arguments.hpp"
stefank@2314 35 #include "runtime/java.hpp"
stefank@2314 36 #include "runtime/mutexLocker.hpp"
stefank@2314 37 #include "utilities/debug.hpp"
ysr@777 38
// Guards debugging output in the prediction code; not a product flag.
#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results
//
// Each table below has eight entries. The constructor picks the entry at
// index ParallelGCThreads - 1, clamped to [0, 7] (0 when ParallelGCThreads
// is 0), and uses it to seed the matching prediction sequence.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

// Initial estimate for the cost (ms) of processing one card.
static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

// Initial estimate for the cost (ms) of scanning one RS entry.
static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

// Initial estimate for the cost (ms) of copying one byte during evacuation.
static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

// Initial estimate for the fixed per-region overhead (ms) of young regions.
static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

// Initial estimate for the fixed per-region overhead (ms) of non-young regions.
static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>
ysr@777 84
brutisso@2645 85 // Help class for avoiding interleaved logging
brutisso@2645 86 class LineBuffer: public StackObj {
brutisso@2645 87
brutisso@2645 88 private:
brutisso@2645 89 static const int BUFFER_LEN = 1024;
brutisso@2645 90 static const int INDENT_CHARS = 3;
brutisso@2645 91 char _buffer[BUFFER_LEN];
brutisso@2645 92 int _indent_level;
brutisso@2645 93 int _cur;
brutisso@2645 94
brutisso@2645 95 void vappend(const char* format, va_list ap) {
brutisso@2645 96 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
brutisso@2645 97 if (res != -1) {
brutisso@2645 98 _cur += res;
brutisso@2645 99 } else {
brutisso@2645 100 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
brutisso@2645 101 _buffer[BUFFER_LEN -1] = 0;
brutisso@2645 102 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
brutisso@2645 103 }
brutisso@2645 104 }
brutisso@2645 105
brutisso@2645 106 public:
brutisso@2645 107 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
brutisso@2645 108 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
brutisso@2645 109 _buffer[_cur] = ' ';
brutisso@2645 110 }
brutisso@2645 111 }
brutisso@2645 112
brutisso@2645 113 #ifndef PRODUCT
brutisso@2645 114 ~LineBuffer() {
brutisso@2645 115 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
brutisso@2645 116 }
brutisso@2645 117 #endif
brutisso@2645 118
brutisso@2645 119 void append(const char* format, ...) {
brutisso@2645 120 va_list ap;
brutisso@2645 121 va_start(ap, format);
brutisso@2645 122 vappend(format, ap);
brutisso@2645 123 va_end(ap);
brutisso@2645 124 }
brutisso@2645 125
brutisso@2645 126 void append_and_print_cr(const char* format, ...) {
brutisso@2645 127 va_list ap;
brutisso@2645 128 va_start(ap, format);
brutisso@2645 129 vappend(format, ap);
brutisso@2645 130 va_end(ap);
brutisso@2645 131 gclog_or_tty->print_cr("%s", _buffer);
brutisso@2645 132 _cur = _indent_level * INDENT_CHARS;
brutisso@2645 133 }
brutisso@2645 134 };
brutisso@2645 135
// Construct the policy. Note: the policy object is created *before* the
// heap (see the comment at setup_heap_region_size() below), so anything
// that depends on the heap's size is initialized lazily, elsewhere.
// Beyond member initialization this seeds the predictors with the
// per-thread-count defaults declared at the top of this file, and
// validates/derives the MaxGCPauseMillis / GCPauseIntervalMillis pair.
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

#ifndef PRODUCT
  _cur_clear_ct_time_ms(0.0),
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  // Auxiliary timing slots (fixed-size scratch arrays, see _aux_num).
  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  // Truncated sequences backing the pause-time prediction model; most
  // are seeded with defaults in the constructor body below.
  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  // We start in fully-young mode.
  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  // Ergonomic-decision logging: enabled (at the "high" level) only
  // when PrintAdaptiveSizePolicy is set.
  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes: a PLAB may not span more than one region.
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  // Per-GC-worker timing arrays (doubles throughout, like the other
  // per-worker stats), one slot per parallel GC thread.
  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  // Seed the predictors from the per-thread-count default tables at the
  // top of this file; the index is ParallelGCThreads - 1 clamped to [0, 7].
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    // NOTE(review): MaxGCPauseMillis / GCPauseIntervalMillis are uintx
    // flags; "%u" assumes unsigned int and may mismatch on LP64 -
    // consider UINTX_FORMAT. Verify against globalDefinitions.
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  // Confidence multiplier used by the predictors (fraction of a sigma).
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    // NOTE(review): "%u" for a uintx argument - same format-specifier
    // concern as above; confirm on LP64 platforms.
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set in calculate_reserve() when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
}
ysr@777 439
// Advance "i" to the next index, wrapping back to 0 when "len" is reached.
static void inc_mod(int& i, int len) {
  i = (i + 1 == len) ? 0 : i + 1;
}
ysr@777 444
// Set the heap alignment constraints (the minimum alignment is tied to
// the G1 region size) and validate flag values before delegating to the
// shared CollectorPolicy flag initialization.
void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  // Reject nonsensical survivor ratios up front (presumably the ratio is
  // used as a divisor downstream, where a value of 0 would be fatal).
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
ysr@777 453
tonyp@1720 454 // The easiest way to deal with the parsing of the NewSize /
tonyp@1720 455 // MaxNewSize / etc. parameteres is to re-use the code in the
tonyp@1720 456 // TwoGenerationCollectorPolicy class. This is similar to what
tonyp@1720 457 // ParallelScavenge does with its GenerationSizer class (see
tonyp@1720 458 // ParallelScavengeHeap::initialize()). We might change this in the
tonyp@1720 459 // future, but it's a good start.
tonyp@1720 460 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
tonyp@1720 461 size_t size_to_region_num(size_t byte_size) {
tonyp@1720 462 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
tonyp@1720 463 }
tonyp@1720 464
tonyp@1720 465 public:
tonyp@1720 466 G1YoungGenSizer() {
tonyp@1720 467 initialize_flags();
tonyp@1720 468 initialize_size_info();
tonyp@1720 469 }
tonyp@1720 470
tonyp@1720 471 size_t min_young_region_num() {
tonyp@1720 472 return size_to_region_num(_min_gen0_size);
tonyp@1720 473 }
tonyp@1720 474 size_t initial_young_region_num() {
tonyp@1720 475 return size_to_region_num(_initial_gen0_size);
tonyp@1720 476 }
tonyp@1720 477 size_t max_young_region_num() {
tonyp@1720 478 return size_to_region_num(_max_gen0_size);
tonyp@1720 479 }
tonyp@1720 480 };
tonyp@1720 481
ysr@777 482 void G1CollectorPolicy::init() {
ysr@777 483 // Set aside an initial future to_space.
ysr@777 484 _g1 = G1CollectedHeap::heap();
ysr@777 485
ysr@777 486 assert(Heap_lock->owned_by_self(), "Locking discipline.");
ysr@777 487
apetrusenko@980 488 initialize_gc_policy_counters();
apetrusenko@980 489
brutisso@3065 490 G1YoungGenSizer sizer;
brutisso@3065 491 size_t initial_region_num = sizer.initial_young_region_num();
brutisso@3065 492
brutisso@3065 493 if (UseAdaptiveSizePolicy) {
brutisso@3065 494 set_adaptive_young_list_length(true);
brutisso@3065 495 _young_list_fixed_length = 0;
johnc@1829 496 } else {
brutisso@3065 497 set_adaptive_young_list_length(false);
brutisso@3065 498 _young_list_fixed_length = initial_region_num;
ysr@777 499 }
brutisso@3065 500 _free_regions_at_end_of_collection = _g1->free_regions();
tonyp@3119 501 update_young_list_target_length();
johnc@1829 502
johnc@1829 503 // We may immediately start allocating regions and placing them on the
johnc@1829 504 // collection set list. Initialize the per-collection set info
johnc@1829 505 start_incremental_cset_building();
ysr@777 506 }
ysr@777 507
// Create the jstat counters for the policy.
// NOTE(review): the numeric arguments (1, 3) follow the GCPolicyCounters
// constructor's (collectors, generations) convention - confirm there.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
apetrusenko@980 512
tonyp@3119 513 bool G1CollectorPolicy::predict_will_fit(size_t young_length,
tonyp@3119 514 double base_time_ms,
tonyp@3119 515 size_t base_free_regions,
tonyp@3119 516 double target_pause_time_ms) {
tonyp@3119 517 if (young_length >= base_free_regions) {
tonyp@3119 518 // end condition 1: not enough space for the young regions
tonyp@3119 519 return false;
ysr@777 520 }
tonyp@3119 521
tonyp@3119 522 double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
tonyp@3119 523 size_t bytes_to_copy =
tonyp@3119 524 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
tonyp@3119 525 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
tonyp@3119 526 double young_other_time_ms = predict_young_other_time_ms(young_length);
tonyp@3119 527 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
tonyp@3119 528 if (pause_time_ms > target_pause_time_ms) {
tonyp@3119 529 // end condition 2: prediction is over the target pause time
tonyp@3119 530 return false;
tonyp@3119 531 }
tonyp@3119 532
tonyp@3119 533 size_t free_bytes =
tonyp@3119 534 (base_free_regions - young_length) * HeapRegion::GrainBytes;
tonyp@3119 535 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
tonyp@3119 536 // end condition 3: out-of-space (conservatively!)
tonyp@3119 537 return false;
tonyp@3119 538 }
tonyp@3119 539
tonyp@3119 540 // success!
tonyp@3119 541 return true;
ysr@777 542 }
ysr@777 543
tonyp@3119 544 void G1CollectorPolicy::calculate_reserve(size_t all_regions) {
tonyp@3119 545 double reserve_regions_d = (double) all_regions * _reserve_factor;
tonyp@3119 546 // We use ceiling so that if reserve_regions_d is > 0.0 (but
tonyp@3119 547 // smaller than 1.0) we'll get 1.
tonyp@3119 548 _reserve_regions = (size_t) ceil(reserve_regions_d);
tonyp@3119 549 }
tonyp@3119 550
tonyp@3119 551 size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
tonyp@3119 552 size_t base_min_length) {
tonyp@3119 553 size_t desired_min_length = 0;
ysr@777 554 if (adaptive_young_list_length()) {
tonyp@3119 555 if (_alloc_rate_ms_seq->num() > 3) {
tonyp@3119 556 double now_sec = os::elapsedTime();
tonyp@3119 557 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
tonyp@3119 558 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@3119 559 desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@3119 560 } else {
tonyp@3119 561 // otherwise we don't have enough info to make the prediction
tonyp@3119 562 }
ysr@777 563 }
tonyp@3119 564 // Here, we might want to also take into account any additional
tonyp@3119 565 // constraints (i.e., user-defined minimum bound). Currently, we don't.
tonyp@3119 566 return base_min_length + desired_min_length;
ysr@777 567 }
ysr@777 568
// Desired upper bound on the young list length, in regions. With no
// user-defined constraint this is the total region count, i.e.,
// effectively unbounded (the caller separately computes an absolute max
// from free space and the reserve, which does the real limiting).
size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _g1->n_regions();
}
tonyp@3119 575
tonyp@3119 576 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
tonyp@3119 577 if (rs_lengths == (size_t) -1) {
tonyp@3119 578 // if it's set to the default value (-1), we should predict it;
tonyp@3119 579 // otherwise, use the given value.
tonyp@3119 580 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
tonyp@3119 581 }
tonyp@3119 582
tonyp@3119 583 // Calculate the absolute and desired min bounds.
tonyp@3119 584
tonyp@3119 585 // This is how many young regions we already have (currently: the survivors).
tonyp@3119 586 size_t base_min_length = recorded_survivor_regions();
tonyp@3119 587 // This is the absolute minimum young length, which ensures that we
tonyp@3119 588 // can allocate one eden region in the worst-case.
tonyp@3119 589 size_t absolute_min_length = base_min_length + 1;
tonyp@3119 590 size_t desired_min_length =
tonyp@3119 591 calculate_young_list_desired_min_length(base_min_length);
tonyp@3119 592 if (desired_min_length < absolute_min_length) {
tonyp@3119 593 desired_min_length = absolute_min_length;
tonyp@3119 594 }
tonyp@3119 595
tonyp@3119 596 // Calculate the absolute and desired max bounds.
tonyp@3119 597
tonyp@3119 598 // We will try our best not to "eat" into the reserve.
tonyp@3119 599 size_t absolute_max_length = 0;
tonyp@3119 600 if (_free_regions_at_end_of_collection > _reserve_regions) {
tonyp@3119 601 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
tonyp@3119 602 }
tonyp@3119 603 size_t desired_max_length = calculate_young_list_desired_max_length();
tonyp@3119 604 if (desired_max_length > absolute_max_length) {
tonyp@3119 605 desired_max_length = absolute_max_length;
tonyp@3119 606 }
tonyp@3119 607
tonyp@3119 608 size_t young_list_target_length = 0;
tonyp@3119 609 if (adaptive_young_list_length()) {
tonyp@3119 610 if (full_young_gcs()) {
tonyp@3119 611 young_list_target_length =
tonyp@3119 612 calculate_young_list_target_length(rs_lengths,
tonyp@3119 613 base_min_length,
tonyp@3119 614 desired_min_length,
tonyp@3119 615 desired_max_length);
tonyp@3119 616 _rs_lengths_prediction = rs_lengths;
tonyp@3119 617 } else {
tonyp@3119 618 // Don't calculate anything and let the code below bound it to
tonyp@3119 619 // the desired_min_length, i.e., do the next GC as soon as
tonyp@3119 620 // possible to maximize how many old regions we can add to it.
ysr@777 621 }
ysr@777 622 } else {
tonyp@3119 623 if (full_young_gcs()) {
tonyp@3119 624 young_list_target_length = _young_list_fixed_length;
tonyp@3119 625 } else {
tonyp@3119 626 // A bit arbitrary: during partially-young GCs we allocate half
tonyp@3119 627 // the young regions to try to add old regions to the CSet.
tonyp@3119 628 young_list_target_length = _young_list_fixed_length / 2;
tonyp@3119 629 // We choose to accept that we might go under the desired min
tonyp@3119 630 // length given that we intentionally ask for a smaller young gen.
tonyp@3119 631 desired_min_length = absolute_min_length;
tonyp@3119 632 }
ysr@777 633 }
ysr@777 634
tonyp@3119 635 // Make sure we don't go over the desired max length, nor under the
tonyp@3119 636 // desired min length. In case they clash, desired_min_length wins
tonyp@3119 637 // which is why that test is second.
tonyp@3119 638 if (young_list_target_length > desired_max_length) {
tonyp@3119 639 young_list_target_length = desired_max_length;
tonyp@3119 640 }
tonyp@3119 641 if (young_list_target_length < desired_min_length) {
tonyp@3119 642 young_list_target_length = desired_min_length;
tonyp@3119 643 }
tonyp@3119 644
tonyp@3119 645 assert(young_list_target_length > recorded_survivor_regions(),
tonyp@3119 646 "we should be able to allocate at least one eden region");
tonyp@3119 647 assert(young_list_target_length >= absolute_min_length, "post-condition");
tonyp@3119 648 _young_list_target_length = young_list_target_length;
tonyp@3119 649
tonyp@3119 650 update_max_gc_locker_expansion();
ysr@777 651 }
ysr@777 652
// Computes the total young list target length (base regions, e.g.
// survivors, plus eden) for the next fully-young GC such that the
// predicted pause time fits within the pause-time goal. The eden
// length is found by binary search over [desired_min, desired_max].
//
// rs_lengths:         predicted remembered set lengths, used in the
//                     card scanning cost prediction
// base_min_length:    young regions already committed (they are not
//                     part of the search space)
// desired_min_length: lower bound on the returned total length
// desired_max_length: upper bound on the returned total length
// Returns the chosen total (base + eden) young list target length.
size_t
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      size_t base_min_length,
                                                      size_t desired_min_length,
                                                      size_t desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(full_young_gcs(), "only call this for fully-young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  size_t min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  size_t max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  // Fixed cost of the pause that does not depend on the eden length.
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  // Regions usable for to-space: free regions minus the reserve.
  size_t available_free_regions = _free_regions_at_end_of_collection;
  size_t base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      size_t diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        size_t young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length <  max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The results is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}
ysr@777 756
apetrusenko@980 757 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 758 double survivor_regions_evac_time = 0.0;
apetrusenko@980 759 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 760 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 761 r = r->get_next_young_region()) {
apetrusenko@980 762 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 763 }
apetrusenko@980 764 return survivor_regions_evac_time;
apetrusenko@980 765 }
apetrusenko@980 766
tonyp@3119 767 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
ysr@777 768 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 769
johnc@1829 770 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 771 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 772 // add 10% to avoid having to recalculate often
ysr@777 773 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
tonyp@3119 774 update_young_list_target_length(rs_lengths_prediction);
ysr@777 775 }
ysr@777 776 }
ysr@777 777
tonyp@3119 778
tonyp@3119 779
// CollectorPolicy allocation hook; G1 does not route allocations
// through this policy method yet, so reaching it is a bug.
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 786
ysr@777 787 // This method controls how a collector handles one or more
ysr@777 788 // of its generations being fully allocated.
// This method controls how a collector handles one or more
// of its generations being fully allocated. G1 does not use this
// policy hook yet, so reaching it is a bug.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 794
ysr@777 795
ysr@777 796 #ifndef PRODUCT
ysr@777 797 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 798 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 799 return
ysr@777 800 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 801 // also call verify_young_ages on any additional surv rate groups
ysr@777 802 }
ysr@777 803
ysr@777 804 bool
ysr@777 805 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 806 SurvRateGroup *surv_rate_group) {
ysr@777 807 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 808
ysr@777 809 const char* name = surv_rate_group->name();
ysr@777 810 bool ret = true;
ysr@777 811 int prev_age = -1;
ysr@777 812
ysr@777 813 for (HeapRegion* curr = head;
ysr@777 814 curr != NULL;
ysr@777 815 curr = curr->get_next_young_region()) {
ysr@777 816 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 817 if (group == NULL && !curr->is_survivor()) {
ysr@777 818 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 819 ret = false;
ysr@777 820 }
ysr@777 821
ysr@777 822 if (surv_rate_group == group) {
ysr@777 823 int age = curr->age_in_surv_rate_group();
ysr@777 824
ysr@777 825 if (age < 0) {
ysr@777 826 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 827 ret = false;
ysr@777 828 }
ysr@777 829
ysr@777 830 if (age <= prev_age) {
ysr@777 831 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 832 "(%d, %d)", name, age, prev_age);
ysr@777 833 ret = false;
ysr@777 834 }
ysr@777 835 prev_age = age;
ysr@777 836 }
ysr@777 837 }
ysr@777 838
ysr@777 839 return ret;
ysr@777 840 }
ysr@777 841 #endif // PRODUCT
ysr@777 842
// Record the start time of a full (stop-the-world) collection and
// flip the heap into full-collection mode.
void G1CollectorPolicy::record_full_collection_start() {
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}
ysr@777 848
// Bookkeeping for the end of a full GC: record its duration, reset
// the heuristics that drive the fully/partially young transitions and
// the concurrent-marking state, reset the survivor tracking, and
// recompute the young list target length from scratch.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // Forget any previously recorded survivor regions.
  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
}
ysr@777 888
// Note the time at which the world was stopped; used later to compute
// stop-world / yield durations.
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
ysr@777 892
// Bookkeeping at the start of an evacuation pause: prints the pause
// header, refreshes the survivors policy, snapshots heap usage and
// card counts, and resets the per-pause statistics fields that the
// pause will fill in.
//
// start_time_sec: pause start timestamp (os::elapsedTime() domain)
// start_used:     bytes used in the heap at pause start
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Time spent between the world stopping and the pause starting.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  // Snapshot heap state at the start of the pause.
  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
ysr@777 963
// Cache the time (ms) spent in the mark closure for this pause; it is
// folded into the pause summary in record_collection_pause_end().
void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}
ysr@777 967
// Marks the end of the initial-mark phase of a concurrent cycle.
// Also invoked with 0.0 from record_collection_pause_end() when the
// pause just finished included initial marking.
void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                    mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
ysr@777 975
// Start of the stop-the-world remark pause of the concurrent cycle.
void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}
ysr@777 980
ysr@777 981 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 982 double end_time_sec = os::elapsedTime();
ysr@777 983 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 984 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 985 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 986 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 987
ysr@777 988 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 989 }
ysr@777 990
// Start of the stop-the-world cleanup pause of the concurrent cycle.
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
ysr@777 994
// End of the cleanup pause: delegates to the two "work" halves below
// (marking-count bookkeeping, then timing/MMU accounting).
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}
ysr@777 1001
// First half of cleanup-end bookkeeping: count the completed marking,
// saturating _n_marks at 2. The freed_bytes and max_live_bytes
// arguments are currently unused here.
void
G1CollectorPolicy::
record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                         size_t max_live_bytes) {
  if (_n_marks < 2) {
    _n_marks++;
  }
}
ysr@777 1010
ysr@777 1011 // The important thing about this is that it includes "os::elapsedTime".
ysr@777 1012 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
ysr@777 1013 double end_time_sec = os::elapsedTime();
ysr@777 1014 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
ysr@777 1015 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
ysr@777 1016 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 1017 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 1018
ysr@777 1019 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
ysr@777 1020
ysr@777 1021 _num_markings++;
ysr@777 1022 _n_pauses_at_mark_end = _n_pauses;
ysr@777 1023 _n_marks_since_last_pause++;
ysr@777 1024 }
ysr@777 1025
void
G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  // The concurrent cycle has fully completed: stop treating the
  // marking window as open and don't revert to fully-young GCs.
  _should_revert_to_full_young_gcs = false;
  // NOTE(review): presumably flags that the fully-young GC just done
  // (or about to be done) is the last before switching to
  // partially-young GCs — confirm against users of this field.
  _last_full_young_gc = true;
  _in_marking_window = false;
}
ysr@777 1032
ysr@777 1033 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 1034 if (_stop_world_start > 0.0) {
ysr@777 1035 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 1036 _all_yield_times_ms->add(yield_ms);
ysr@777 1037 }
ysr@777 1038 }
ysr@777 1039
// Currently a no-op; the yield time is accounted in
// record_concurrent_pause() instead.
void G1CollectorPolicy::record_concurrent_pause_end() {
}
ysr@777 1042
// Sums n consecutive entries of the circular buffer sum_arr (capacity
// N), starting at index start and wrapping around modulo N.
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T total = (T)0;
  int idx = start;
  for (int count = 0; count < n; count++) {
    total += sum_arr[idx % N];
    idx++;
  }
  return total;
}
ysr@777 1052
tonyp@1966 1053 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1054 const char* str,
brutisso@2712 1055 double* data) {
ysr@777 1056 double min = data[0], max = data[0];
ysr@777 1057 double total = 0.0;
brutisso@2645 1058 LineBuffer buf(level);
brutisso@2645 1059 buf.append("[%s (ms):", str);
ysr@777 1060 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1061 double val = data[i];
ysr@777 1062 if (val < min)
ysr@777 1063 min = val;
ysr@777 1064 if (val > max)
ysr@777 1065 max = val;
ysr@777 1066 total += val;
brutisso@2645 1067 buf.append(" %3.1lf", val);
ysr@777 1068 }
brutisso@2712 1069 buf.append_and_print_cr("");
brutisso@2712 1070 double avg = total / (double) ParallelGCThreads;
brutisso@2712 1071 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
brutisso@2712 1072 avg, min, max, max - min);
ysr@777 1073 }
ysr@777 1074
tonyp@1966 1075 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1076 const char* str,
brutisso@2712 1077 double* data) {
ysr@777 1078 double min = data[0], max = data[0];
ysr@777 1079 double total = 0.0;
brutisso@2645 1080 LineBuffer buf(level);
brutisso@2645 1081 buf.append("[%s :", str);
ysr@777 1082 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1083 double val = data[i];
ysr@777 1084 if (val < min)
ysr@777 1085 min = val;
ysr@777 1086 if (val > max)
ysr@777 1087 max = val;
ysr@777 1088 total += val;
brutisso@2645 1089 buf.append(" %d", (int) val);
ysr@777 1090 }
brutisso@2712 1091 buf.append_and_print_cr("");
brutisso@2712 1092 double avg = total / (double) ParallelGCThreads;
brutisso@2712 1093 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
brutisso@2712 1094 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
ysr@777 1095 }
ysr@777 1096
ysr@777 1097 void G1CollectorPolicy::print_stats (int level,
ysr@777 1098 const char* str,
ysr@777 1099 double value) {
brutisso@2645 1100 LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1101 }
ysr@777 1102
ysr@777 1103 void G1CollectorPolicy::print_stats (int level,
ysr@777 1104 const char* str,
ysr@777 1105 int value) {
brutisso@2645 1106 LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
ysr@777 1107 }
ysr@777 1108
ysr@777 1109 double G1CollectorPolicy::avg_value (double* data) {
jmasa@2188 1110 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1111 double ret = 0.0;
ysr@777 1112 for (uint i = 0; i < ParallelGCThreads; ++i)
ysr@777 1113 ret += data[i];
ysr@777 1114 return ret / (double) ParallelGCThreads;
ysr@777 1115 } else {
ysr@777 1116 return data[0];
ysr@777 1117 }
ysr@777 1118 }
ysr@777 1119
ysr@777 1120 double G1CollectorPolicy::max_value (double* data) {
jmasa@2188 1121 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1122 double ret = data[0];
ysr@777 1123 for (uint i = 1; i < ParallelGCThreads; ++i)
ysr@777 1124 if (data[i] > ret)
ysr@777 1125 ret = data[i];
ysr@777 1126 return ret;
ysr@777 1127 } else {
ysr@777 1128 return data[0];
ysr@777 1129 }
ysr@777 1130 }
ysr@777 1131
ysr@777 1132 double G1CollectorPolicy::sum_of_values (double* data) {
jmasa@2188 1133 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1134 double sum = 0.0;
ysr@777 1135 for (uint i = 0; i < ParallelGCThreads; i++)
ysr@777 1136 sum += data[i];
ysr@777 1137 return sum;
ysr@777 1138 } else {
ysr@777 1139 return data[0];
ysr@777 1140 }
ysr@777 1141 }
ysr@777 1142
ysr@777 1143 double G1CollectorPolicy::max_sum (double* data1,
ysr@777 1144 double* data2) {
ysr@777 1145 double ret = data1[0] + data2[0];
ysr@777 1146
jmasa@2188 1147 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1148 for (uint i = 1; i < ParallelGCThreads; ++i) {
ysr@777 1149 double data = data1[i] + data2[i];
ysr@777 1150 if (data > ret)
ysr@777 1151 ret = data;
ysr@777 1152 }
ysr@777 1153 }
ysr@777 1154 return ret;
ysr@777 1155 }
ysr@777 1156
ysr@777 1157 // Anything below that is considered to be zero
ysr@777 1158 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1159
tonyp@2062 1160 void G1CollectorPolicy::record_collection_pause_end() {
ysr@777 1161 double end_time_sec = os::elapsedTime();
ysr@777 1162 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1163 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 1164 size_t rs_size =
ysr@777 1165 _cur_collection_pause_used_regions_at_start - collection_set_size();
ysr@777 1166 size_t cur_used_bytes = _g1->used();
ysr@777 1167 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1168 bool last_pause_included_initial_mark = false;
tonyp@2062 1169 bool update_stats = !_g1->evacuation_failed();
ysr@777 1170
ysr@777 1171 #ifndef PRODUCT
ysr@777 1172 if (G1YoungSurvRateVerbose) {
ysr@777 1173 gclog_or_tty->print_cr("");
ysr@777 1174 _short_lived_surv_rate_group->print();
ysr@777 1175 // do that for any other surv rate groups too
ysr@777 1176 }
ysr@777 1177 #endif // PRODUCT
ysr@777 1178
brutisso@3065 1179 last_pause_included_initial_mark = during_initial_mark_pause();
brutisso@3065 1180 if (last_pause_included_initial_mark)
brutisso@3065 1181 record_concurrent_mark_init_end(0.0);
brutisso@3065 1182
tonyp@3114 1183 size_t marking_initiating_used_threshold =
brutisso@3065 1184 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
brutisso@3065 1185
brutisso@3065 1186 if (!_g1->mark_in_progress() && !_last_full_young_gc) {
brutisso@3065 1187 assert(!last_pause_included_initial_mark, "invariant");
tonyp@3114 1188 if (cur_used_bytes > marking_initiating_used_threshold) {
tonyp@3114 1189 if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
tonyp@1794 1190 assert(!during_initial_mark_pause(), "we should not see this here");
tonyp@1794 1191
tonyp@3114 1192 ergo_verbose3(ErgoConcCycles,
tonyp@3114 1193 "request concurrent cycle initiation",
tonyp@3114 1194 ergo_format_reason("occupancy higher than threshold")
tonyp@3114 1195 ergo_format_byte("occupancy")
tonyp@3114 1196 ergo_format_byte_perc("threshold"),
tonyp@3114 1197 cur_used_bytes,
tonyp@3114 1198 marking_initiating_used_threshold,
tonyp@3114 1199 (double) InitiatingHeapOccupancyPercent);
tonyp@3114 1200
tonyp@1794 1201 // Note: this might have already been set, if during the last
tonyp@1794 1202 // pause we decided to start a cycle but at the beginning of
tonyp@1794 1203 // this pause we decided to postpone it. That's OK.
tonyp@1794 1204 set_initiate_conc_mark_if_possible();
tonyp@3114 1205 } else {
tonyp@3114 1206 ergo_verbose2(ErgoConcCycles,
tonyp@3114 1207 "do not request concurrent cycle initiation",
tonyp@3114 1208 ergo_format_reason("occupancy lower than previous occupancy")
tonyp@3114 1209 ergo_format_byte("occupancy")
tonyp@3114 1210 ergo_format_byte("previous occupancy"),
tonyp@3114 1211 cur_used_bytes,
tonyp@3114 1212 _prev_collection_pause_used_at_end_bytes);
tonyp@3114 1213 }
ysr@777 1214 }
ysr@777 1215 }
ysr@777 1216
brutisso@3065 1217 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
brutisso@3065 1218
ysr@777 1219 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1220 end_time_sec, false);
ysr@777 1221
ysr@777 1222 guarantee(_cur_collection_pause_used_regions_at_start >=
ysr@777 1223 collection_set_size(),
ysr@777 1224 "Negative RS size?");
ysr@777 1225
ysr@777 1226 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1227 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1228 // can lead to more memory being used during collection than was used
ysr@777 1229 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1230 // Pauses in which evacuation failed can also lead to negative
ysr@777 1231 // collections, since no space is reclaimed from a region containing an
ysr@777 1232 // object whose evacuation failed.
ysr@777 1233 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1234 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1235 // (DLD, 10/05.)
ysr@777 1236 assert((true || parallel) // Always using GC LABs now.
ysr@777 1237 || _g1->evacuation_failed()
ysr@777 1238 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1239 "Negative collection");
ysr@777 1240
ysr@777 1241 size_t freed_bytes =
ysr@777 1242 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1243 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1244
ysr@777 1245 double survival_fraction =
ysr@777 1246 (double)surviving_bytes/
ysr@777 1247 (double)_collection_set_bytes_used_before;
ysr@777 1248
ysr@777 1249 _n_pauses++;
ysr@777 1250
johnc@3021 1251 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
johnc@3021 1252 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
johnc@3021 1253 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
johnc@3021 1254 double update_rs_processed_buffers =
johnc@3021 1255 sum_of_values(_par_last_update_rs_processed_buffers);
johnc@3021 1256 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
johnc@3021 1257 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
johnc@3021 1258 double termination_time = avg_value(_par_last_termination_times_ms);
johnc@3021 1259
johnc@3021 1260 double parallel_known_time = update_rs_time +
johnc@3021 1261 ext_root_scan_time +
johnc@3021 1262 mark_stack_scan_time +
johnc@3021 1263 scan_rs_time +
johnc@3021 1264 obj_copy_time +
johnc@3021 1265 termination_time;
johnc@3021 1266
johnc@3021 1267 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
johnc@3021 1268
johnc@3021 1269 PauseSummary* summary = _summary;
johnc@3021 1270
tonyp@1030 1271 if (update_stats) {
johnc@3021 1272 _recent_rs_scan_times_ms->add(scan_rs_time);
ysr@777 1273 _recent_pause_times_ms->add(elapsed_ms);
ysr@777 1274 _recent_rs_sizes->add(rs_size);
ysr@777 1275
johnc@3021 1276 MainBodySummary* body_summary = summary->main_body_summary();
johnc@3021 1277 guarantee(body_summary != NULL, "should not be null!");
johnc@3021 1278
johnc@3021 1279 if (_satb_drain_time_set)
johnc@3021 1280 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
johnc@3021 1281 else
johnc@3021 1282 body_summary->record_satb_drain_time_ms(0.0);
johnc@3021 1283
johnc@3021 1284 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
johnc@3021 1285 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
johnc@3021 1286 body_summary->record_update_rs_time_ms(update_rs_time);
johnc@3021 1287 body_summary->record_scan_rs_time_ms(scan_rs_time);
johnc@3021 1288 body_summary->record_obj_copy_time_ms(obj_copy_time);
johnc@3021 1289 if (parallel) {
johnc@3021 1290 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
johnc@3021 1291 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
johnc@3021 1292 body_summary->record_termination_time_ms(termination_time);
johnc@3021 1293 body_summary->record_parallel_other_time_ms(parallel_other_time);
johnc@3021 1294 }
johnc@3021 1295 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
johnc@3021 1296
ysr@777 1297 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1298 // fragmentation can produce negative collections. Same with evac
ysr@777 1299 // failure.
ysr@777 1300 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1301 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1302 // (DLD, 10/05.
ysr@777 1303 assert((true || parallel)
ysr@777 1304 || _g1->evacuation_failed()
ysr@777 1305 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1306 "Or else negative collection!");
ysr@777 1307 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
ysr@777 1308 _recent_CS_bytes_surviving->add(surviving_bytes);
ysr@777 1309
ysr@777 1310 // this is where we update the allocation rate of the application
ysr@777 1311 double app_time_ms =
ysr@777 1312 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1313 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1314 // This usually happens due to the timer not having the required
ysr@777 1315 // granularity. Some Linuxes are the usual culprits.
ysr@777 1316 // We'll just set it to something (arbitrarily) small.
ysr@777 1317 app_time_ms = 1.0;
ysr@777 1318 }
ysr@777 1319 size_t regions_allocated =
ysr@777 1320 (_region_num_young - _prev_region_num_young) +
ysr@777 1321 (_region_num_tenured - _prev_region_num_tenured);
ysr@777 1322 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1323 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1324 _prev_region_num_young = _region_num_young;
ysr@777 1325 _prev_region_num_tenured = _region_num_tenured;
ysr@777 1326
ysr@777 1327 double interval_ms =
ysr@777 1328 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1329 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1330 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1331 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1332 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1333 #ifndef PRODUCT
ysr@1521 1334 // Dump info to allow post-facto debugging
ysr@1521 1335 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1336 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1337 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1338 _recent_gc_times_ms->dump();
ysr@1521 1339 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1340 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1341 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1342 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1343 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1344 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1345 #endif // !PRODUCT
ysr@1522 1346 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1347 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1348 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1349 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1350 } else {
ysr@1521 1351 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1352 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1353 }
ysr@1521 1354 }
ysr@777 1355 }
ysr@777 1356
ysr@777 1357 if (G1PolicyVerbose > 1) {
ysr@777 1358 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
ysr@777 1359 }
ysr@777 1360
ysr@777 1361 if (G1PolicyVerbose > 1) {
ysr@777 1362 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1363 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1364 " |RS|: " SIZE_FORMAT,
ysr@777 1365 elapsed_ms, recent_avg_time_for_pauses_ms(),
johnc@3021 1366 scan_rs_time, recent_avg_time_for_rs_scan_ms(),
ysr@777 1367 rs_size);
ysr@777 1368
ysr@777 1369 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
ysr@777 1370 " At end " SIZE_FORMAT "K\n"
ysr@777 1371 " garbage : " SIZE_FORMAT "K"
ysr@777 1372 " of " SIZE_FORMAT "K\n"
ysr@777 1373 " survival : %6.2f%% (%6.2f%% avg)",
ysr@777 1374 _cur_collection_pause_used_at_start_bytes/K,
ysr@777 1375 _g1->used()/K, freed_bytes/K,
ysr@777 1376 _collection_set_bytes_used_before/K,
ysr@777 1377 survival_fraction*100.0,
ysr@777 1378 recent_avg_survival_fraction()*100.0);
ysr@777 1379 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
ysr@777 1380 recent_avg_pause_time_ratio() * 100.0);
ysr@777 1381 }
ysr@777 1382
ysr@777 1383 double other_time_ms = elapsed_ms;
ysr@777 1384
tonyp@2062 1385 if (_satb_drain_time_set) {
tonyp@2062 1386 other_time_ms -= _cur_satb_drain_time_ms;
ysr@777 1387 }
ysr@777 1388
tonyp@2062 1389 if (parallel) {
tonyp@2062 1390 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
tonyp@2062 1391 } else {
tonyp@2062 1392 other_time_ms -=
tonyp@2062 1393 update_rs_time +
tonyp@2062 1394 ext_root_scan_time + mark_stack_scan_time +
tonyp@2062 1395 scan_rs_time + obj_copy_time;
tonyp@2062 1396 }
tonyp@2062 1397
ysr@777 1398 if (PrintGCDetails) {
tonyp@2062 1399 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1400 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1401 elapsed_ms / 1000.0);
ysr@777 1402
tonyp@2062 1403 if (_satb_drain_time_set) {
tonyp@2062 1404 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
tonyp@2062 1405 }
tonyp@2062 1406 if (_last_satb_drain_processed_buffers >= 0) {
tonyp@2062 1407 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
tonyp@2062 1408 }
tonyp@2062 1409 if (parallel) {
tonyp@2062 1410 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
brutisso@2712 1411 print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
tonyp@2062 1412 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
brutisso@2712 1413 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
brutisso@2712 1414 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
brutisso@2712 1415 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
tonyp@2062 1416 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1417 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1418 print_par_stats(2, "Termination", _par_last_termination_times_ms);
brutisso@2712 1419 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
brutisso@2712 1420 print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
brutisso@2712 1421
brutisso@2712 1422 for (int i = 0; i < _parallel_gc_threads; i++) {
brutisso@2712 1423 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
brutisso@2712 1424 }
brutisso@2712 1425 print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
brutisso@2712 1426
johnc@3021 1427 print_stats(2, "Parallel Other", parallel_other_time);
tonyp@2062 1428 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
tonyp@2062 1429 } else {
tonyp@2062 1430 print_stats(1, "Update RS", update_rs_time);
tonyp@2062 1431 print_stats(2, "Processed Buffers",
tonyp@2062 1432 (int)update_rs_processed_buffers);
tonyp@2062 1433 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
tonyp@2062 1434 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
tonyp@2062 1435 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1436 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1437 }
johnc@1325 1438 #ifndef PRODUCT
johnc@1325 1439 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1440 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1441 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1442 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1443 if (_num_cc_clears > 0) {
johnc@1325 1444 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1445 }
johnc@1325 1446 #endif
ysr@777 1447 print_stats(1, "Other", other_time_ms);
johnc@1829 1448 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
johnc@1829 1449
ysr@777 1450 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1451 if (_cur_aux_times_set[i]) {
ysr@777 1452 char buffer[96];
ysr@777 1453 sprintf(buffer, "Aux%d", i);
ysr@777 1454 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1455 }
ysr@777 1456 }
ysr@777 1457 }
ysr@777 1458
ysr@777 1459 _all_pause_times_ms->add(elapsed_ms);
tonyp@1083 1460 if (update_stats) {
tonyp@1083 1461 summary->record_total_time_ms(elapsed_ms);
tonyp@1083 1462 summary->record_other_time_ms(other_time_ms);
tonyp@1083 1463 }
ysr@777 1464 for (int i = 0; i < _aux_num; ++i)
ysr@777 1465 if (_cur_aux_times_set[i])
ysr@777 1466 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
ysr@777 1467
ysr@777 1468 // Reset marks-between-pauses counter.
ysr@777 1469 _n_marks_since_last_pause = 0;
ysr@777 1470
ysr@777 1471 // Update the efficiency-since-mark vars.
ysr@777 1472 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1473 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1474 // This usually happens due to the timer not having the required
ysr@777 1475 // granularity. Some Linuxes are the usual culprits.
ysr@777 1476 // We'll just set it to something (arbitrarily) small.
ysr@777 1477 proc_ms = 1.0;
ysr@777 1478 }
ysr@777 1479 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1480
ysr@777 1481 bool new_in_marking_window = _in_marking_window;
ysr@777 1482 bool new_in_marking_window_im = false;
tonyp@1794 1483 if (during_initial_mark_pause()) {
ysr@777 1484 new_in_marking_window = true;
ysr@777 1485 new_in_marking_window_im = true;
ysr@777 1486 }
ysr@777 1487
brutisso@3065 1488 if (_last_full_young_gc) {
tonyp@3114 1489 ergo_verbose2(ErgoPartiallyYoungGCs,
tonyp@3114 1490 "start partially-young GCs",
tonyp@3114 1491 ergo_format_byte_perc("known garbage"),
tonyp@3114 1492 _known_garbage_bytes, _known_garbage_ratio * 100.0);
brutisso@3065 1493 set_full_young_gcs(false);
brutisso@3065 1494 _last_full_young_gc = false;
brutisso@3065 1495 }
brutisso@3065 1496
brutisso@3065 1497 if ( !_last_young_gc_full ) {
tonyp@3114 1498 if (_should_revert_to_full_young_gcs) {
tonyp@3114 1499 ergo_verbose2(ErgoPartiallyYoungGCs,
tonyp@3114 1500 "end partially-young GCs",
tonyp@3114 1501 ergo_format_reason("partially-young GCs end requested")
tonyp@3114 1502 ergo_format_byte_perc("known garbage"),
tonyp@3114 1503 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3114 1504 set_full_young_gcs(true);
tonyp@3114 1505 } else if (_known_garbage_ratio < 0.05) {
tonyp@3114 1506 ergo_verbose3(ErgoPartiallyYoungGCs,
tonyp@3114 1507 "end partially-young GCs",
tonyp@3114 1508 ergo_format_reason("known garbage percent lower than threshold")
tonyp@3114 1509 ergo_format_byte_perc("known garbage")
tonyp@3114 1510 ergo_format_perc("threshold"),
tonyp@3114 1511 _known_garbage_bytes, _known_garbage_ratio * 100.0,
tonyp@3114 1512 0.05 * 100.0);
tonyp@3114 1513 set_full_young_gcs(true);
tonyp@3114 1514 } else if (adaptive_young_list_length() &&
tonyp@3114 1515 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
tonyp@3114 1516 ergo_verbose5(ErgoPartiallyYoungGCs,
tonyp@3114 1517 "end partially-young GCs",
tonyp@3114 1518 ergo_format_reason("current GC efficiency lower than "
tonyp@3114 1519 "predicted fully-young GC efficiency")
tonyp@3114 1520 ergo_format_double("GC efficiency factor")
tonyp@3114 1521 ergo_format_double("current GC efficiency")
tonyp@3114 1522 ergo_format_double("predicted fully-young GC efficiency")
tonyp@3114 1523 ergo_format_byte_perc("known garbage"),
tonyp@3114 1524 get_gc_eff_factor(), cur_efficiency,
tonyp@3114 1525 predict_young_gc_eff(),
tonyp@3114 1526 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3114 1527 set_full_young_gcs(true);
ysr@777 1528 }
brutisso@3065 1529 }
brutisso@3065 1530 _should_revert_to_full_young_gcs = false;
brutisso@3065 1531
brutisso@3065 1532 if (_last_young_gc_full && !_during_marking) {
brutisso@3065 1533 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1534 }
ysr@777 1535
ysr@777 1536 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1537 // do that for any other surv rate groupsx
ysr@777 1538
ysr@777 1539 // <NEW PREDICTION>
ysr@777 1540
apetrusenko@1112 1541 if (update_stats) {
ysr@777 1542 double pause_time_ms = elapsed_ms;
ysr@777 1543
ysr@777 1544 size_t diff = 0;
ysr@777 1545 if (_max_pending_cards >= _pending_cards)
ysr@777 1546 diff = _max_pending_cards - _pending_cards;
ysr@777 1547 _pending_card_diff_seq->add((double) diff);
ysr@777 1548
ysr@777 1549 double cost_per_card_ms = 0.0;
ysr@777 1550 if (_pending_cards > 0) {
ysr@777 1551 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1552 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1553 }
ysr@777 1554
ysr@777 1555 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1556
ysr@777 1557 double cost_per_entry_ms = 0.0;
ysr@777 1558 if (cards_scanned > 10) {
ysr@777 1559 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
ysr@777 1560 if (_last_young_gc_full)
ysr@777 1561 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1562 else
ysr@777 1563 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1564 }
ysr@777 1565
ysr@777 1566 if (_max_rs_lengths > 0) {
ysr@777 1567 double cards_per_entry_ratio =
ysr@777 1568 (double) cards_scanned / (double) _max_rs_lengths;
ysr@777 1569 if (_last_young_gc_full)
ysr@777 1570 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1571 else
ysr@777 1572 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1573 }
ysr@777 1574
ysr@777 1575 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
ysr@777 1576 if (rs_length_diff >= 0)
ysr@777 1577 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1578
ysr@777 1579 size_t copied_bytes = surviving_bytes;
ysr@777 1580 double cost_per_byte_ms = 0.0;
ysr@777 1581 if (copied_bytes > 0) {
ysr@777 1582 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
ysr@777 1583 if (_in_marking_window)
ysr@777 1584 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
ysr@777 1585 else
ysr@777 1586 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
ysr@777 1587 }
ysr@777 1588
ysr@777 1589 double all_other_time_ms = pause_time_ms -
johnc@1829 1590 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1591 _mark_closure_time_ms + termination_time);
ysr@777 1592
ysr@777 1593 double young_other_time_ms = 0.0;
ysr@777 1594 if (_recorded_young_regions > 0) {
ysr@777 1595 young_other_time_ms =
ysr@777 1596 _recorded_young_cset_choice_time_ms +
ysr@777 1597 _recorded_young_free_cset_time_ms;
ysr@777 1598 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
ysr@777 1599 (double) _recorded_young_regions);
ysr@777 1600 }
ysr@777 1601 double non_young_other_time_ms = 0.0;
ysr@777 1602 if (_recorded_non_young_regions > 0) {
ysr@777 1603 non_young_other_time_ms =
ysr@777 1604 _recorded_non_young_cset_choice_time_ms +
ysr@777 1605 _recorded_non_young_free_cset_time_ms;
ysr@777 1606
ysr@777 1607 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
ysr@777 1608 (double) _recorded_non_young_regions);
ysr@777 1609 }
ysr@777 1610
ysr@777 1611 double constant_other_time_ms = all_other_time_ms -
ysr@777 1612 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1613 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1614
ysr@777 1615 double survival_ratio = 0.0;
ysr@777 1616 if (_bytes_in_collection_set_before_gc > 0) {
tonyp@3028 1617 survival_ratio = (double) _bytes_copied_during_gc /
tonyp@3028 1618 (double) _bytes_in_collection_set_before_gc;
ysr@777 1619 }
ysr@777 1620
ysr@777 1621 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1622 _scanned_cards_seq->add((double) cards_scanned);
ysr@777 1623 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1624
ysr@777 1625 double expensive_region_limit_ms =
johnc@1186 1626 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1627 if (expensive_region_limit_ms < 0.0) {
ysr@777 1628 // this means that the other time was predicted to be longer than
ysr@777 1629 // than the max pause time
johnc@1186 1630 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1631 }
ysr@777 1632 _expensive_region_limit_ms = expensive_region_limit_ms;
ysr@777 1633
ysr@777 1634 if (PREDICTIONS_VERBOSE) {
ysr@777 1635 gclog_or_tty->print_cr("");
ysr@777 1636 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
johnc@1829 1637 "REGIONS %d %d %d "
ysr@777 1638 "PENDING_CARDS %d %d "
ysr@777 1639 "CARDS_SCANNED %d %d "
ysr@777 1640 "RS_LENGTHS %d %d "
ysr@777 1641 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
ysr@777 1642 "SURVIVAL_RATIO %1.6lf %1.6lf "
ysr@777 1643 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
ysr@777 1644 "OTHER_YOUNG %1.6lf %1.6lf "
ysr@777 1645 "OTHER_NON_YOUNG %1.6lf %1.6lf "
ysr@777 1646 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
ysr@777 1647 "ELAPSED %1.6lf %1.6lf ",
ysr@777 1648 _cur_collection_start_sec,
ysr@777 1649 (!_last_young_gc_full) ? 2 :
ysr@777 1650 (last_pause_included_initial_mark) ? 1 : 0,
ysr@777 1651 _recorded_region_num,
ysr@777 1652 _recorded_young_regions,
ysr@777 1653 _recorded_non_young_regions,
ysr@777 1654 _predicted_pending_cards, _pending_cards,
ysr@777 1655 _predicted_cards_scanned, cards_scanned,
ysr@777 1656 _predicted_rs_lengths, _max_rs_lengths,
ysr@777 1657 _predicted_rs_update_time_ms, update_rs_time,
ysr@777 1658 _predicted_rs_scan_time_ms, scan_rs_time,
ysr@777 1659 _predicted_survival_ratio, survival_ratio,
ysr@777 1660 _predicted_object_copy_time_ms, obj_copy_time,
ysr@777 1661 _predicted_constant_other_time_ms, constant_other_time_ms,
ysr@777 1662 _predicted_young_other_time_ms, young_other_time_ms,
ysr@777 1663 _predicted_non_young_other_time_ms,
ysr@777 1664 non_young_other_time_ms,
ysr@777 1665 _vtime_diff_ms, termination_time,
ysr@777 1666 _predicted_pause_time_ms, elapsed_ms);
ysr@777 1667 }
ysr@777 1668
ysr@777 1669 if (G1PolicyVerbose > 0) {
ysr@777 1670 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
ysr@777 1671 _predicted_pause_time_ms,
ysr@777 1672 (_within_target) ? "within" : "outside",
ysr@777 1673 elapsed_ms);
ysr@777 1674 }
ysr@777 1675
ysr@777 1676 }
ysr@777 1677
ysr@777 1678 _in_marking_window = new_in_marking_window;
ysr@777 1679 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1680 _free_regions_at_end_of_collection = _g1->free_regions();
tonyp@3119 1681 update_young_list_target_length();
ysr@777 1682
iveresov@1546 1683 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1684 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1685 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
ysr@777 1686 // </NEW PREDICTION>
ysr@777 1687 }
ysr@777 1688
tonyp@2961 1689 #define EXT_SIZE_FORMAT "%d%s"
tonyp@2961 1690 #define EXT_SIZE_PARAMS(bytes) \
tonyp@2961 1691 byte_size_in_proper_unit((bytes)), \
tonyp@2961 1692 proper_unit_for_byte_size((bytes))
tonyp@2961 1693
tonyp@2961 1694 void G1CollectorPolicy::print_heap_transition() {
tonyp@2961 1695 if (PrintGCDetails) {
tonyp@2961 1696 YoungList* young_list = _g1->young_list();
tonyp@2961 1697 size_t eden_bytes = young_list->eden_used_bytes();
tonyp@2961 1698 size_t survivor_bytes = young_list->survivor_used_bytes();
tonyp@2961 1699 size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
tonyp@2961 1700 size_t used = _g1->used();
tonyp@2961 1701 size_t capacity = _g1->capacity();
tonyp@2961 1702
tonyp@2961 1703 gclog_or_tty->print_cr(
tonyp@2961 1704 " [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
tonyp@2961 1705 "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
tonyp@2961 1706 "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
tonyp@2961 1707 EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
tonyp@2961 1708 EXT_SIZE_PARAMS(_eden_bytes_before_gc),
tonyp@2961 1709 EXT_SIZE_PARAMS(eden_bytes),
tonyp@2961 1710 EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
tonyp@2961 1711 EXT_SIZE_PARAMS(survivor_bytes),
tonyp@2961 1712 EXT_SIZE_PARAMS(used_before_gc),
tonyp@2961 1713 EXT_SIZE_PARAMS(_capacity_before_gc),
tonyp@2961 1714 EXT_SIZE_PARAMS(used),
tonyp@2961 1715 EXT_SIZE_PARAMS(capacity));
tonyp@2961 1716 } else if (PrintGC) {
tonyp@2961 1717 _g1->print_size_transition(gclog_or_tty,
tonyp@2961 1718 _cur_collection_pause_used_at_start_bytes,
tonyp@2961 1719 _g1->used(), _g1->capacity());
tonyp@2961 1720 }
tonyp@2961 1721 }
tonyp@2961 1722
ysr@777 1723 // <NEW PREDICTION>
ysr@777 1724
iveresov@1546 1725 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1726 double update_rs_processed_buffers,
iveresov@1546 1727 double goal_ms) {
iveresov@1546 1728 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1729 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1730
tonyp@1717 1731 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1732 const int k_gy = 3, k_gr = 6;
iveresov@1546 1733 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1734
iveresov@1546 1735 int g = cg1r->green_zone();
iveresov@1546 1736 if (update_rs_time > goal_ms) {
iveresov@1546 1737 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
iveresov@1546 1738 } else {
iveresov@1546 1739 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1740 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1741 }
iveresov@1546 1742 }
iveresov@1546 1743 // Change the refinement threads params
iveresov@1546 1744 cg1r->set_green_zone(g);
iveresov@1546 1745 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1746 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1747 cg1r->reinitialize_threads();
iveresov@1546 1748
iveresov@1546 1749 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1750 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1751 cg1r->yellow_zone());
iveresov@1546 1752 // Change the barrier params
iveresov@1546 1753 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1754 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1755 }
iveresov@1546 1756
iveresov@1546 1757 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1758 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1759 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1760 } else {
iveresov@1546 1761 dcqs.set_completed_queue_padding(0);
iveresov@1546 1762 }
iveresov@1546 1763 dcqs.notify_if_necessary();
iveresov@1546 1764 }
iveresov@1546 1765
ysr@777 1766 double
ysr@777 1767 G1CollectorPolicy::
ysr@777 1768 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1769 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1770
ysr@777 1771 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1772 size_t young_num = g1h->young_list()->length();
ysr@777 1773 if (young_num == 0)
ysr@777 1774 return 0.0;
ysr@777 1775
ysr@777 1776 young_num += adjustment;
ysr@777 1777 size_t pending_cards = predict_pending_cards();
johnc@1829 1778 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1779 predict_rs_length_diff();
ysr@777 1780 size_t card_num;
ysr@777 1781 if (full_young_gcs())
ysr@777 1782 card_num = predict_young_card_num(rs_lengths);
ysr@777 1783 else
ysr@777 1784 card_num = predict_non_young_card_num(rs_lengths);
ysr@777 1785 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1786 double accum_yg_surv_rate =
ysr@777 1787 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1788
ysr@777 1789 size_t bytes_to_copy =
ysr@777 1790 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1791
ysr@777 1792 return
ysr@777 1793 predict_rs_update_time_ms(pending_cards) +
ysr@777 1794 predict_rs_scan_time_ms(card_num) +
ysr@777 1795 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1796 predict_young_other_time_ms(young_num) +
ysr@777 1797 predict_constant_other_time_ms();
ysr@777 1798 }
ysr@777 1799
ysr@777 1800 double
ysr@777 1801 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1802 size_t rs_length = predict_rs_length_diff();
ysr@777 1803 size_t card_num;
ysr@777 1804 if (full_young_gcs())
ysr@777 1805 card_num = predict_young_card_num(rs_length);
ysr@777 1806 else
ysr@777 1807 card_num = predict_non_young_card_num(rs_length);
ysr@777 1808 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1809 }
ysr@777 1810
ysr@777 1811 double
ysr@777 1812 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1813 size_t scanned_cards) {
ysr@777 1814 return
ysr@777 1815 predict_rs_update_time_ms(pending_cards) +
ysr@777 1816 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1817 predict_constant_other_time_ms();
ysr@777 1818 }
ysr@777 1819
ysr@777 1820 double
ysr@777 1821 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1822 bool young) {
ysr@777 1823 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1824 size_t card_num;
ysr@777 1825 if (full_young_gcs())
ysr@777 1826 card_num = predict_young_card_num(rs_length);
ysr@777 1827 else
ysr@777 1828 card_num = predict_non_young_card_num(rs_length);
ysr@777 1829 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1830
ysr@777 1831 double region_elapsed_time_ms =
ysr@777 1832 predict_rs_scan_time_ms(card_num) +
ysr@777 1833 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1834
ysr@777 1835 if (young)
ysr@777 1836 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1837 else
ysr@777 1838 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1839
ysr@777 1840 return region_elapsed_time_ms;
ysr@777 1841 }
ysr@777 1842
ysr@777 1843 size_t
ysr@777 1844 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1845 size_t bytes_to_copy;
ysr@777 1846 if (hr->is_marked())
ysr@777 1847 bytes_to_copy = hr->max_live_bytes();
ysr@777 1848 else {
ysr@777 1849 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1850 "invariant" );
ysr@777 1851 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1852 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1853 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1854 }
ysr@777 1855
ysr@777 1856 return bytes_to_copy;
ysr@777 1857 }
ysr@777 1858
ysr@777 1859 void
ysr@777 1860 G1CollectorPolicy::start_recording_regions() {
ysr@777 1861 _recorded_rs_lengths = 0;
ysr@777 1862 _recorded_young_regions = 0;
ysr@777 1863 _recorded_non_young_regions = 0;
ysr@777 1864
ysr@777 1865 #if PREDICTIONS_VERBOSE
ysr@777 1866 _recorded_marked_bytes = 0;
ysr@777 1867 _recorded_young_bytes = 0;
ysr@777 1868 _predicted_bytes_to_copy = 0;
johnc@1829 1869 _predicted_rs_lengths = 0;
johnc@1829 1870 _predicted_cards_scanned = 0;
ysr@777 1871 #endif // PREDICTIONS_VERBOSE
ysr@777 1872 }
ysr@777 1873
ysr@777 1874 void
johnc@1829 1875 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
ysr@777 1876 #if PREDICTIONS_VERBOSE
johnc@1829 1877 if (!young) {
ysr@777 1878 _recorded_marked_bytes += hr->max_live_bytes();
ysr@777 1879 }
ysr@777 1880 _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
ysr@777 1881 #endif // PREDICTIONS_VERBOSE
ysr@777 1882
ysr@777 1883 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1884 _recorded_rs_lengths += rs_length;
ysr@777 1885 }
ysr@777 1886
ysr@777 1887 void
johnc@1829 1888 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
johnc@1829 1889 assert(!hr->is_young(), "should not call this");
johnc@1829 1890 ++_recorded_non_young_regions;
johnc@1829 1891 record_cset_region_info(hr, false);
johnc@1829 1892 }
johnc@1829 1893
johnc@1829 1894 void
johnc@1829 1895 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
johnc@1829 1896 _recorded_young_regions = n_regions;
johnc@1829 1897 }
johnc@1829 1898
johnc@1829 1899 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
johnc@1829 1900 #if PREDICTIONS_VERBOSE
johnc@1829 1901 _recorded_young_bytes = bytes;
johnc@1829 1902 #endif // PREDICTIONS_VERBOSE
johnc@1829 1903 }
johnc@1829 1904
johnc@1829 1905 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
johnc@1829 1906 _recorded_rs_lengths = rs_lengths;
johnc@1829 1907 }
johnc@1829 1908
johnc@1829 1909 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 1910 _predicted_bytes_to_copy = bytes;
ysr@777 1911 }
ysr@777 1912
ysr@777 1913 void
ysr@777 1914 G1CollectorPolicy::end_recording_regions() {
johnc@1829 1915 // The _predicted_pause_time_ms field is referenced in code
johnc@1829 1916 // not under PREDICTIONS_VERBOSE. Let's initialize it.
johnc@1829 1917 _predicted_pause_time_ms = -1.0;
johnc@1829 1918
ysr@777 1919 #if PREDICTIONS_VERBOSE
ysr@777 1920 _predicted_pending_cards = predict_pending_cards();
ysr@777 1921 _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
ysr@777 1922 if (full_young_gcs())
ysr@777 1923 _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
ysr@777 1924 else
ysr@777 1925 _predicted_cards_scanned +=
ysr@777 1926 predict_non_young_card_num(_predicted_rs_lengths);
ysr@777 1927 _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
ysr@777 1928
ysr@777 1929 _predicted_rs_update_time_ms =
ysr@777 1930 predict_rs_update_time_ms(_g1->pending_card_num());
ysr@777 1931 _predicted_rs_scan_time_ms =
ysr@777 1932 predict_rs_scan_time_ms(_predicted_cards_scanned);
ysr@777 1933 _predicted_object_copy_time_ms =
ysr@777 1934 predict_object_copy_time_ms(_predicted_bytes_to_copy);
ysr@777 1935 _predicted_constant_other_time_ms =
ysr@777 1936 predict_constant_other_time_ms();
ysr@777 1937 _predicted_young_other_time_ms =
ysr@777 1938 predict_young_other_time_ms(_recorded_young_regions);
ysr@777 1939 _predicted_non_young_other_time_ms =
ysr@777 1940 predict_non_young_other_time_ms(_recorded_non_young_regions);
ysr@777 1941
ysr@777 1942 _predicted_pause_time_ms =
ysr@777 1943 _predicted_rs_update_time_ms +
ysr@777 1944 _predicted_rs_scan_time_ms +
ysr@777 1945 _predicted_object_copy_time_ms +
ysr@777 1946 _predicted_constant_other_time_ms +
ysr@777 1947 _predicted_young_other_time_ms +
ysr@777 1948 _predicted_non_young_other_time_ms;
ysr@777 1949 #endif // PREDICTIONS_VERBOSE
ysr@777 1950 }
ysr@777 1951
ysr@777 1952 void G1CollectorPolicy::check_if_region_is_too_expensive(double
ysr@777 1953 predicted_time_ms) {
ysr@777 1954 // I don't think we need to do this when in young GC mode since
ysr@777 1955 // marking will be initiated next time we hit the soft limit anyway...
ysr@777 1956 if (predicted_time_ms > _expensive_region_limit_ms) {
tonyp@3114 1957 ergo_verbose2(ErgoPartiallyYoungGCs,
tonyp@3114 1958 "request partially-young GCs end",
tonyp@3114 1959 ergo_format_reason("predicted region time higher than threshold")
tonyp@3114 1960 ergo_format_ms("predicted region time")
tonyp@3114 1961 ergo_format_ms("threshold"),
tonyp@3114 1962 predicted_time_ms, _expensive_region_limit_ms);
brutisso@3065 1963 // no point in doing another partial one
brutisso@3065 1964 _should_revert_to_full_young_gcs = true;
ysr@777 1965 }
ysr@777 1966 }
ysr@777 1967
ysr@777 1968 // </NEW PREDICTION>
ysr@777 1969
ysr@777 1970
ysr@777 1971 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1972 double elapsed_ms) {
ysr@777 1973 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1974 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1975 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1976 }
ysr@777 1977
ysr@777 1978 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
johnc@3021 1979 if (_recent_pause_times_ms->num() == 0) {
johnc@3021 1980 return (double) MaxGCPauseMillis;
johnc@3021 1981 }
johnc@3021 1982 return _recent_pause_times_ms->avg();
ysr@777 1983 }
ysr@777 1984
johnc@3021 1985 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
johnc@3021 1986 if (_recent_rs_scan_times_ms->num() == 0) {
johnc@1186 1987 return (double)MaxGCPauseMillis/3.0;
johnc@3021 1988 }
johnc@3021 1989 return _recent_rs_scan_times_ms->avg();
ysr@777 1990 }
ysr@777 1991
ysr@777 1992 int G1CollectorPolicy::number_of_recent_gcs() {
johnc@3021 1993 assert(_recent_rs_scan_times_ms->num() ==
ysr@777 1994 _recent_pause_times_ms->num(), "Sequence out of sync");
ysr@777 1995 assert(_recent_pause_times_ms->num() ==
ysr@777 1996 _recent_CS_bytes_used_before->num(), "Sequence out of sync");
ysr@777 1997 assert(_recent_CS_bytes_used_before->num() ==
ysr@777 1998 _recent_CS_bytes_surviving->num(), "Sequence out of sync");
johnc@3021 1999
ysr@777 2000 return _recent_pause_times_ms->num();
ysr@777 2001 }
ysr@777 2002
ysr@777 2003 double G1CollectorPolicy::recent_avg_survival_fraction() {
ysr@777 2004 return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
ysr@777 2005 _recent_CS_bytes_used_before);
ysr@777 2006 }
ysr@777 2007
ysr@777 2008 double G1CollectorPolicy::last_survival_fraction() {
ysr@777 2009 return last_survival_fraction_work(_recent_CS_bytes_surviving,
ysr@777 2010 _recent_CS_bytes_used_before);
ysr@777 2011 }
ysr@777 2012
ysr@777 2013 double
ysr@777 2014 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 2015 TruncatedSeq* before) {
ysr@777 2016 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 2017 if (before->sum() > 0.0) {
ysr@777 2018 double recent_survival_rate = surviving->sum() / before->sum();
ysr@777 2019 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 2020 // fragmentation can produce negative collections.
ysr@777 2021 // Further, we're now always doing parallel collection. But I'm still
ysr@777 2022 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 2023 // (DLD, 10/05.)
jmasa@2188 2024 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
ysr@777 2025 _g1->evacuation_failed() ||
ysr@777 2026 recent_survival_rate <= 1.0, "Or bad frac");
ysr@777 2027 return recent_survival_rate;
ysr@777 2028 } else {
ysr@777 2029 return 1.0; // Be conservative.
ysr@777 2030 }
ysr@777 2031 }
ysr@777 2032
ysr@777 2033 double
ysr@777 2034 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 2035 TruncatedSeq* before) {
ysr@777 2036 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 2037 if (surviving->num() > 0 && before->last() > 0.0) {
ysr@777 2038 double last_survival_rate = surviving->last() / before->last();
ysr@777 2039 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 2040 // fragmentation can produce negative collections.
ysr@777 2041 // Further, we're now always doing parallel collection. But I'm still
ysr@777 2042 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 2043 // (DLD, 10/05.)
jmasa@2188 2044 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
ysr@777 2045 last_survival_rate <= 1.0, "Or bad frac");
ysr@777 2046 return last_survival_rate;
ysr@777 2047 } else {
ysr@777 2048 return 1.0;
ysr@777 2049 }
ysr@777 2050 }
ysr@777 2051
ysr@777 2052 static const int survival_min_obs = 5;
ysr@777 2053 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
ysr@777 2054 static const double min_survival_rate = 0.1;
ysr@777 2055
ysr@777 2056 double
ysr@777 2057 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
ysr@777 2058 double latest) {
ysr@777 2059 double res = avg;
ysr@777 2060 if (number_of_recent_gcs() < survival_min_obs) {
ysr@777 2061 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
ysr@777 2062 }
ysr@777 2063 res = MAX2(res, latest);
ysr@777 2064 res = MAX2(res, min_survival_rate);
ysr@777 2065 // In the parallel case, LAB fragmentation can produce "negative
ysr@777 2066 // collections"; so can evac failure. Cap at 1.0
ysr@777 2067 res = MIN2(res, 1.0);
ysr@777 2068 return res;
ysr@777 2069 }
ysr@777 2070
ysr@777 2071 size_t G1CollectorPolicy::expansion_amount() {
tonyp@3114 2072 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
tonyp@3114 2073 double threshold = _gc_overhead_perc;
tonyp@3114 2074 if (recent_gc_overhead > threshold) {
johnc@1186 2075 // We will double the existing space, or take
johnc@1186 2076 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 2077 // space, whichever is smaller, bounded below by a minimum
johnc@1186 2078 // expansion (unless that's all that's left.)
ysr@777 2079 const size_t min_expand_bytes = 1*M;
johnc@2504 2080 size_t reserved_bytes = _g1->max_capacity();
ysr@777 2081 size_t committed_bytes = _g1->capacity();
ysr@777 2082 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 2083 size_t expand_bytes;
ysr@777 2084 size_t expand_bytes_via_pct =
johnc@1186 2085 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 2086 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 2087 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 2088 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
tonyp@3114 2089
tonyp@3114 2090 ergo_verbose5(ErgoHeapSizing,
tonyp@3114 2091 "attempt heap expansion",
tonyp@3114 2092 ergo_format_reason("recent GC overhead higher than "
tonyp@3114 2093 "threshold after GC")
tonyp@3114 2094 ergo_format_perc("recent GC overhead")
tonyp@3114 2095 ergo_format_perc("threshold")
tonyp@3114 2096 ergo_format_byte("uncommitted")
tonyp@3114 2097 ergo_format_byte_perc("calculated expansion amount"),
tonyp@3114 2098 recent_gc_overhead, threshold,
tonyp@3114 2099 uncommitted_bytes,
tonyp@3114 2100 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
tonyp@3114 2101
ysr@777 2102 return expand_bytes;
ysr@777 2103 } else {
ysr@777 2104 return 0;
ysr@777 2105 }
ysr@777 2106 }
ysr@777 2107
ysr@777 2108 void G1CollectorPolicy::note_start_of_mark_thread() {
ysr@777 2109 _mark_thread_startup_sec = os::elapsedTime();
ysr@777 2110 }
ysr@777 2111
ysr@777 2112 class CountCSClosure: public HeapRegionClosure {
ysr@777 2113 G1CollectorPolicy* _g1_policy;
ysr@777 2114 public:
ysr@777 2115 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 2116 _g1_policy(g1_policy) {}
ysr@777 2117 bool doHeapRegion(HeapRegion* r) {
ysr@777 2118 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 2119 return false;
ysr@777 2120 }
ysr@777 2121 };
ysr@777 2122
ysr@777 2123 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 2124 CountCSClosure cs_closure(this);
ysr@777 2125 _g1->collection_set_iterate(&cs_closure);
ysr@777 2126 }
ysr@777 2127
ysr@777 2128 void G1CollectorPolicy::print_summary (int level,
ysr@777 2129 const char* str,
ysr@777 2130 NumberSeq* seq) const {
ysr@777 2131 double sum = seq->sum();
brutisso@2645 2132 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 2133 str, sum / 1000.0, seq->avg());
ysr@777 2134 }
ysr@777 2135
ysr@777 2136 void G1CollectorPolicy::print_summary_sd (int level,
ysr@777 2137 const char* str,
ysr@777 2138 NumberSeq* seq) const {
ysr@777 2139 print_summary(level, str, seq);
brutisso@2645 2140 LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 2141 seq->num(), seq->sd(), seq->maximum());
ysr@777 2142 }
ysr@777 2143
ysr@777 2144 void G1CollectorPolicy::check_other_times(int level,
ysr@777 2145 NumberSeq* other_times_ms,
ysr@777 2146 NumberSeq* calc_other_times_ms) const {
ysr@777 2147 bool should_print = false;
brutisso@2645 2148 LineBuffer buf(level + 2);
ysr@777 2149
ysr@777 2150 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 2151 fabs(calc_other_times_ms->sum()));
ysr@777 2152 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 2153 fabs(calc_other_times_ms->sum()));
ysr@777 2154 double sum_ratio = max_sum / min_sum;
ysr@777 2155 if (sum_ratio > 1.1) {
ysr@777 2156 should_print = true;
brutisso@2645 2157 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 2158 }
ysr@777 2159
ysr@777 2160 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 2161 fabs(calc_other_times_ms->avg()));
ysr@777 2162 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 2163 fabs(calc_other_times_ms->avg()));
ysr@777 2164 double avg_ratio = max_avg / min_avg;
ysr@777 2165 if (avg_ratio > 1.1) {
ysr@777 2166 should_print = true;
brutisso@2645 2167 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 2168 }
ysr@777 2169
ysr@777 2170 if (other_times_ms->sum() < -0.01) {
brutisso@2645 2171 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 2172 }
ysr@777 2173
ysr@777 2174 if (other_times_ms->avg() < -0.01) {
brutisso@2645 2175 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 2176 }
ysr@777 2177
ysr@777 2178 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 2179 should_print = true;
brutisso@2645 2180 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 2181 }
ysr@777 2182
ysr@777 2183 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 2184 should_print = true;
brutisso@2645 2185 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 2186 }
ysr@777 2187
ysr@777 2188 if (should_print)
ysr@777 2189 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 2190 }
ysr@777 2191
ysr@777 2192 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
jmasa@2188 2193 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 2194 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 2195 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 2196 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 2197 if (body_summary != NULL) {
ysr@777 2198 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
ysr@777 2199 if (parallel) {
ysr@777 2200 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
ysr@777 2201 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2202 print_summary(2, "Ext Root Scanning",
ysr@777 2203 body_summary->get_ext_root_scan_seq());
ysr@777 2204 print_summary(2, "Mark Stack Scanning",
ysr@777 2205 body_summary->get_mark_stack_scan_seq());
ysr@777 2206 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2207 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2208 print_summary(2, "Termination", body_summary->get_termination_seq());
ysr@777 2209 print_summary(2, "Other", body_summary->get_parallel_other_seq());
ysr@777 2210 {
ysr@777 2211 NumberSeq* other_parts[] = {
ysr@777 2212 body_summary->get_update_rs_seq(),
ysr@777 2213 body_summary->get_ext_root_scan_seq(),
ysr@777 2214 body_summary->get_mark_stack_scan_seq(),
ysr@777 2215 body_summary->get_scan_rs_seq(),
ysr@777 2216 body_summary->get_obj_copy_seq(),
ysr@777 2217 body_summary->get_termination_seq()
ysr@777 2218 };
ysr@777 2219 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
johnc@2134 2220 6, other_parts);
ysr@777 2221 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 2222 &calc_other_times_ms);
ysr@777 2223 }
ysr@777 2224 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
ysr@777 2225 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 2226 } else {
ysr@777 2227 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2228 print_summary(1, "Ext Root Scanning",
ysr@777 2229 body_summary->get_ext_root_scan_seq());
ysr@777 2230 print_summary(1, "Mark Stack Scanning",
ysr@777 2231 body_summary->get_mark_stack_scan_seq());
ysr@777 2232 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2233 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2234 }
ysr@777 2235 }
ysr@777 2236 print_summary(1, "Other", summary->get_other_seq());
ysr@777 2237 {
johnc@2134 2238 if (body_summary != NULL) {
johnc@2134 2239 NumberSeq calc_other_times_ms;
johnc@2134 2240 if (parallel) {
johnc@2134 2241 // parallel
johnc@2134 2242 NumberSeq* other_parts[] = {
johnc@2134 2243 body_summary->get_satb_drain_seq(),
johnc@2134 2244 body_summary->get_parallel_seq(),
johnc@2134 2245 body_summary->get_clear_ct_seq()
johnc@2134 2246 };
johnc@2134 2247 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
johnc@2134 2248 3, other_parts);
johnc@2134 2249 } else {
johnc@2134 2250 // serial
johnc@2134 2251 NumberSeq* other_parts[] = {
johnc@2134 2252 body_summary->get_satb_drain_seq(),
johnc@2134 2253 body_summary->get_update_rs_seq(),
johnc@2134 2254 body_summary->get_ext_root_scan_seq(),
johnc@2134 2255 body_summary->get_mark_stack_scan_seq(),
johnc@2134 2256 body_summary->get_scan_rs_seq(),
johnc@2134 2257 body_summary->get_obj_copy_seq()
johnc@2134 2258 };
johnc@2134 2259 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
johnc@2134 2260 6, other_parts);
johnc@2134 2261 }
johnc@2134 2262 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 2263 }
ysr@777 2264 }
ysr@777 2265 } else {
brutisso@2645 2266 LineBuffer(1).append_and_print_cr("none");
ysr@777 2267 }
brutisso@2645 2268 LineBuffer(0).append_and_print_cr("");
ysr@777 2269 }
ysr@777 2270
ysr@777 2271 void G1CollectorPolicy::print_tracing_info() const {
ysr@777 2272 if (TraceGen0Time) {
ysr@777 2273 gclog_or_tty->print_cr("ALL PAUSES");
ysr@777 2274 print_summary_sd(0, "Total", _all_pause_times_ms);
ysr@777 2275 gclog_or_tty->print_cr("");
ysr@777 2276 gclog_or_tty->print_cr("");
ysr@777 2277 gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
ysr@777 2278 gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
ysr@777 2279 gclog_or_tty->print_cr("");
ysr@777 2280
apetrusenko@1112 2281 gclog_or_tty->print_cr("EVACUATION PAUSES");
apetrusenko@1112 2282 print_summary(_summary);
ysr@777 2283
ysr@777 2284 gclog_or_tty->print_cr("MISC");
ysr@777 2285 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
ysr@777 2286 print_summary_sd(0, "Yields", _all_yield_times_ms);
ysr@777 2287 for (int i = 0; i < _aux_num; ++i) {
ysr@777 2288 if (_all_aux_times_ms[i].num() > 0) {
ysr@777 2289 char buffer[96];
ysr@777 2290 sprintf(buffer, "Aux%d", i);
ysr@777 2291 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
ysr@777 2292 }
ysr@777 2293 }
ysr@777 2294
ysr@777 2295 size_t all_region_num = _region_num_young + _region_num_tenured;
ysr@777 2296 gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
ysr@777 2297 "Tenured %8d (%6.2lf%%)",
ysr@777 2298 all_region_num,
ysr@777 2299 _region_num_young,
ysr@777 2300 (double) _region_num_young / (double) all_region_num * 100.0,
ysr@777 2301 _region_num_tenured,
ysr@777 2302 (double) _region_num_tenured / (double) all_region_num * 100.0);
ysr@777 2303 }
ysr@777 2304 if (TraceGen1Time) {
ysr@777 2305 if (_all_full_gc_times_ms->num() > 0) {
ysr@777 2306 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
ysr@777 2307 _all_full_gc_times_ms->num(),
ysr@777 2308 _all_full_gc_times_ms->sum() / 1000.0);
ysr@777 2309 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
ysr@777 2310 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 2311 _all_full_gc_times_ms->sd(),
ysr@777 2312 _all_full_gc_times_ms->maximum());
ysr@777 2313 }
ysr@777 2314 }
ysr@777 2315 }
ysr@777 2316
ysr@777 2317 void G1CollectorPolicy::print_yg_surv_rate_info() const {
ysr@777 2318 #ifndef PRODUCT
ysr@777 2319 _short_lived_surv_rate_group->print_surv_rate_summary();
ysr@777 2320 // add this call for any other surv rate groups
ysr@777 2321 #endif // PRODUCT
ysr@777 2322 }
ysr@777 2323
tonyp@3114 2324 void G1CollectorPolicy::update_region_num(bool young) {
tonyp@2315 2325 if (young) {
ysr@777 2326 ++_region_num_young;
ysr@777 2327 } else {
ysr@777 2328 ++_region_num_tenured;
ysr@777 2329 }
ysr@777 2330 }
ysr@777 2331
ysr@777 2332 #ifndef PRODUCT
ysr@777 2333 // for debugging, bit of a hack...
ysr@777 2334 static char*
ysr@777 2335 region_num_to_mbs(int length) {
ysr@777 2336 static char buffer[64];
ysr@777 2337 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2338 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2339 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2340 return buffer;
ysr@777 2341 }
ysr@777 2342 #endif // PRODUCT
ysr@777 2343
apetrusenko@980 2344 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2345 switch (purpose) {
ysr@777 2346 case GCAllocForSurvived:
apetrusenko@980 2347 return _max_survivor_regions;
ysr@777 2348 case GCAllocForTenured:
apetrusenko@980 2349 return REGIONS_UNLIMITED;
ysr@777 2350 default:
apetrusenko@980 2351 ShouldNotReachHere();
apetrusenko@980 2352 return REGIONS_UNLIMITED;
ysr@777 2353 };
ysr@777 2354 }
ysr@777 2355
tonyp@3119 2356 void G1CollectorPolicy::update_max_gc_locker_expansion() {
tonyp@2333 2357 size_t expansion_region_num = 0;
tonyp@2333 2358 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 2359 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 2360 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 2361 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 2362 // less than 1.0) we'll get 1.
tonyp@2333 2363 expansion_region_num = (size_t) ceil(expansion_region_num_d);
tonyp@2333 2364 } else {
tonyp@2333 2365 assert(expansion_region_num == 0, "sanity");
tonyp@2333 2366 }
tonyp@2333 2367 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 2368 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 2369 }
tonyp@2333 2370
apetrusenko@980 2371 // Calculates survivor space parameters.
tonyp@3119 2372 void G1CollectorPolicy::update_survivors_policy() {
tonyp@3119 2373 double max_survivor_regions_d =
tonyp@3119 2374 (double) _young_list_target_length / (double) SurvivorRatio;
tonyp@3119 2375 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
tonyp@3119 2376 // smaller than 1.0) we'll get 1.
tonyp@3119 2377 _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
tonyp@3119 2378
tonyp@3066 2379 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2380 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2381 }
apetrusenko@980 2382
ysr@777 2383 #ifndef PRODUCT
ysr@777 2384 class HRSortIndexIsOKClosure: public HeapRegionClosure {
ysr@777 2385 CollectionSetChooser* _chooser;
ysr@777 2386 public:
ysr@777 2387 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
ysr@777 2388 _chooser(chooser) {}
ysr@777 2389
ysr@777 2390 bool doHeapRegion(HeapRegion* r) {
ysr@777 2391 if (!r->continuesHumongous()) {
ysr@777 2392 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
ysr@777 2393 }
ysr@777 2394 return false;
ysr@777 2395 }
ysr@777 2396 };
ysr@777 2397
ysr@777 2398 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
ysr@777 2399 HRSortIndexIsOKClosure cl(_collectionSetChooser);
ysr@777 2400 _g1->heap_region_iterate(&cl);
ysr@777 2401 return true;
ysr@777 2402 }
ysr@777 2403 #endif
ysr@777 2404
tonyp@3114 2405 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
tonyp@3114 2406 GCCause::Cause gc_cause) {
tonyp@2011 2407 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2408 if (!during_cycle) {
tonyp@3114 2409 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2410 "request concurrent cycle initiation",
tonyp@3114 2411 ergo_format_reason("requested by GC cause")
tonyp@3114 2412 ergo_format_str("GC cause"),
tonyp@3114 2413 GCCause::to_string(gc_cause));
tonyp@2011 2414 set_initiate_conc_mark_if_possible();
tonyp@2011 2415 return true;
tonyp@2011 2416 } else {
tonyp@3114 2417 ergo_verbose1(ErgoConcCycles,
tonyp@3114 2418 "do not request concurrent cycle initiation",
tonyp@3114 2419 ergo_format_reason("concurrent cycle already in progress")
tonyp@3114 2420 ergo_format_str("GC cause"),
tonyp@3114 2421 GCCause::to_string(gc_cause));
tonyp@2011 2422 return false;
tonyp@2011 2423 }
tonyp@2011 2424 }
tonyp@2011 2425
ysr@777 2426 void
tonyp@1794 2427 G1CollectorPolicy::decide_on_conc_mark_initiation() {
tonyp@1794 2428 // We are about to decide on whether this pause will be an
tonyp@1794 2429 // initial-mark pause.
tonyp@1794 2430
tonyp@1794 2431 // First, during_initial_mark_pause() should not be already set. We
tonyp@1794 2432 // will set it here if we have to. However, it should be cleared by
tonyp@1794 2433 // the end of the pause (it's only set for the duration of an
tonyp@1794 2434 // initial-mark pause).
tonyp@1794 2435 assert(!during_initial_mark_pause(), "pre-condition");
tonyp@1794 2436
tonyp@1794 2437 if (initiate_conc_mark_if_possible()) {
tonyp@1794 2438 // We had noticed on a previous pause that the heap occupancy has
tonyp@1794 2439 // gone over the initiating threshold and we should start a
tonyp@1794 2440 // concurrent marking cycle. So we might initiate one.
tonyp@1794 2441
tonyp@1794 2442 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@1794 2443 if (!during_cycle) {
tonyp@1794 2444 // The concurrent marking thread is not "during a cycle", i.e.,
tonyp@1794 2445 // it has completed the last one. So we can go ahead and
tonyp@1794 2446 // initiate a new cycle.
tonyp@1794 2447
tonyp@1794 2448 set_during_initial_mark_pause();
tonyp@1794 2449
tonyp@1794 2450 // And we can now clear initiate_conc_mark_if_possible() as
tonyp@1794 2451 // we've already acted on it.
tonyp@1794 2452 clear_initiate_conc_mark_if_possible();
tonyp@3114 2453
tonyp@3114 2454 ergo_verbose0(ErgoConcCycles,
tonyp@3114 2455 "initiate concurrent cycle",
tonyp@3114 2456 ergo_format_reason("concurrent cycle initiation requested"));
tonyp@1794 2457 } else {
tonyp@1794 2458 // The concurrent marking thread is still finishing up the
tonyp@1794 2459 // previous cycle. If we start one right now the two cycles
tonyp@1794 2460 // overlap. In particular, the concurrent marking thread might
tonyp@1794 2461 // be in the process of clearing the next marking bitmap (which
tonyp@1794 2462 // we will use for the next cycle if we start one). Starting a
tonyp@1794 2463 // cycle now will be bad given that parts of the marking
tonyp@1794 2464 // information might get cleared by the marking thread. And we
tonyp@1794 2465 // cannot wait for the marking thread to finish the cycle as it
tonyp@1794 2466 // periodically yields while clearing the next marking bitmap
tonyp@1794 2467 // and, if it's in a yield point, it's waiting for us to
tonyp@1794 2468 // finish. So, at this point we will not start a cycle and we'll
tonyp@1794 2469 // let the concurrent marking thread complete the last one.
tonyp@3114 2470 ergo_verbose0(ErgoConcCycles,
tonyp@3114 2471 "do not initiate concurrent cycle",
tonyp@3114 2472 ergo_format_reason("concurrent cycle already in progress"));
tonyp@1794 2473 }
tonyp@1794 2474 }
tonyp@1794 2475 }
tonyp@1794 2476
tonyp@1794 2477 void
ysr@777 2478 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2479 record_collection_pause_start(double start_time_sec, size_t start_used) {
ysr@777 2480 G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
ysr@777 2481 }
ysr@777 2482
ysr@777 2483 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2484 CollectionSetChooser* _hrSorted;
ysr@777 2485
ysr@777 2486 public:
ysr@777 2487 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2488 _hrSorted(hrSorted)
ysr@777 2489 {}
ysr@777 2490
ysr@777 2491 bool doHeapRegion(HeapRegion* r) {
ysr@777 2492 // We only include humongous regions in collection
ysr@777 2493 // sets when concurrent mark shows that their contained object is
ysr@777 2494 // unreachable.
ysr@777 2495
ysr@777 2496 // Do we have any marking information for this region?
ysr@777 2497 if (r->is_marked()) {
ysr@777 2498 // We don't include humongous regions in collection
ysr@777 2499 // sets because we collect them immediately at the end of a marking
ysr@777 2500 // cycle. We also don't include young regions because we *must*
ysr@777 2501 // include them in the next collection pause.
ysr@777 2502 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2503 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2504 }
ysr@777 2505 }
ysr@777 2506 return false;
ysr@777 2507 }
ysr@777 2508 };
ysr@777 2509
ysr@777 2510 class ParKnownGarbageHRClosure: public HeapRegionClosure {
ysr@777 2511 CollectionSetChooser* _hrSorted;
ysr@777 2512 jint _marked_regions_added;
ysr@777 2513 jint _chunk_size;
ysr@777 2514 jint _cur_chunk_idx;
ysr@777 2515 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
ysr@777 2516 int _worker;
ysr@777 2517 int _invokes;
ysr@777 2518
ysr@777 2519 void get_new_chunk() {
ysr@777 2520 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
ysr@777 2521 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
ysr@777 2522 }
ysr@777 2523 void add_region(HeapRegion* r) {
ysr@777 2524 if (_cur_chunk_idx == _cur_chunk_end) {
ysr@777 2525 get_new_chunk();
ysr@777 2526 }
ysr@777 2527 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
ysr@777 2528 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
ysr@777 2529 _marked_regions_added++;
ysr@777 2530 _cur_chunk_idx++;
ysr@777 2531 }
ysr@777 2532
ysr@777 2533 public:
ysr@777 2534 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
ysr@777 2535 jint chunk_size,
ysr@777 2536 int worker) :
ysr@777 2537 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
ysr@777 2538 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
ysr@777 2539 _invokes(0)
ysr@777 2540 {}
ysr@777 2541
ysr@777 2542 bool doHeapRegion(HeapRegion* r) {
ysr@777 2543 // We only include humongous regions in collection
ysr@777 2544 // sets when concurrent mark shows that their contained object is
ysr@777 2545 // unreachable.
ysr@777 2546 _invokes++;
ysr@777 2547
ysr@777 2548 // Do we have any marking information for this region?
ysr@777 2549 if (r->is_marked()) {
ysr@777 2550 // We don't include humongous regions in collection
ysr@777 2551 // sets because we collect them immediately at the end of a marking
ysr@777 2552 // cycle.
ysr@777 2553 // We also do not include young regions in collection sets
ysr@777 2554 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2555 add_region(r);
ysr@777 2556 }
ysr@777 2557 }
ysr@777 2558 return false;
ysr@777 2559 }
ysr@777 2560 jint marked_regions_added() { return _marked_regions_added; }
ysr@777 2561 int invokes() { return _invokes; }
ysr@777 2562 };
ysr@777 2563
ysr@777 2564 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 2565 CollectionSetChooser* _hrSorted;
ysr@777 2566 jint _chunk_size;
ysr@777 2567 G1CollectedHeap* _g1;
ysr@777 2568 public:
ysr@777 2569 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ysr@777 2570 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 2571 _hrSorted(hrSorted), _chunk_size(chunk_size),
ysr@777 2572 _g1(G1CollectedHeap::heap())
ysr@777 2573 {}
ysr@777 2574
ysr@777 2575 void work(int i) {
ysr@777 2576 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
ysr@777 2577 // Back to zero for the claim value.
tonyp@790 2578 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
tonyp@790 2579 HeapRegion::InitialClaimValue);
ysr@777 2580 jint regions_added = parKnownGarbageCl.marked_regions_added();
ysr@777 2581 _hrSorted->incNumMarkedHeapRegions(regions_added);
ysr@777 2582 if (G1PrintParCleanupStats) {
brutisso@2645 2583 gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
ysr@777 2584 i, parKnownGarbageCl.invokes(), regions_added);
ysr@777 2585 }
ysr@777 2586 }
ysr@777 2587 };
ysr@777 2588
ysr@777 2589 void
ysr@777 2590 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2591 record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 2592 size_t max_live_bytes) {
ysr@777 2593 double start;
ysr@777 2594 if (G1PrintParCleanupStats) start = os::elapsedTime();
ysr@777 2595 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 2596
ysr@777 2597 _collectionSetChooser->clearMarkedHeapRegions();
ysr@777 2598 double clear_marked_end;
ysr@777 2599 if (G1PrintParCleanupStats) {
ysr@777 2600 clear_marked_end = os::elapsedTime();
ysr@777 2601 gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
ysr@777 2602 (clear_marked_end - start)*1000.0);
ysr@777 2603 }
jmasa@2188 2604 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 2605 const size_t OverpartitionFactor = 4;
kvn@1926 2606 const size_t MinWorkUnit = 8;
kvn@1926 2607 const size_t WorkUnit =
ysr@777 2608 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
kvn@1926 2609 MinWorkUnit);
ysr@777 2610 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
kvn@1926 2611 WorkUnit);
ysr@777 2612 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
kvn@1926 2613 (int) WorkUnit);
ysr@777 2614 _g1->workers()->run_task(&parKnownGarbageTask);
tonyp@790 2615
tonyp@790 2616 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 2617 "sanity check");
ysr@777 2618 } else {
ysr@777 2619 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
ysr@777 2620 _g1->heap_region_iterate(&knownGarbagecl);
ysr@777 2621 }
ysr@777 2622 double known_garbage_end;
ysr@777 2623 if (G1PrintParCleanupStats) {
ysr@777 2624 known_garbage_end = os::elapsedTime();
ysr@777 2625 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
ysr@777 2626 (known_garbage_end - clear_marked_end)*1000.0);
ysr@777 2627 }
ysr@777 2628 _collectionSetChooser->sortMarkedHeapRegions();
ysr@777 2629 double sort_end;
ysr@777 2630 if (G1PrintParCleanupStats) {
ysr@777 2631 sort_end = os::elapsedTime();
ysr@777 2632 gclog_or_tty->print_cr(" sorting: %8.3f ms.",
ysr@777 2633 (sort_end - known_garbage_end)*1000.0);
ysr@777 2634 }
ysr@777 2635
ysr@777 2636 record_concurrent_mark_cleanup_end_work2();
ysr@777 2637 double work2_end;
ysr@777 2638 if (G1PrintParCleanupStats) {
ysr@777 2639 work2_end = os::elapsedTime();
ysr@777 2640 gclog_or_tty->print_cr(" work2: %8.3f ms.",
ysr@777 2641 (work2_end - sort_end)*1000.0);
ysr@777 2642 }
ysr@777 2643 }
ysr@777 2644
johnc@1829 2645 // Add the heap region at the head of the non-incremental collection set
ysr@777 2646 void G1CollectorPolicy::
ysr@777 2647 add_to_collection_set(HeapRegion* hr) {
johnc@1829 2648 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2649 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2650
ysr@777 2651 if (_g1->mark_in_progress())
ysr@777 2652 _g1->concurrent_mark()->registerCSetRegion(hr);
ysr@777 2653
johnc@1829 2654 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2655 hr->set_in_collection_set(true);
ysr@777 2656 hr->set_next_in_collection_set(_collection_set);
ysr@777 2657 _collection_set = hr;
ysr@777 2658 _collection_set_size++;
ysr@777 2659 _collection_set_bytes_used_before += hr->used();
tonyp@961 2660 _g1->register_region_with_in_cset_fast_test(hr);
ysr@777 2661 }
ysr@777 2662
johnc@1829 2663 // Initialize the per-collection-set information
johnc@1829 2664 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2665 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2666
johnc@1829 2667 _inc_cset_head = NULL;
johnc@1829 2668 _inc_cset_tail = NULL;
johnc@1829 2669 _inc_cset_size = 0;
johnc@1829 2670 _inc_cset_bytes_used_before = 0;
johnc@1829 2671
brutisso@3065 2672 _inc_cset_young_index = 0;
johnc@1829 2673
johnc@1829 2674 _inc_cset_max_finger = 0;
johnc@1829 2675 _inc_cset_recorded_young_bytes = 0;
johnc@1829 2676 _inc_cset_recorded_rs_lengths = 0;
johnc@1829 2677 _inc_cset_predicted_elapsed_time_ms = 0;
johnc@1829 2678 _inc_cset_predicted_bytes_to_copy = 0;
johnc@1829 2679 _inc_cset_build_state = Active;
johnc@1829 2680 }
johnc@1829 2681
johnc@1829 2682 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2683 // This routine is used when:
johnc@1829 2684 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2685 // evacuation pause,
johnc@1829 2686 // * adding the current allocation region to the incremental cset
johnc@1829 2687 // when it is retired, and
johnc@1829 2688 // * updating existing policy information for a region in the
johnc@1829 2689 // incremental cset via young list RSet sampling.
johnc@1829 2690 // Therefore this routine may be called at a safepoint by the
johnc@1829 2691 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2692 // retiring the current allocation region) or a concurrent
johnc@1829 2693 // refine thread (RSet sampling).
johnc@1829 2694
johnc@1829 2695 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2696 size_t used_bytes = hr->used();
johnc@1829 2697
johnc@1829 2698 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2699 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2700
johnc@1829 2701 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2702
johnc@1829 2703 // Cache the values we have added to the aggregated informtion
johnc@1829 2704 // in the heap region in case we have to remove this region from
johnc@1829 2705 // the incremental collection set, or it is updated by the
johnc@1829 2706 // rset sampling code
johnc@1829 2707 hr->set_recorded_rs_length(rs_length);
johnc@1829 2708 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2709
johnc@1829 2710 #if PREDICTIONS_VERBOSE
johnc@1829 2711 size_t bytes_to_copy = predict_bytes_to_copy(hr);
johnc@1829 2712 _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
johnc@1829 2713
johnc@1829 2714 // Record the number of bytes used in this region
johnc@1829 2715 _inc_cset_recorded_young_bytes += used_bytes;
johnc@1829 2716
johnc@1829 2717 // Cache the values we have added to the aggregated informtion
johnc@1829 2718 // in the heap region in case we have to remove this region from
johnc@1829 2719 // the incremental collection set, or it is updated by the
johnc@1829 2720 // rset sampling code
johnc@1829 2721 hr->set_predicted_bytes_to_copy(bytes_to_copy);
johnc@1829 2722 #endif // PREDICTIONS_VERBOSE
johnc@1829 2723 }
johnc@1829 2724
johnc@1829 2725 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
johnc@1829 2726 // This routine is currently only called as part of the updating of
johnc@1829 2727 // existing policy information for regions in the incremental cset that
johnc@1829 2728 // is performed by the concurrent refine thread(s) as part of young list
johnc@1829 2729 // RSet sampling. Therefore we should not be at a safepoint.
johnc@1829 2730
johnc@1829 2731 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
johnc@1829 2732 assert(hr->is_young(), "it should be");
johnc@1829 2733
johnc@1829 2734 size_t used_bytes = hr->used();
johnc@1829 2735 size_t old_rs_length = hr->recorded_rs_length();
johnc@1829 2736 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
johnc@1829 2737
johnc@1829 2738 // Subtract the old recorded/predicted policy information for
johnc@1829 2739 // the given heap region from the collection set info.
johnc@1829 2740 _inc_cset_recorded_rs_lengths -= old_rs_length;
johnc@1829 2741 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
johnc@1829 2742
johnc@1829 2743 _inc_cset_bytes_used_before -= used_bytes;
johnc@1829 2744
johnc@1829 2745 // Clear the values cached in the heap region
johnc@1829 2746 hr->set_recorded_rs_length(0);
johnc@1829 2747 hr->set_predicted_elapsed_time_ms(0);
johnc@1829 2748
johnc@1829 2749 #if PREDICTIONS_VERBOSE
johnc@1829 2750 size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
johnc@1829 2751 _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
johnc@1829 2752
johnc@1829 2753 // Subtract the number of bytes used in this region
johnc@1829 2754 _inc_cset_recorded_young_bytes -= used_bytes;
johnc@1829 2755
johnc@1829 2756 // Clear the values cached in the heap region
johnc@1829 2757 hr->set_predicted_bytes_to_copy(0);
johnc@1829 2758 #endif // PREDICTIONS_VERBOSE
johnc@1829 2759 }
johnc@1829 2760
johnc@1829 2761 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
johnc@1829 2762 // Update the collection set information that is dependent on the new RS length
johnc@1829 2763 assert(hr->is_young(), "Precondition");
johnc@1829 2764
johnc@1829 2765 remove_from_incremental_cset_info(hr);
johnc@1829 2766 add_to_incremental_cset_info(hr, new_rs_length);
johnc@1829 2767 }
johnc@1829 2768
johnc@1829 2769 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
johnc@1829 2770 assert( hr->is_young(), "invariant");
johnc@1829 2771 assert( hr->young_index_in_cset() == -1, "invariant" );
johnc@1829 2772 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2773
johnc@1829 2774 // We need to clear and set the cached recorded/cached collection set
johnc@1829 2775 // information in the heap region here (before the region gets added
johnc@1829 2776 // to the collection set). An individual heap region's cached values
johnc@1829 2777 // are calculated, aggregated with the policy collection set info,
johnc@1829 2778 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2779 // by the Young List sampling code.
johnc@1829 2780
johnc@1829 2781 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2782 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2783
johnc@1829 2784 HeapWord* hr_end = hr->end();
johnc@1829 2785 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2786
johnc@1829 2787 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2788 hr->set_in_collection_set(true);
johnc@1829 2789 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2790
johnc@1829 2791 _inc_cset_size++;
johnc@1829 2792 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2793
johnc@1829 2794 hr->set_young_index_in_cset((int) _inc_cset_young_index);
johnc@1829 2795 ++_inc_cset_young_index;
johnc@1829 2796 }
johnc@1829 2797
johnc@1829 2798 // Add the region at the RHS of the incremental cset
johnc@1829 2799 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2800 // We should only ever be appending survivors at the end of a pause
johnc@1829 2801 assert( hr->is_survivor(), "Logic");
johnc@1829 2802
johnc@1829 2803 // Do the 'common' stuff
johnc@1829 2804 add_region_to_incremental_cset_common(hr);
johnc@1829 2805
johnc@1829 2806 // Now add the region at the right hand side
johnc@1829 2807 if (_inc_cset_tail == NULL) {
johnc@1829 2808 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2809 _inc_cset_head = hr;
johnc@1829 2810 } else {
johnc@1829 2811 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2812 }
johnc@1829 2813 _inc_cset_tail = hr;
johnc@1829 2814 }
johnc@1829 2815
johnc@1829 2816 // Add the region to the LHS of the incremental cset
johnc@1829 2817 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2818 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2819 assert(!hr->is_survivor(), "Logic");
johnc@1829 2820
johnc@1829 2821 // Do the 'common' stuff
johnc@1829 2822 add_region_to_incremental_cset_common(hr);
johnc@1829 2823
johnc@1829 2824 // Add the region at the left hand side
johnc@1829 2825 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2826 if (_inc_cset_head == NULL) {
johnc@1829 2827 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2828 _inc_cset_tail = hr;
johnc@1829 2829 }
johnc@1829 2830 _inc_cset_head = hr;
johnc@1829 2831 }
johnc@1829 2832
johnc@1829 2833 #ifndef PRODUCT
johnc@1829 2834 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2835 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2836
johnc@1829 2837 st->print_cr("\nCollection_set:");
johnc@1829 2838 HeapRegion* csr = list_head;
johnc@1829 2839 while (csr != NULL) {
johnc@1829 2840 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2841 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2842 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2843 "age: %4d, y: %d, surv: %d",
johnc@1829 2844 csr->bottom(), csr->end(),
johnc@1829 2845 csr->top(),
johnc@1829 2846 csr->prev_top_at_mark_start(),
johnc@1829 2847 csr->next_top_at_mark_start(),
johnc@1829 2848 csr->top_at_conc_mark_count(),
johnc@1829 2849 csr->age_in_surv_rate_group_cond(),
johnc@1829 2850 csr->is_young(),
johnc@1829 2851 csr->is_survivor());
johnc@1829 2852 csr = next;
johnc@1829 2853 }
johnc@1829 2854 }
johnc@1829 2855 #endif // !PRODUCT
johnc@1829 2856
tonyp@2062 2857 void
tonyp@2011 2858 G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
tonyp@2011 2859 double target_pause_time_ms) {
johnc@1829 2860 // Set this here - in case we're not doing young collections.
johnc@1829 2861 double non_young_start_time_sec = os::elapsedTime();
johnc@1829 2862
tonyp@3114 2863 YoungList* young_list = _g1->young_list();
tonyp@3114 2864
ysr@777 2865 start_recording_regions();
ysr@777 2866
tonyp@2011 2867 guarantee(target_pause_time_ms > 0.0,
tonyp@2011 2868 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2011 2869 target_pause_time_ms));
tonyp@2011 2870 guarantee(_collection_set == NULL, "Precondition");
ysr@777 2871
ysr@777 2872 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
ysr@777 2873 double predicted_pause_time_ms = base_time_ms;
ysr@777 2874
tonyp@2011 2875 double time_remaining_ms = target_pause_time_ms - base_time_ms;
ysr@777 2876
tonyp@3114 2877 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2878 "start choosing CSet",
tonyp@3114 2879 ergo_format_ms("predicted base time")
tonyp@3114 2880 ergo_format_ms("remaining time")
tonyp@3114 2881 ergo_format_ms("target pause time"),
tonyp@3114 2882 base_time_ms, time_remaining_ms, target_pause_time_ms);
tonyp@3114 2883
ysr@777 2884 // the 10% and 50% values are arbitrary...
tonyp@3114 2885 double threshold = 0.10 * target_pause_time_ms;
tonyp@3114 2886 if (time_remaining_ms < threshold) {
tonyp@3114 2887 double prev_time_remaining_ms = time_remaining_ms;
tonyp@2011 2888 time_remaining_ms = 0.50 * target_pause_time_ms;
ysr@777 2889 _within_target = false;
tonyp@3114 2890 ergo_verbose3(ErgoCSetConstruction,
tonyp@3114 2891 "adjust remaining time",
tonyp@3114 2892 ergo_format_reason("remaining time lower than threshold")
tonyp@3114 2893 ergo_format_ms("remaining time")
tonyp@3114 2894 ergo_format_ms("threshold")
tonyp@3114 2895 ergo_format_ms("adjusted remaining time"),
tonyp@3114 2896 prev_time_remaining_ms, threshold, time_remaining_ms);
ysr@777 2897 } else {
ysr@777 2898 _within_target = true;
ysr@777 2899 }
ysr@777 2900
tonyp@3114 2901 size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
tonyp@3114 2902
tonyp@3114 2903 HeapRegion* hr;
tonyp@3114 2904 double young_start_time_sec = os::elapsedTime();
ysr@777 2905
apetrusenko@1112 2906 _collection_set_bytes_used_before = 0;
apetrusenko@1112 2907 _collection_set_size = 0;
brutisso@3065 2908 _young_cset_length = 0;
brutisso@3065 2909 _last_young_gc_full = full_young_gcs() ? true : false;
brutisso@3065 2910
tonyp@3114 2911 if (_last_young_gc_full) {
brutisso@3065 2912 ++_full_young_pause_num;
tonyp@3114 2913 } else {
brutisso@3065 2914 ++_partial_young_pause_num;
tonyp@3114 2915 }
brutisso@3065 2916
brutisso@3065 2917 // The young list is laid with the survivor regions from the previous
brutisso@3065 2918 // pause are appended to the RHS of the young list, i.e.
brutisso@3065 2919 // [Newly Young Regions ++ Survivors from last pause].
brutisso@3065 2920
tonyp@3114 2921 size_t survivor_region_num = young_list->survivor_length();
tonyp@3114 2922 size_t eden_region_num = young_list->length() - survivor_region_num;
tonyp@3114 2923 size_t old_region_num = 0;
tonyp@3114 2924 hr = young_list->first_survivor_region();
brutisso@3065 2925 while (hr != NULL) {
brutisso@3065 2926 assert(hr->is_survivor(), "badly formed young list");
brutisso@3065 2927 hr->set_young();
brutisso@3065 2928 hr = hr->get_next_young_region();
brutisso@3065 2929 }
brutisso@3065 2930
tonyp@3114 2931 // Clear the fields that point to the survivor list - they are all young now.
tonyp@3114 2932 young_list->clear_survivors();
brutisso@3065 2933
brutisso@3065 2934 if (_g1->mark_in_progress())
brutisso@3065 2935 _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
brutisso@3065 2936
brutisso@3065 2937 _young_cset_length = _inc_cset_young_index;
brutisso@3065 2938 _collection_set = _inc_cset_head;
brutisso@3065 2939 _collection_set_size = _inc_cset_size;
brutisso@3065 2940 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
brutisso@3065 2941 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
brutisso@3065 2942 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
brutisso@3065 2943
tonyp@3114 2944 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 2945 "add young regions to CSet",
tonyp@3114 2946 ergo_format_region("eden")
tonyp@3114 2947 ergo_format_region("survivors")
tonyp@3114 2948 ergo_format_ms("predicted young region time"),
tonyp@3114 2949 eden_region_num, survivor_region_num,
tonyp@3114 2950 _inc_cset_predicted_elapsed_time_ms);
tonyp@3114 2951
brutisso@3065 2952 // The number of recorded young regions is the incremental
brutisso@3065 2953 // collection set's current size
brutisso@3065 2954 set_recorded_young_regions(_inc_cset_size);
brutisso@3065 2955 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
brutisso@3065 2956 set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
johnc@1829 2957 #if PREDICTIONS_VERBOSE
brutisso@3065 2958 set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
johnc@1829 2959 #endif // PREDICTIONS_VERBOSE
johnc@1829 2960
tonyp@3114 2961 assert(_inc_cset_size == young_list->length(), "Invariant");
brutisso@3065 2962
brutisso@3065 2963 double young_end_time_sec = os::elapsedTime();
brutisso@3065 2964 _recorded_young_cset_choice_time_ms =
brutisso@3065 2965 (young_end_time_sec - young_start_time_sec) * 1000.0;
brutisso@3065 2966
brutisso@3065 2967 // We are doing young collections so reset this.
brutisso@3065 2968 non_young_start_time_sec = young_end_time_sec;
brutisso@3065 2969
brutisso@3065 2970 if (!full_young_gcs()) {
ysr@777 2971 bool should_continue = true;
ysr@777 2972 NumberSeq seq;
ysr@777 2973 double avg_prediction = 100000000000000000.0; // something very large
johnc@1829 2974
tonyp@3114 2975 size_t prev_collection_set_size = _collection_set_size;
tonyp@3114 2976 double prev_predicted_pause_time_ms = predicted_pause_time_ms;
ysr@777 2977 do {
ysr@777 2978 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
ysr@777 2979 avg_prediction);
apetrusenko@1112 2980 if (hr != NULL) {
ysr@777 2981 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
ysr@777 2982 time_remaining_ms -= predicted_time_ms;
ysr@777 2983 predicted_pause_time_ms += predicted_time_ms;
ysr@777 2984 add_to_collection_set(hr);
johnc@1829 2985 record_non_young_cset_region(hr);
ysr@777 2986 seq.add(predicted_time_ms);
ysr@777 2987 avg_prediction = seq.avg() + seq.sd();
ysr@777 2988 }
tonyp@3114 2989
tonyp@3114 2990 should_continue = true;
tonyp@3114 2991 if (hr == NULL) {
tonyp@3114 2992 // No need for an ergo verbose message here,
tonyp@3114 2993 // getNextMarkRegion() does this when it returns NULL.
tonyp@3114 2994 should_continue = false;
tonyp@3114 2995 } else {
tonyp@3114 2996 if (adaptive_young_list_length()) {
tonyp@3114 2997 if (time_remaining_ms < 0.0) {
tonyp@3114 2998 ergo_verbose1(ErgoCSetConstruction,
tonyp@3114 2999 "stop adding old regions to CSet",
tonyp@3114 3000 ergo_format_reason("remaining time is lower than 0")
tonyp@3114 3001 ergo_format_ms("remaining time"),
tonyp@3114 3002 time_remaining_ms);
tonyp@3114 3003 should_continue = false;
tonyp@3114 3004 }
tonyp@3114 3005 } else {
tonyp@3114 3006 if (_collection_set_size < _young_list_fixed_length) {
tonyp@3114 3007 ergo_verbose2(ErgoCSetConstruction,
tonyp@3114 3008 "stop adding old regions to CSet",
tonyp@3114 3009 ergo_format_reason("CSet length lower than target")
tonyp@3114 3010 ergo_format_region("CSet")
tonyp@3114 3011 ergo_format_region("young target"),
tonyp@3114 3012 _collection_set_size, _young_list_fixed_length);
tonyp@3114 3013 should_continue = false;
tonyp@3114 3014 }
tonyp@3114 3015 }
tonyp@3114 3016 }
ysr@777 3017 } while (should_continue);
ysr@777 3018
ysr@777 3019 if (!adaptive_young_list_length() &&
tonyp@3114 3020 _collection_set_size < _young_list_fixed_length) {
tonyp@3114 3021 ergo_verbose2(ErgoCSetConstruction,
tonyp@3114 3022 "request partially-young GCs end",
tonyp@3114 3023 ergo_format_reason("CSet length lower than target")
tonyp@3114 3024 ergo_format_region("CSet")
tonyp@3114 3025 ergo_format_region("young target"),
tonyp@3114 3026 _collection_set_size, _young_list_fixed_length);
ysr@777 3027 _should_revert_to_full_young_gcs = true;
tonyp@3114 3028 }
tonyp@3114 3029
tonyp@3114 3030 old_region_num = _collection_set_size - prev_collection_set_size;
tonyp@3114 3031
tonyp@3114 3032 ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
tonyp@3114 3033 "add old regions to CSet",
tonyp@3114 3034 ergo_format_region("old")
tonyp@3114 3035 ergo_format_ms("predicted old region time"),
tonyp@3114 3036 old_region_num,
tonyp@3114 3037 predicted_pause_time_ms - prev_predicted_pause_time_ms);
ysr@777 3038 }
ysr@777 3039
johnc@1829 3040 stop_incremental_cset_building();
johnc@1829 3041
ysr@777 3042 count_CS_bytes_used();
ysr@777 3043
ysr@777 3044 end_recording_regions();
ysr@777 3045
tonyp@3114 3046 ergo_verbose5(ErgoCSetConstruction,
tonyp@3114 3047 "finish choosing CSet",
tonyp@3114 3048 ergo_format_region("eden")
tonyp@3114 3049 ergo_format_region("survivors")
tonyp@3114 3050 ergo_format_region("old")
tonyp@3114 3051 ergo_format_ms("predicted pause time")
tonyp@3114 3052 ergo_format_ms("target pause time"),
tonyp@3114 3053 eden_region_num, survivor_region_num, old_region_num,
tonyp@3114 3054 predicted_pause_time_ms, target_pause_time_ms);
tonyp@3114 3055
ysr@777 3056 double non_young_end_time_sec = os::elapsedTime();
ysr@777 3057 _recorded_non_young_cset_choice_time_ms =
ysr@777 3058 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
ysr@777 3059 }
ysr@777 3060
// End-of-full-collection hook: do the base policy's bookkeeping
// first, then let the collection set chooser update its state to
// reflect the post-full-GC heap.
void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
  G1CollectorPolicy::record_full_collection_end();
  _collectionSetChooser->updateAfterFullCollection();
}
ysr@777 3065
// End-of-pause hook: delegate the pause-end bookkeeping to the base
// policy, then sanity-check (debug builds only) the per-region
// marked-bytes accounting.
void G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_end() {
  G1CollectorPolicy::record_collection_pause_end();
  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
}

mercurial