src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Thu, 22 Sep 2011 10:57:37 -0700

author
johnc
date
Thu, 22 Sep 2011 10:57:37 -0700
changeset 3175
4dfb2df418f2
parent 3172
d912b598c6c3
child 3178
273b46400613
permissions
-rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

ysr@777 1 /*
tonyp@2472 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@3114 31 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
stefank@2314 32 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 33 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 34 #include "runtime/arguments.hpp"
stefank@2314 35 #include "runtime/java.hpp"
stefank@2314 36 #include "runtime/mutexLocker.hpp"
stefank@2314 37 #include "utilities/debug.hpp"
ysr@777 38
#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// Each array has 8 entries. The constructor selects an entry with
// index = clamp(ParallelGCThreads, 1, 8) - 1 (0 is used when
// ParallelGCThreads == 0), and seeds the corresponding prediction
// sequence with the chosen value.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};


static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>
ysr@777 84
brutisso@2645 85 // Help class for avoiding interleaved logging
brutisso@2645 86 class LineBuffer: public StackObj {
brutisso@2645 87
brutisso@2645 88 private:
brutisso@2645 89 static const int BUFFER_LEN = 1024;
brutisso@2645 90 static const int INDENT_CHARS = 3;
brutisso@2645 91 char _buffer[BUFFER_LEN];
brutisso@2645 92 int _indent_level;
brutisso@2645 93 int _cur;
brutisso@2645 94
brutisso@2645 95 void vappend(const char* format, va_list ap) {
brutisso@2645 96 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
brutisso@2645 97 if (res != -1) {
brutisso@2645 98 _cur += res;
brutisso@2645 99 } else {
brutisso@2645 100 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
brutisso@2645 101 _buffer[BUFFER_LEN -1] = 0;
brutisso@2645 102 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
brutisso@2645 103 }
brutisso@2645 104 }
brutisso@2645 105
brutisso@2645 106 public:
brutisso@2645 107 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
brutisso@2645 108 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
brutisso@2645 109 _buffer[_cur] = ' ';
brutisso@2645 110 }
brutisso@2645 111 }
brutisso@2645 112
brutisso@2645 113 #ifndef PRODUCT
brutisso@2645 114 ~LineBuffer() {
brutisso@2645 115 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
brutisso@2645 116 }
brutisso@2645 117 #endif
brutisso@2645 118
brutisso@2645 119 void append(const char* format, ...) {
brutisso@2645 120 va_list ap;
brutisso@2645 121 va_start(ap, format);
brutisso@2645 122 vappend(format, ap);
brutisso@2645 123 va_end(ap);
brutisso@2645 124 }
brutisso@2645 125
brutisso@2645 126 void append_and_print_cr(const char* format, ...) {
brutisso@2645 127 va_list ap;
brutisso@2645 128 va_start(ap, format);
brutisso@2645 129 vappend(format, ap);
brutisso@2645 130 va_end(ap);
brutisso@2645 131 gclog_or_tty->print_cr("%s", _buffer);
brutisso@2645 132 _cur = _indent_level * INDENT_CHARS;
brutisso@2645 133 }
brutisso@2645 134 };
brutisso@2645 135
// Constructor: seeds every prediction sequence with a per-thread-count
// default, validates the pause-time flags, and sizes the heap-region
// and reserve machinery. NOTE: the member-initializer list below is
// order-sensitive (e.g. _aux_num is read by the array initializers
// that follow it) - do not reorder.
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),
  _using_new_ratio_calculations(false),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

  _cur_clear_ct_time_ms(0.0),

  // STW reference processing / enqueuing times for the current pause.
  _cur_ref_proc_time_ms(0.0),
  _cur_ref_enq_time_ms(0.0),

#ifndef PRODUCT
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  // _aux_num must be initialized before the four aux arrays below.
  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  // Per-worker phase-timing arrays, one slot per parallel GC thread.
  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  // Bucket the thread count into [0, 7] to index the *_defaults arrays
  // defined at the top of this file.
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                             young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
}
ysr@777 444
// Advance the cyclic index "i" by one position, wrapping from
// "len" - 1 back to 0 (i.e. increment modulo "len").
static void inc_mod(int& i, int len) {
  if (++i == len) {
    i = 0;
  }
}
ysr@777 449
// Validate and install G1-specific values for the inherited
// CollectorPolicy flags, then delegate to the base class.
void G1CollectorPolicy::initialize_flags() {
  // Heap alignment is driven by the G1 region size.
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  // SurvivorRatio is used as a divisor elsewhere, so 0 (or negative)
  // is rejected up front.
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
ysr@777 458
tonyp@1720 459 // The easiest way to deal with the parsing of the NewSize /
tonyp@1720 460 // MaxNewSize / etc. parameteres is to re-use the code in the
tonyp@1720 461 // TwoGenerationCollectorPolicy class. This is similar to what
tonyp@1720 462 // ParallelScavenge does with its GenerationSizer class (see
tonyp@1720 463 // ParallelScavengeHeap::initialize()). We might change this in the
tonyp@1720 464 // future, but it's a good start.
tonyp@1720 465 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
tonyp@3172 466 private:
tonyp@3172 467 size_t size_to_region_num(size_t byte_size) {
tonyp@3172 468 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
tonyp@3172 469 }
tonyp@1720 470
tonyp@1720 471 public:
tonyp@1720 472 G1YoungGenSizer() {
tonyp@1720 473 initialize_flags();
tonyp@1720 474 initialize_size_info();
tonyp@1720 475 }
tonyp@1720 476 size_t min_young_region_num() {
tonyp@1720 477 return size_to_region_num(_min_gen0_size);
tonyp@1720 478 }
tonyp@1720 479 size_t initial_young_region_num() {
tonyp@1720 480 return size_to_region_num(_initial_gen0_size);
tonyp@1720 481 }
tonyp@1720 482 size_t max_young_region_num() {
tonyp@1720 483 return size_to_region_num(_max_gen0_size);
tonyp@1720 484 }
tonyp@1720 485 };
tonyp@1720 486
brutisso@3120 487 void G1CollectorPolicy::update_young_list_size_using_newratio(size_t number_of_heap_regions) {
brutisso@3120 488 assert(number_of_heap_regions > 0, "Heap must be initialized");
brutisso@3120 489 size_t young_size = number_of_heap_regions / (NewRatio + 1);
brutisso@3120 490 _min_desired_young_length = young_size;
brutisso@3120 491 _max_desired_young_length = young_size;
brutisso@3120 492 }
brutisso@3120 493
// Second-stage initialization, run once the heap object exists (with
// the Heap_lock held): derive the young gen bounds from the sizing
// flags, pick fixed vs. adaptive young list sizing, and start the
// incremental collection-set machinery.
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  // Re-use the two-generation sizing machinery (see G1YoungGenSizer
  // above) to interpret NewSize / MaxNewSize.
  G1YoungGenSizer sizer;
  size_t initial_region_num = sizer.initial_young_region_num();
  _min_desired_young_length = sizer.min_young_region_num();
  _max_desired_young_length = sizer.max_young_region_num();

  if (FLAG_IS_CMDLINE(NewRatio)) {
    // Explicit young-gen sizes take precedence over NewRatio.
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      // Treat NewRatio as a fixed size that is only recalculated when the heap size changes
      update_young_list_size_using_newratio(_g1->n_regions());
      _using_new_ratio_calculations = true;
    }
  }

  // GenCollectorPolicy guarantees that min <= initial <= max.
  // Asserting here just to state that we rely on this property.
  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
  assert(initial_region_num <= _max_desired_young_length, "Initial young gen size too large");
  assert(_min_desired_young_length <= initial_region_num, "Initial young gen size too small");

  // Equal min/max bounds mean the user pinned the young gen size, so
  // adaptive sizing is disabled and the fixed length is used instead.
  set_adaptive_young_list_length(_min_desired_young_length < _max_desired_young_length);
  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = initial_region_num;
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}
ysr@777 537
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}
apetrusenko@980 542
tonyp@3119 543 bool G1CollectorPolicy::predict_will_fit(size_t young_length,
tonyp@3119 544 double base_time_ms,
tonyp@3119 545 size_t base_free_regions,
tonyp@3119 546 double target_pause_time_ms) {
tonyp@3119 547 if (young_length >= base_free_regions) {
tonyp@3119 548 // end condition 1: not enough space for the young regions
tonyp@3119 549 return false;
ysr@777 550 }
tonyp@3119 551
tonyp@3119 552 double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
tonyp@3119 553 size_t bytes_to_copy =
tonyp@3119 554 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
tonyp@3119 555 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
tonyp@3119 556 double young_other_time_ms = predict_young_other_time_ms(young_length);
tonyp@3119 557 double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
tonyp@3119 558 if (pause_time_ms > target_pause_time_ms) {
tonyp@3119 559 // end condition 2: prediction is over the target pause time
tonyp@3119 560 return false;
tonyp@3119 561 }
tonyp@3119 562
tonyp@3119 563 size_t free_bytes =
tonyp@3119 564 (base_free_regions - young_length) * HeapRegion::GrainBytes;
tonyp@3119 565 if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
tonyp@3119 566 // end condition 3: out-of-space (conservatively!)
tonyp@3119 567 return false;
tonyp@3119 568 }
tonyp@3119 569
tonyp@3119 570 // success!
tonyp@3119 571 return true;
ysr@777 572 }
ysr@777 573
brutisso@3120 574 void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
brutisso@3120 575 // re-calculate the necessary reserve
brutisso@3120 576 double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
tonyp@3119 577 // We use ceiling so that if reserve_regions_d is > 0.0 (but
tonyp@3119 578 // smaller than 1.0) we'll get 1.
tonyp@3119 579 _reserve_regions = (size_t) ceil(reserve_regions_d);
brutisso@3120 580
brutisso@3120 581 if (_using_new_ratio_calculations) {
brutisso@3120 582 // -XX:NewRatio was specified so we need to update the
brutisso@3120 583 // young gen length when the heap size has changed.
brutisso@3120 584 update_young_list_size_using_newratio(new_number_of_regions);
brutisso@3120 585 }
tonyp@3119 586 }
tonyp@3119 587
tonyp@3119 588 size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
tonyp@3119 589 size_t base_min_length) {
tonyp@3119 590 size_t desired_min_length = 0;
ysr@777 591 if (adaptive_young_list_length()) {
tonyp@3119 592 if (_alloc_rate_ms_seq->num() > 3) {
tonyp@3119 593 double now_sec = os::elapsedTime();
tonyp@3119 594 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
tonyp@3119 595 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@3119 596 desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@3119 597 } else {
tonyp@3119 598 // otherwise we don't have enough info to make the prediction
tonyp@3119 599 }
ysr@777 600 }
brutisso@3120 601 desired_min_length += base_min_length;
brutisso@3120 602 // make sure we don't go below any user-defined minimum bound
brutisso@3120 603 return MAX2(_min_desired_young_length, desired_min_length);
ysr@777 604 }
ysr@777 605
// Desired maximum young list length, in regions.
size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _max_desired_young_length;
}
tonyp@3119 612
// Recompute the young list target length (in regions) from the given
// RSet length sample and store it in _young_list_target_length.
// rs_lengths == (size_t) -1 means "no sample available": the length is
// predicted from the recorded sequence instead. The result is clamped
// between a desired minimum (pause-goal driven, but always at least one
// eden region on top of the recorded survivors) and a desired maximum
// (never eating into the heap reserve).
tonyp@3119 613 void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
tonyp@3119 614 if (rs_lengths == (size_t) -1) {
tonyp@3119 615 // if it's set to the default value (-1), we should predict it;
tonyp@3119 616 // otherwise, use the given value.
tonyp@3119 617 rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
tonyp@3119 618 }
tonyp@3119 619
tonyp@3119 620 // Calculate the absolute and desired min bounds.
tonyp@3119 621
tonyp@3119 622 // This is how many young regions we already have (currently: the survivors).
tonyp@3119 623 size_t base_min_length = recorded_survivor_regions();
tonyp@3119 624 // This is the absolute minimum young length, which ensures that we
tonyp@3119 625 // can allocate one eden region in the worst-case.
tonyp@3119 626 size_t absolute_min_length = base_min_length + 1;
tonyp@3119 627 size_t desired_min_length =
tonyp@3119 628 calculate_young_list_desired_min_length(base_min_length);
tonyp@3119 629 if (desired_min_length < absolute_min_length) {
tonyp@3119 630 desired_min_length = absolute_min_length;
tonyp@3119 631 }
tonyp@3119 632
tonyp@3119 633 // Calculate the absolute and desired max bounds.
tonyp@3119 634
tonyp@3119 635 // We will try our best not to "eat" into the reserve.
tonyp@3119 636 size_t absolute_max_length = 0;
tonyp@3119 637 if (_free_regions_at_end_of_collection > _reserve_regions) {
tonyp@3119 638 absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
tonyp@3119 639 }
tonyp@3119 640 size_t desired_max_length = calculate_young_list_desired_max_length();
tonyp@3119 641 if (desired_max_length > absolute_max_length) {
tonyp@3119 642 desired_max_length = absolute_max_length;
tonyp@3119 643 }
tonyp@3119 644
tonyp@3119 645 size_t young_list_target_length = 0;
tonyp@3119 646 if (adaptive_young_list_length()) {
tonyp@3119 647 if (full_young_gcs()) {
// Only fully-young adaptive sizing does the pause-time-driven search;
// the successful sample is remembered so that
// revise_young_list_target_length_if_necessary() can detect growth.
tonyp@3119 648 young_list_target_length =
tonyp@3119 649 calculate_young_list_target_length(rs_lengths,
tonyp@3119 650 base_min_length,
tonyp@3119 651 desired_min_length,
tonyp@3119 652 desired_max_length);
tonyp@3119 653 _rs_lengths_prediction = rs_lengths;
tonyp@3119 654 } else {
tonyp@3119 655 // Don't calculate anything and let the code below bound it to
tonyp@3119 656 // the desired_min_length, i.e., do the next GC as soon as
tonyp@3119 657 // possible to maximize how many old regions we can add to it.
ysr@777 658 }
ysr@777 659 } else {
tonyp@3119 660 if (full_young_gcs()) {
tonyp@3119 661 young_list_target_length = _young_list_fixed_length;
tonyp@3119 662 } else {
tonyp@3119 663 // A bit arbitrary: during partially-young GCs we allocate half
tonyp@3119 664 // the young regions to try to add old regions to the CSet.
tonyp@3119 665 young_list_target_length = _young_list_fixed_length / 2;
tonyp@3119 666 // We choose to accept that we might go under the desired min
tonyp@3119 667 // length given that we intentionally ask for a smaller young gen.
tonyp@3119 668 desired_min_length = absolute_min_length;
tonyp@3119 669 }
ysr@777 670 }
ysr@777 671
tonyp@3119 672 // Make sure we don't go over the desired max length, nor under the
tonyp@3119 673 // desired min length. In case they clash, desired_min_length wins
tonyp@3119 674 // which is why that test is second.
tonyp@3119 675 if (young_list_target_length > desired_max_length) {
tonyp@3119 676 young_list_target_length = desired_max_length;
tonyp@3119 677 }
tonyp@3119 678 if (young_list_target_length < desired_min_length) {
tonyp@3119 679 young_list_target_length = desired_min_length;
tonyp@3119 680 }
tonyp@3119 681
tonyp@3119 682 assert(young_list_target_length > recorded_survivor_regions(),
tonyp@3119 683 "we should be able to allocate at least one eden region");
tonyp@3119 684 assert(young_list_target_length >= absolute_min_length, "post-condition");
tonyp@3119 685 _young_list_target_length = young_list_target_length;
tonyp@3119 686
// Propagate the new target into the GC-locker expansion bound.
tonyp@3119 687 update_max_gc_locker_expansion();
ysr@777 688 }
ysr@777 689
// Binary-search for the largest eden length (in regions) whose predicted
// evacuation time fits within the MMU pause-time goal, given the
// sampled/predicted RSet length. base_min_length counts the young
// regions that already exist (the survivors); their evacuation cost is
// folded into base_time_ms rather than into the searched length. The
// returned value is a total young length, i.e. it includes
// base_min_length. Only valid for adaptive sizing during fully-young GCs
// (asserted below).
tonyp@3119 690 size_t
tonyp@3119 691 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
tonyp@3119 692 size_t base_min_length,
tonyp@3119 693 size_t desired_min_length,
tonyp@3119 694 size_t desired_max_length) {
tonyp@3119 695 assert(adaptive_young_list_length(), "pre-condition");
tonyp@3119 696 assert(full_young_gcs(), "only call this for fully-young GCs");
tonyp@3119 697
tonyp@3119 698 // In case some edge-condition makes the desired max length too small...
tonyp@3119 699 if (desired_max_length <= desired_min_length) {
tonyp@3119 700 return desired_min_length;
tonyp@3119 701 }
tonyp@3119 702
tonyp@3119 703 // We'll adjust min_young_length and max_young_length not to include
tonyp@3119 704 // the already allocated young regions (i.e., so they reflect the
tonyp@3119 705 // min and max eden regions we'll allocate). The base_min_length
tonyp@3119 706 // will be reflected in the predictions by the
tonyp@3119 707 // survivor_regions_evac_time prediction.
tonyp@3119 708 assert(desired_min_length > base_min_length, "invariant");
tonyp@3119 709 size_t min_young_length = desired_min_length - base_min_length;
tonyp@3119 710 assert(desired_max_length > base_min_length, "invariant");
tonyp@3119 711 size_t max_young_length = desired_max_length - base_min_length;
tonyp@3119 712
// Fixed costs of the pause that do not depend on the eden length:
// survivor evacuation plus the predicted card/RSet processing time.
tonyp@3119 713 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
tonyp@3119 714 double survivor_regions_evac_time = predict_survivor_regions_evac_time();
tonyp@3119 715 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
tonyp@3119 716 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
tonyp@3119 717 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
tonyp@3119 718 double base_time_ms =
tonyp@3119 719 predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
tonyp@3119 720 survivor_regions_evac_time;
tonyp@3119 721 size_t available_free_regions = _free_regions_at_end_of_collection;
tonyp@3119 722 size_t base_free_regions = 0;
tonyp@3119 723 if (available_free_regions > _reserve_regions) {
tonyp@3119 724 base_free_regions = available_free_regions - _reserve_regions;
tonyp@3119 725 }
tonyp@3119 726
tonyp@3119 727 // Here, we will make sure that the shortest young length that
tonyp@3119 728 // makes sense fits within the target pause time.
tonyp@3119 729
tonyp@3119 730 if (predict_will_fit(min_young_length, base_time_ms,
tonyp@3119 731 base_free_regions, target_pause_time_ms)) {
tonyp@3119 732 // The shortest young length will fit into the target pause time;
tonyp@3119 733 // we'll now check whether the absolute maximum number of young
tonyp@3119 734 // regions will fit in the target pause time. If not, we'll do
tonyp@3119 735 // a binary search between min_young_length and max_young_length.
tonyp@3119 736 if (predict_will_fit(max_young_length, base_time_ms,
tonyp@3119 737 base_free_regions, target_pause_time_ms)) {
tonyp@3119 738 // The maximum young length will fit into the target pause time.
tonyp@3119 739 // We are done so set min young length to the maximum length (as
tonyp@3119 740 // the result is assumed to be returned in min_young_length).
tonyp@3119 741 min_young_length = max_young_length;
tonyp@3119 742 } else {
tonyp@3119 743 // The maximum possible number of young regions will not fit within
tonyp@3119 744 // the target pause time so we'll search for the optimal
tonyp@3119 745 // length. The loop invariants are:
tonyp@3119 746 //
tonyp@3119 747 // min_young_length < max_young_length
tonyp@3119 748 // min_young_length is known to fit into the target pause time
tonyp@3119 749 // max_young_length is known not to fit into the target pause time
tonyp@3119 750 //
tonyp@3119 751 // Going into the loop we know the above hold as we've just
tonyp@3119 752 // checked them. Every time around the loop we check whether
tonyp@3119 753 // the middle value between min_young_length and
tonyp@3119 754 // max_young_length fits into the target pause time. If it
tonyp@3119 755 // does, it becomes the new min. If it doesn't, it becomes
tonyp@3119 756 // the new max. This way we maintain the loop invariants.
tonyp@3119 757
tonyp@3119 758 assert(min_young_length < max_young_length, "invariant");
tonyp@3119 759 size_t diff = (max_young_length - min_young_length) / 2;
tonyp@3119 760 while (diff > 0) {
tonyp@3119 761 size_t young_length = min_young_length + diff;
tonyp@3119 762 if (predict_will_fit(young_length, base_time_ms,
tonyp@3119 763 base_free_regions, target_pause_time_ms)) {
tonyp@3119 764 min_young_length = young_length;
tonyp@3119 765 } else {
tonyp@3119 766 max_young_length = young_length;
tonyp@3119 767 }
tonyp@3119 768 assert(min_young_length < max_young_length, "invariant");
tonyp@3119 769 diff = (max_young_length - min_young_length) / 2;
tonyp@3119 770 }
tonyp@3119 771 // The results is min_young_length which, according to the
tonyp@3119 772 // loop invariants, should fit within the target pause time.
tonyp@3119 773
tonyp@3119 774 // These are the post-conditions of the binary search above:
tonyp@3119 775 assert(min_young_length < max_young_length,
tonyp@3119 776 "otherwise we should have discovered that max_young_length "
tonyp@3119 777 "fits into the pause target and not done the binary search");
tonyp@3119 778 assert(predict_will_fit(min_young_length, base_time_ms,
tonyp@3119 779 base_free_regions, target_pause_time_ms),
tonyp@3119 780 "min_young_length, the result of the binary search, should "
tonyp@3119 781 "fit into the pause target");
tonyp@3119 782 assert(!predict_will_fit(min_young_length + 1, base_time_ms,
tonyp@3119 783 base_free_regions, target_pause_time_ms),
tonyp@3119 784 "min_young_length, the result of the binary search, should be "
tonyp@3119 785 "optimal, so no larger length should fit into the pause target");
tonyp@3119 786 }
tonyp@3119 787 } else {
tonyp@3119 788 // Even the minimum length doesn't fit into the pause time
tonyp@3119 789 // target, return it as the result nevertheless.
tonyp@3119 790 }
// Convert the eden-only result back to a total young length.
tonyp@3119 791 return base_min_length + min_young_length;
ysr@777 792 }
ysr@777 793
apetrusenko@980 794 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 795 double survivor_regions_evac_time = 0.0;
apetrusenko@980 796 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 797 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 798 r = r->get_next_young_region()) {
apetrusenko@980 799 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 800 }
apetrusenko@980 801 return survivor_regions_evac_time;
apetrusenko@980 802 }
apetrusenko@980 803
tonyp@3119 804 void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
ysr@777 805 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 806
johnc@1829 807 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 808 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 809 // add 10% to avoid having to recalculate often
ysr@777 810 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
tonyp@3119 811 update_young_list_target_length(rs_lengths_prediction);
ysr@777 812 }
ysr@777 813 }
ysr@777 814
tonyp@3119 815
tonyp@3119 816
// Unused in G1: allocation requests do not go through this generic
// CollectorPolicy entry point, so reaching it is a fatal error.
// Parameters are ignored; never returns normally.
ysr@777 817 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
ysr@777 818 bool is_tlab,
ysr@777 819 bool* gc_overhead_limit_was_exceeded) {
ysr@777 820 guarantee(false, "Not using this policy feature yet.");
ysr@777 821 return NULL;
ysr@777 822 }
ysr@777 823
ysr@777 824 // This method controls how a collector handles one or more
ysr@777 825 // of its generations being fully allocated.
// Unused in G1: failed allocations are handled elsewhere, so reaching
// this generic CollectorPolicy entry point is a fatal error.
ysr@777 826 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
ysr@777 827 bool is_tlab) {
ysr@777 828 guarantee(false, "Not using this policy feature yet.");
ysr@777 829 return NULL;
ysr@777 830 }
ysr@777 831
ysr@777 832
ysr@777 833 #ifndef PRODUCT
ysr@777 834 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 835 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 836 return
ysr@777 837 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 838 // also call verify_young_ages on any additional surv rate groups
ysr@777 839 }
ysr@777 840
ysr@777 841 bool
ysr@777 842 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 843 SurvRateGroup *surv_rate_group) {
ysr@777 844 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 845
ysr@777 846 const char* name = surv_rate_group->name();
ysr@777 847 bool ret = true;
ysr@777 848 int prev_age = -1;
ysr@777 849
ysr@777 850 for (HeapRegion* curr = head;
ysr@777 851 curr != NULL;
ysr@777 852 curr = curr->get_next_young_region()) {
ysr@777 853 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 854 if (group == NULL && !curr->is_survivor()) {
ysr@777 855 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 856 ret = false;
ysr@777 857 }
ysr@777 858
ysr@777 859 if (surv_rate_group == group) {
ysr@777 860 int age = curr->age_in_surv_rate_group();
ysr@777 861
ysr@777 862 if (age < 0) {
ysr@777 863 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 864 ret = false;
ysr@777 865 }
ysr@777 866
ysr@777 867 if (age <= prev_age) {
ysr@777 868 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 869 "(%d, %d)", name, age, prev_age);
ysr@777 870 ret = false;
ysr@777 871 }
ysr@777 872 prev_age = age;
ysr@777 873 }
ysr@777 874 }
ysr@777 875
ysr@777 876 return ret;
ysr@777 877 }
ysr@777 878 #endif // PRODUCT
ysr@777 879
// Called at the start of a Full GC: stamp the start time and put the
// heap into full-collection mode.
ysr@777 880 void G1CollectorPolicy::record_full_collection_start() {
ysr@777 881 _cur_collection_start_sec = os::elapsedTime();
ysr@777 882 // Release the future to-space so that it is available for compaction into.
ysr@777 883 _g1->set_full_collection();
ysr@777 884 }
ysr@777 885
// Called at the end of a Full GC: record its duration, leave
// full-collection mode, and reset all the incremental-collection
// heuristics (young/partial transitions, marking state, garbage
// estimates, survivor tracking) before re-deriving the young list
// target from scratch.
ysr@777 886 void G1CollectorPolicy::record_full_collection_end() {
ysr@777 887 // Consider this like a collection pause for the purposes of allocation
ysr@777 888 // since last pause.
ysr@777 889 double end_sec = os::elapsedTime();
ysr@777 890 double full_gc_time_sec = end_sec - _cur_collection_start_sec;
ysr@777 891 double full_gc_time_ms = full_gc_time_sec * 1000.0;
ysr@777 892
ysr@777 893 _all_full_gc_times_ms->add(full_gc_time_ms);
ysr@777 894
tonyp@1030 895 update_recent_gc_times(end_sec, full_gc_time_ms);
ysr@777 896
ysr@777 897 _g1->clear_full_collection();
ysr@777 898
ysr@777 899 // "Nuke" the heuristics that control the fully/partially young GC
ysr@777 900 // transitions and make sure we start with fully young GCs after the
ysr@777 901 // Full GC.
ysr@777 902 set_full_young_gcs(true);
ysr@777 903 _last_full_young_gc = false;
ysr@777 904 _should_revert_to_full_young_gcs = false;
tonyp@1794 905 clear_initiate_conc_mark_if_possible();
tonyp@1794 906 clear_during_initial_mark_pause();
ysr@777 907 _known_garbage_bytes = 0;
ysr@777 908 _known_garbage_ratio = 0.0;
ysr@777 909 _in_marking_window = false;
ysr@777 910 _in_marking_window_im = false;
ysr@777 911
ysr@777 912 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 913 // also call this on any additional surv rate groups
ysr@777 914
// Full GC compacted everything: there are no recorded survivors left.
apetrusenko@980 915 record_survivor_regions(0, NULL, NULL);
apetrusenko@980 916
ysr@777 917 _prev_region_num_young = _region_num_young;
ysr@777 918 _prev_region_num_tenured = _region_num_tenured;
ysr@777 919
ysr@777 920 _free_regions_at_end_of_collection = _g1->free_regions();
apetrusenko@980 921 // Reset survivors SurvRateGroup.
apetrusenko@980 922 _survivor_surv_rate_group->reset();
// No rs_lengths argument: use the default, i.e. predict the RSet length.
tonyp@3119 923 update_young_list_target_length();
tonyp@2315 924 }
ysr@777 925
// Stamp the moment a stop-the-world operation was requested, so the
// time-to-safepoint can be reported when the pause actually starts.
ysr@777 926 void G1CollectorPolicy::record_stop_world_start() {
ysr@777 927 _stop_world_start = os::elapsedTime();
ysr@777 928 }
ysr@777 929
// Called at the very start of an evacuation pause: print the pause
// header, refresh the survivor policy, record the stop-the-world
// latency, and snapshot the heap/young-list statistics and per-worker
// timing slots that record_collection_pause_end() will consume.
ysr@777 930 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
ysr@777 931 size_t start_used) {
ysr@777 932 if (PrintGCDetails) {
ysr@777 933 gclog_or_tty->stamp(PrintGCTimeStamps);
ysr@777 934 gclog_or_tty->print("[GC pause");
brutisso@3065 935 gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
ysr@777 936 }
ysr@777 937
tonyp@3119 938 // We only need to do this here as the policy will only be applied
tonyp@3119 939 // to the GC we're about to start. so, no point is calculating this
tonyp@3119 940 // every time we calculate / recalculate the target young length.
tonyp@3119 941 update_survivors_policy();
tonyp@3119 942
tonyp@2315 943 assert(_g1->used() == _g1->recalculate_used(),
tonyp@2315 944 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
tonyp@2315 945 _g1->used(), _g1->recalculate_used()));
ysr@777 946
ysr@777 947 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
ysr@777 948 _all_stop_world_times_ms->add(s_w_t_ms);
ysr@777 949 _stop_world_start = 0.0;
ysr@777 950
ysr@777 951 _cur_collection_start_sec = start_time_sec;
ysr@777 952 _cur_collection_pause_used_at_start_bytes = start_used;
ysr@777 953 _cur_collection_pause_used_regions_at_start = _g1->used_regions();
ysr@777 954 _pending_cards = _g1->pending_card_num();
ysr@777 955 _max_pending_cards = _g1->max_pending_card_num();
ysr@777 956
ysr@777 957 _bytes_in_collection_set_before_gc = 0;
tonyp@3028 958 _bytes_copied_during_gc = 0;
ysr@777 959
tonyp@2961 960 YoungList* young_list = _g1->young_list();
tonyp@2961 961 _eden_bytes_before_gc = young_list->eden_used_bytes();
tonyp@2961 962 _survivor_bytes_before_gc = young_list->survivor_used_bytes();
tonyp@2961 963 _capacity_before_gc = _g1->capacity();
tonyp@2961 964
// NOTE(review): HotSpot debug builds conventionally guard debug-only
// code with ASSERT rather than DEBUG — confirm DEBUG is actually
// defined in debug builds, otherwise this poisoning never runs.
ysr@777 965 #ifdef DEBUG
ysr@777 966 // initialise these to something well known so that we can spot
ysr@777 967 // if they are not set properly
ysr@777 968
ysr@777 969 for (int i = 0; i < _parallel_gc_threads; ++i) {
tonyp@1966 970 _par_last_gc_worker_start_times_ms[i] = -1234.0;
tonyp@1966 971 _par_last_ext_root_scan_times_ms[i] = -1234.0;
tonyp@1966 972 _par_last_mark_stack_scan_times_ms[i] = -1234.0;
tonyp@1966 973 _par_last_update_rs_times_ms[i] = -1234.0;
tonyp@1966 974 _par_last_update_rs_processed_buffers[i] = -1234.0;
tonyp@1966 975 _par_last_scan_rs_times_ms[i] = -1234.0;
tonyp@1966 976 _par_last_obj_copy_times_ms[i] = -1234.0;
tonyp@1966 977 _par_last_termination_times_ms[i] = -1234.0;
tonyp@1966 978 _par_last_termination_attempts[i] = -1234.0;
tonyp@1966 979 _par_last_gc_worker_end_times_ms[i] = -1234.0;
brutisso@2712 980 _par_last_gc_worker_times_ms[i] = -1234.0;
ysr@777 981 }
ysr@777 982 #endif
ysr@777 983
ysr@777 984 for (int i = 0; i < _aux_num; ++i) {
ysr@777 985 _cur_aux_times_ms[i] = 0.0;
ysr@777 986 _cur_aux_times_set[i] = false;
ysr@777 987 }
ysr@777 988
ysr@777 989 _satb_drain_time_set = false;
ysr@777 990 _last_satb_drain_processed_buffers = -1;
ysr@777 991
brutisso@3065 992 _last_young_gc_full = false;
ysr@777 993
ysr@777 994 // do that for any other surv rate groups
ysr@777 995 _short_lived_surv_rate_group->stop_adding_regions();
tonyp@1717 996 _survivors_age_table.clear();
apetrusenko@980 997
ysr@777 998 assert( verify_young_ages(), "region age verification" );
ysr@777 999 }
ysr@777 1000
// Record the time spent in the mark closure during this pause; read
// back when the pause's timing summary is assembled.
ysr@777 1001 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
ysr@777 1002 _mark_closure_time_ms = mark_closure_time_ms;
ysr@777 1003 }
ysr@777 1004
// Called when the initial-mark phase of a concurrent cycle completes:
// mark that concurrent marking is now in progress, clear the
// initial-mark-pending flag, and start accumulating the cycle's
// stop-the-world time.
brutisso@3065 1005 void G1CollectorPolicy::record_concurrent_mark_init_end(double
ysr@777 1006 mark_init_elapsed_time_ms) {
ysr@777 1007 _during_marking = true;
tonyp@1794 1008 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
tonyp@1794 1009 clear_during_initial_mark_pause();
ysr@777 1010 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
ysr@777 1011 }
ysr@777 1012
// Stamp the start of the remark pause and note that the concurrent
// marking phase proper has ended.
ysr@777 1013 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
ysr@777 1014 _mark_remark_start_sec = os::elapsedTime();
ysr@777 1015 _during_marking = false;
ysr@777 1016 }
ysr@777 1017
// Record the remark pause's duration: fold it into the remark history,
// the cycle's stop-the-world total, the inter-pause bookkeeping, and
// the MMU tracker.
ysr@777 1018 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 1019 double end_time_sec = os::elapsedTime();
ysr@777 1020 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 1021 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 1022 _cur_mark_stop_world_time_ms += elapsed_time_ms;
// Shift the "previous pause end" forward so the remark pause is not
// counted as application (mutator) time.
ysr@777 1023 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 1024
ysr@777 1025 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 1026 }
ysr@777 1027
// Stamp the start of the cleanup pause of the concurrent cycle.
ysr@777 1028 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
ysr@777 1029 _mark_cleanup_start_sec = os::elapsedTime();
ysr@777 1030 }
ysr@777 1031
// Record the end of the cleanup pause by delegating to the two worker
// routines (mark counting, then timing bookkeeping).
ysr@777 1032 void
ysr@777 1033 G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 1034 size_t max_live_bytes) {
ysr@777 1035 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 1036 record_concurrent_mark_cleanup_end_work2();
ysr@777 1037 }
ysr@777 1038
// First half of cleanup-end processing: bump the completed-marks
// counter, saturating at 2. The freed_bytes/max_live_bytes arguments
// are currently unused here.
ysr@777 1039 void
ysr@777 1040 G1CollectorPolicy::
ysr@777 1041 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 1042 size_t max_live_bytes) {
tonyp@3114 1043 if (_n_marks < 2) {
tonyp@3114 1044 _n_marks++;
tonyp@3114 1045 }
ysr@777 1046 }
ysr@777 1047
ysr@777 1048 // The important thing about this is that it includes "os::elapsedTime".
// Second half of cleanup-end processing: record the cleanup pause's
// duration (history, cycle stop-the-world total, inter-pause
// bookkeeping, MMU tracker) and update the marking counters.
ysr@777 1049 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
ysr@777 1050 double end_time_sec = os::elapsedTime();
ysr@777 1051 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
ysr@777 1052 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
ysr@777 1053 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 1054 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 1055
ysr@777 1056 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
ysr@777 1057
ysr@777 1058 _num_markings++;
ysr@777 1059 _n_pauses_at_mark_end = _n_pauses;
ysr@777 1060 _n_marks_since_last_pause++;
ysr@777 1061 }
ysr@777 1062
// Called when the whole concurrent marking cycle has completed: allow
// the next GC to be the last fully-young one before switching to
// partially-young collections, and close the in-marking window.
ysr@777 1063 void
ysr@777 1064 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
brutisso@3065 1065 _should_revert_to_full_young_gcs = false;
brutisso@3065 1066 _last_full_young_gc = true;
brutisso@3065 1067 _in_marking_window = false;
ysr@777 1068 }
ysr@777 1069
// Record the yield time of a concurrent phase pausing for a
// stop-the-world operation; a zero _stop_world_start means no request
// is outstanding, so there is nothing to record.
ysr@777 1070 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 1071 if (_stop_world_start > 0.0) {
ysr@777 1072 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 1073 _all_yield_times_ms->add(yield_ms);
ysr@777 1074 }
ysr@777 1075 }
ysr@777 1076
// Intentionally empty: no bookkeeping is needed when a concurrent
// pause ends; kept as a hook for symmetry with record_concurrent_pause().
ysr@777 1077 void G1CollectorPolicy::record_concurrent_pause_end() {
ysr@777 1078 }
ysr@777 1079
// Sum n consecutive entries of the circular buffer sum_arr (capacity N),
// beginning at index start and wrapping around modulo N.
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T total = (T)0;
  for (int offset = 0; offset < n; offset++) {
    total += sum_arr[(start + offset) % N];
  }
  return total;
}
ysr@777 1089
tonyp@1966 1090 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1091 const char* str,
brutisso@2712 1092 double* data) {
ysr@777 1093 double min = data[0], max = data[0];
ysr@777 1094 double total = 0.0;
brutisso@2645 1095 LineBuffer buf(level);
brutisso@2645 1096 buf.append("[%s (ms):", str);
ysr@777 1097 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1098 double val = data[i];
ysr@777 1099 if (val < min)
ysr@777 1100 min = val;
ysr@777 1101 if (val > max)
ysr@777 1102 max = val;
ysr@777 1103 total += val;
brutisso@2645 1104 buf.append(" %3.1lf", val);
ysr@777 1105 }
brutisso@2712 1106 buf.append_and_print_cr("");
brutisso@2712 1107 double avg = total / (double) ParallelGCThreads;
brutisso@2712 1108 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
brutisso@2712 1109 avg, min, max, max - min);
ysr@777 1110 }
ysr@777 1111
tonyp@1966 1112 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1113 const char* str,
brutisso@2712 1114 double* data) {
ysr@777 1115 double min = data[0], max = data[0];
ysr@777 1116 double total = 0.0;
brutisso@2645 1117 LineBuffer buf(level);
brutisso@2645 1118 buf.append("[%s :", str);
ysr@777 1119 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1120 double val = data[i];
ysr@777 1121 if (val < min)
ysr@777 1122 min = val;
ysr@777 1123 if (val > max)
ysr@777 1124 max = val;
ysr@777 1125 total += val;
brutisso@2645 1126 buf.append(" %d", (int) val);
ysr@777 1127 }
brutisso@2712 1128 buf.append_and_print_cr("");
brutisso@2712 1129 double avg = total / (double) ParallelGCThreads;
brutisso@2712 1130 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
brutisso@2712 1131 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
ysr@777 1132 }
ysr@777 1133
ysr@777 1134 void G1CollectorPolicy::print_stats (int level,
ysr@777 1135 const char* str,
ysr@777 1136 double value) {
brutisso@2645 1137 LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1138 }
ysr@777 1139
ysr@777 1140 void G1CollectorPolicy::print_stats (int level,
ysr@777 1141 const char* str,
ysr@777 1142 int value) {
brutisso@2645 1143 LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
ysr@777 1144 }
ysr@777 1145
ysr@777 1146 double G1CollectorPolicy::avg_value (double* data) {
jmasa@2188 1147 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1148 double ret = 0.0;
ysr@777 1149 for (uint i = 0; i < ParallelGCThreads; ++i)
ysr@777 1150 ret += data[i];
ysr@777 1151 return ret / (double) ParallelGCThreads;
ysr@777 1152 } else {
ysr@777 1153 return data[0];
ysr@777 1154 }
ysr@777 1155 }
ysr@777 1156
ysr@777 1157 double G1CollectorPolicy::max_value (double* data) {
jmasa@2188 1158 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1159 double ret = data[0];
ysr@777 1160 for (uint i = 1; i < ParallelGCThreads; ++i)
ysr@777 1161 if (data[i] > ret)
ysr@777 1162 ret = data[i];
ysr@777 1163 return ret;
ysr@777 1164 } else {
ysr@777 1165 return data[0];
ysr@777 1166 }
ysr@777 1167 }
ysr@777 1168
ysr@777 1169 double G1CollectorPolicy::sum_of_values (double* data) {
jmasa@2188 1170 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1171 double sum = 0.0;
ysr@777 1172 for (uint i = 0; i < ParallelGCThreads; i++)
ysr@777 1173 sum += data[i];
ysr@777 1174 return sum;
ysr@777 1175 } else {
ysr@777 1176 return data[0];
ysr@777 1177 }
ysr@777 1178 }
ysr@777 1179
ysr@777 1180 double G1CollectorPolicy::max_sum (double* data1,
ysr@777 1181 double* data2) {
ysr@777 1182 double ret = data1[0] + data2[0];
ysr@777 1183
jmasa@2188 1184 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1185 for (uint i = 1; i < ParallelGCThreads; ++i) {
ysr@777 1186 double data = data1[i] + data2[i];
ysr@777 1187 if (data > ret)
ysr@777 1188 ret = data;
ysr@777 1189 }
ysr@777 1190 }
ysr@777 1191 return ret;
ysr@777 1192 }
ysr@777 1193
ysr@777 1194 // Anything below that is considered to be zero
ysr@777 1195 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1196
tonyp@2062 1197 void G1CollectorPolicy::record_collection_pause_end() {
ysr@777 1198 double end_time_sec = os::elapsedTime();
ysr@777 1199 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1200 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 1201 size_t rs_size =
ysr@777 1202 _cur_collection_pause_used_regions_at_start - collection_set_size();
ysr@777 1203 size_t cur_used_bytes = _g1->used();
ysr@777 1204 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1205 bool last_pause_included_initial_mark = false;
tonyp@2062 1206 bool update_stats = !_g1->evacuation_failed();
ysr@777 1207
ysr@777 1208 #ifndef PRODUCT
ysr@777 1209 if (G1YoungSurvRateVerbose) {
ysr@777 1210 gclog_or_tty->print_cr("");
ysr@777 1211 _short_lived_surv_rate_group->print();
ysr@777 1212 // do that for any other surv rate groups too
ysr@777 1213 }
ysr@777 1214 #endif // PRODUCT
ysr@777 1215
brutisso@3065 1216 last_pause_included_initial_mark = during_initial_mark_pause();
brutisso@3065 1217 if (last_pause_included_initial_mark)
brutisso@3065 1218 record_concurrent_mark_init_end(0.0);
brutisso@3065 1219
tonyp@3114 1220 size_t marking_initiating_used_threshold =
brutisso@3065 1221 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
brutisso@3065 1222
brutisso@3065 1223 if (!_g1->mark_in_progress() && !_last_full_young_gc) {
brutisso@3065 1224 assert(!last_pause_included_initial_mark, "invariant");
tonyp@3114 1225 if (cur_used_bytes > marking_initiating_used_threshold) {
tonyp@3114 1226 if (cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
tonyp@1794 1227 assert(!during_initial_mark_pause(), "we should not see this here");
tonyp@1794 1228
tonyp@3114 1229 ergo_verbose3(ErgoConcCycles,
tonyp@3114 1230 "request concurrent cycle initiation",
tonyp@3114 1231 ergo_format_reason("occupancy higher than threshold")
tonyp@3114 1232 ergo_format_byte("occupancy")
tonyp@3114 1233 ergo_format_byte_perc("threshold"),
tonyp@3114 1234 cur_used_bytes,
tonyp@3114 1235 marking_initiating_used_threshold,
tonyp@3114 1236 (double) InitiatingHeapOccupancyPercent);
tonyp@3114 1237
tonyp@1794 1238 // Note: this might have already been set, if during the last
tonyp@1794 1239 // pause we decided to start a cycle but at the beginning of
tonyp@1794 1240 // this pause we decided to postpone it. That's OK.
tonyp@1794 1241 set_initiate_conc_mark_if_possible();
tonyp@3114 1242 } else {
tonyp@3114 1243 ergo_verbose2(ErgoConcCycles,
tonyp@3114 1244 "do not request concurrent cycle initiation",
tonyp@3114 1245 ergo_format_reason("occupancy lower than previous occupancy")
tonyp@3114 1246 ergo_format_byte("occupancy")
tonyp@3114 1247 ergo_format_byte("previous occupancy"),
tonyp@3114 1248 cur_used_bytes,
tonyp@3114 1249 _prev_collection_pause_used_at_end_bytes);
tonyp@3114 1250 }
ysr@777 1251 }
ysr@777 1252 }
ysr@777 1253
brutisso@3065 1254 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
brutisso@3065 1255
ysr@777 1256 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1257 end_time_sec, false);
ysr@777 1258
ysr@777 1259 guarantee(_cur_collection_pause_used_regions_at_start >=
ysr@777 1260 collection_set_size(),
ysr@777 1261 "Negative RS size?");
ysr@777 1262
ysr@777 1263 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1264 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1265 // can lead to more memory being used during collection than was used
ysr@777 1266 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1267 // Pauses in which evacuation failed can also lead to negative
ysr@777 1268 // collections, since no space is reclaimed from a region containing an
ysr@777 1269 // object whose evacuation failed.
ysr@777 1270 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1271 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1272 // (DLD, 10/05.)
ysr@777 1273 assert((true || parallel) // Always using GC LABs now.
ysr@777 1274 || _g1->evacuation_failed()
ysr@777 1275 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1276 "Negative collection");
ysr@777 1277
ysr@777 1278 size_t freed_bytes =
ysr@777 1279 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1280 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1281
ysr@777 1282 double survival_fraction =
ysr@777 1283 (double)surviving_bytes/
ysr@777 1284 (double)_collection_set_bytes_used_before;
ysr@777 1285
ysr@777 1286 _n_pauses++;
ysr@777 1287
johnc@3021 1288 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
johnc@3021 1289 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
johnc@3021 1290 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
johnc@3021 1291 double update_rs_processed_buffers =
johnc@3021 1292 sum_of_values(_par_last_update_rs_processed_buffers);
johnc@3021 1293 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
johnc@3021 1294 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
johnc@3021 1295 double termination_time = avg_value(_par_last_termination_times_ms);
johnc@3021 1296
johnc@3021 1297 double parallel_known_time = update_rs_time +
johnc@3021 1298 ext_root_scan_time +
johnc@3021 1299 mark_stack_scan_time +
johnc@3021 1300 scan_rs_time +
johnc@3021 1301 obj_copy_time +
johnc@3021 1302 termination_time;
johnc@3021 1303
johnc@3021 1304 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
johnc@3021 1305
johnc@3021 1306 PauseSummary* summary = _summary;
johnc@3021 1307
tonyp@1030 1308 if (update_stats) {
johnc@3021 1309 _recent_rs_scan_times_ms->add(scan_rs_time);
ysr@777 1310 _recent_pause_times_ms->add(elapsed_ms);
ysr@777 1311 _recent_rs_sizes->add(rs_size);
ysr@777 1312
johnc@3021 1313 MainBodySummary* body_summary = summary->main_body_summary();
johnc@3021 1314 guarantee(body_summary != NULL, "should not be null!");
johnc@3021 1315
johnc@3021 1316 if (_satb_drain_time_set)
johnc@3021 1317 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
johnc@3021 1318 else
johnc@3021 1319 body_summary->record_satb_drain_time_ms(0.0);
johnc@3021 1320
johnc@3021 1321 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
johnc@3021 1322 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
johnc@3021 1323 body_summary->record_update_rs_time_ms(update_rs_time);
johnc@3021 1324 body_summary->record_scan_rs_time_ms(scan_rs_time);
johnc@3021 1325 body_summary->record_obj_copy_time_ms(obj_copy_time);
johnc@3021 1326 if (parallel) {
johnc@3021 1327 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
johnc@3021 1328 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
johnc@3021 1329 body_summary->record_termination_time_ms(termination_time);
johnc@3021 1330 body_summary->record_parallel_other_time_ms(parallel_other_time);
johnc@3021 1331 }
johnc@3021 1332 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
johnc@3021 1333
ysr@777 1334 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1335 // fragmentation can produce negative collections. Same with evac
ysr@777 1336 // failure.
ysr@777 1337 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1338 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1339 // (DLD, 10/05.
ysr@777 1340 assert((true || parallel)
ysr@777 1341 || _g1->evacuation_failed()
ysr@777 1342 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1343 "Or else negative collection!");
ysr@777 1344 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
ysr@777 1345 _recent_CS_bytes_surviving->add(surviving_bytes);
ysr@777 1346
ysr@777 1347 // this is where we update the allocation rate of the application
ysr@777 1348 double app_time_ms =
ysr@777 1349 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1350 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1351 // This usually happens due to the timer not having the required
ysr@777 1352 // granularity. Some Linuxes are the usual culprits.
ysr@777 1353 // We'll just set it to something (arbitrarily) small.
ysr@777 1354 app_time_ms = 1.0;
ysr@777 1355 }
ysr@777 1356 size_t regions_allocated =
ysr@777 1357 (_region_num_young - _prev_region_num_young) +
ysr@777 1358 (_region_num_tenured - _prev_region_num_tenured);
ysr@777 1359 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1360 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1361 _prev_region_num_young = _region_num_young;
ysr@777 1362 _prev_region_num_tenured = _region_num_tenured;
ysr@777 1363
ysr@777 1364 double interval_ms =
ysr@777 1365 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1366 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1367 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1368 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1369 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1370 #ifndef PRODUCT
ysr@1521 1371 // Dump info to allow post-facto debugging
ysr@1521 1372 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1373 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1374 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1375 _recent_gc_times_ms->dump();
ysr@1521 1376 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1377 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1378 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1379 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1380 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1381 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1382 #endif // !PRODUCT
ysr@1522 1383 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1384 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1385 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1386 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1387 } else {
ysr@1521 1388 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1389 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1390 }
ysr@1521 1391 }
ysr@777 1392 }
ysr@777 1393
ysr@777 1394 if (G1PolicyVerbose > 1) {
ysr@777 1395 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
ysr@777 1396 }
ysr@777 1397
ysr@777 1398 if (G1PolicyVerbose > 1) {
ysr@777 1399 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1400 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1401 " |RS|: " SIZE_FORMAT,
ysr@777 1402 elapsed_ms, recent_avg_time_for_pauses_ms(),
johnc@3021 1403 scan_rs_time, recent_avg_time_for_rs_scan_ms(),
ysr@777 1404 rs_size);
ysr@777 1405
ysr@777 1406 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
ysr@777 1407 " At end " SIZE_FORMAT "K\n"
ysr@777 1408 " garbage : " SIZE_FORMAT "K"
ysr@777 1409 " of " SIZE_FORMAT "K\n"
ysr@777 1410 " survival : %6.2f%% (%6.2f%% avg)",
ysr@777 1411 _cur_collection_pause_used_at_start_bytes/K,
ysr@777 1412 _g1->used()/K, freed_bytes/K,
ysr@777 1413 _collection_set_bytes_used_before/K,
ysr@777 1414 survival_fraction*100.0,
ysr@777 1415 recent_avg_survival_fraction()*100.0);
ysr@777 1416 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
ysr@777 1417 recent_avg_pause_time_ratio() * 100.0);
ysr@777 1418 }
ysr@777 1419
ysr@777 1420 double other_time_ms = elapsed_ms;
ysr@777 1421
tonyp@2062 1422 if (_satb_drain_time_set) {
tonyp@2062 1423 other_time_ms -= _cur_satb_drain_time_ms;
ysr@777 1424 }
ysr@777 1425
tonyp@2062 1426 if (parallel) {
tonyp@2062 1427 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
tonyp@2062 1428 } else {
tonyp@2062 1429 other_time_ms -=
tonyp@2062 1430 update_rs_time +
tonyp@2062 1431 ext_root_scan_time + mark_stack_scan_time +
tonyp@2062 1432 scan_rs_time + obj_copy_time;
tonyp@2062 1433 }
tonyp@2062 1434
ysr@777 1435 if (PrintGCDetails) {
tonyp@2062 1436 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1437 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1438 elapsed_ms / 1000.0);
ysr@777 1439
tonyp@2062 1440 if (_satb_drain_time_set) {
tonyp@2062 1441 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
tonyp@2062 1442 }
tonyp@2062 1443 if (_last_satb_drain_processed_buffers >= 0) {
tonyp@2062 1444 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
tonyp@2062 1445 }
tonyp@2062 1446 if (parallel) {
tonyp@2062 1447 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
brutisso@2712 1448 print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
tonyp@2062 1449 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
brutisso@2712 1450 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
brutisso@2712 1451 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
brutisso@2712 1452 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
tonyp@2062 1453 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1454 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1455 print_par_stats(2, "Termination", _par_last_termination_times_ms);
brutisso@2712 1456 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
brutisso@2712 1457 print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
brutisso@2712 1458
brutisso@2712 1459 for (int i = 0; i < _parallel_gc_threads; i++) {
brutisso@2712 1460 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
brutisso@2712 1461 }
brutisso@2712 1462 print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
brutisso@2712 1463
johnc@3021 1464 print_stats(2, "Parallel Other", parallel_other_time);
tonyp@2062 1465 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
tonyp@2062 1466 } else {
tonyp@2062 1467 print_stats(1, "Update RS", update_rs_time);
tonyp@2062 1468 print_stats(2, "Processed Buffers",
tonyp@2062 1469 (int)update_rs_processed_buffers);
tonyp@2062 1470 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
tonyp@2062 1471 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
tonyp@2062 1472 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1473 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1474 }
johnc@1325 1475 #ifndef PRODUCT
johnc@1325 1476 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1477 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1478 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1479 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1480 if (_num_cc_clears > 0) {
johnc@1325 1481 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1482 }
johnc@1325 1483 #endif
ysr@777 1484 print_stats(1, "Other", other_time_ms);
johnc@1829 1485 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
johnc@3175 1486 print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
johnc@3175 1487 print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
johnc@1829 1488
ysr@777 1489 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1490 if (_cur_aux_times_set[i]) {
ysr@777 1491 char buffer[96];
ysr@777 1492 sprintf(buffer, "Aux%d", i);
ysr@777 1493 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1494 }
ysr@777 1495 }
ysr@777 1496 }
ysr@777 1497
ysr@777 1498 _all_pause_times_ms->add(elapsed_ms);
tonyp@1083 1499 if (update_stats) {
tonyp@1083 1500 summary->record_total_time_ms(elapsed_ms);
tonyp@1083 1501 summary->record_other_time_ms(other_time_ms);
tonyp@1083 1502 }
ysr@777 1503 for (int i = 0; i < _aux_num; ++i)
ysr@777 1504 if (_cur_aux_times_set[i])
ysr@777 1505 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
ysr@777 1506
ysr@777 1507 // Reset marks-between-pauses counter.
ysr@777 1508 _n_marks_since_last_pause = 0;
ysr@777 1509
ysr@777 1510 // Update the efficiency-since-mark vars.
ysr@777 1511 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1512 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1513 // This usually happens due to the timer not having the required
ysr@777 1514 // granularity. Some Linuxes are the usual culprits.
ysr@777 1515 // We'll just set it to something (arbitrarily) small.
ysr@777 1516 proc_ms = 1.0;
ysr@777 1517 }
ysr@777 1518 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1519
ysr@777 1520 bool new_in_marking_window = _in_marking_window;
ysr@777 1521 bool new_in_marking_window_im = false;
tonyp@1794 1522 if (during_initial_mark_pause()) {
ysr@777 1523 new_in_marking_window = true;
ysr@777 1524 new_in_marking_window_im = true;
ysr@777 1525 }
ysr@777 1526
brutisso@3065 1527 if (_last_full_young_gc) {
tonyp@3114 1528 ergo_verbose2(ErgoPartiallyYoungGCs,
tonyp@3114 1529 "start partially-young GCs",
tonyp@3114 1530 ergo_format_byte_perc("known garbage"),
tonyp@3114 1531 _known_garbage_bytes, _known_garbage_ratio * 100.0);
brutisso@3065 1532 set_full_young_gcs(false);
brutisso@3065 1533 _last_full_young_gc = false;
brutisso@3065 1534 }
brutisso@3065 1535
brutisso@3065 1536 if ( !_last_young_gc_full ) {
tonyp@3114 1537 if (_should_revert_to_full_young_gcs) {
tonyp@3114 1538 ergo_verbose2(ErgoPartiallyYoungGCs,
tonyp@3114 1539 "end partially-young GCs",
tonyp@3114 1540 ergo_format_reason("partially-young GCs end requested")
tonyp@3114 1541 ergo_format_byte_perc("known garbage"),
tonyp@3114 1542 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3114 1543 set_full_young_gcs(true);
tonyp@3114 1544 } else if (_known_garbage_ratio < 0.05) {
tonyp@3114 1545 ergo_verbose3(ErgoPartiallyYoungGCs,
tonyp@3114 1546 "end partially-young GCs",
tonyp@3114 1547 ergo_format_reason("known garbage percent lower than threshold")
tonyp@3114 1548 ergo_format_byte_perc("known garbage")
tonyp@3114 1549 ergo_format_perc("threshold"),
tonyp@3114 1550 _known_garbage_bytes, _known_garbage_ratio * 100.0,
tonyp@3114 1551 0.05 * 100.0);
tonyp@3114 1552 set_full_young_gcs(true);
tonyp@3114 1553 } else if (adaptive_young_list_length() &&
tonyp@3114 1554 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
tonyp@3114 1555 ergo_verbose5(ErgoPartiallyYoungGCs,
tonyp@3114 1556 "end partially-young GCs",
tonyp@3114 1557 ergo_format_reason("current GC efficiency lower than "
tonyp@3114 1558 "predicted fully-young GC efficiency")
tonyp@3114 1559 ergo_format_double("GC efficiency factor")
tonyp@3114 1560 ergo_format_double("current GC efficiency")
tonyp@3114 1561 ergo_format_double("predicted fully-young GC efficiency")
tonyp@3114 1562 ergo_format_byte_perc("known garbage"),
tonyp@3114 1563 get_gc_eff_factor(), cur_efficiency,
tonyp@3114 1564 predict_young_gc_eff(),
tonyp@3114 1565 _known_garbage_bytes, _known_garbage_ratio * 100.0);
tonyp@3114 1566 set_full_young_gcs(true);
ysr@777 1567 }
brutisso@3065 1568 }
brutisso@3065 1569 _should_revert_to_full_young_gcs = false;
brutisso@3065 1570
brutisso@3065 1571 if (_last_young_gc_full && !_during_marking) {
brutisso@3065 1572 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1573 }
ysr@777 1574
ysr@777 1575 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1576 // do that for any other surv rate groupsx
ysr@777 1577
ysr@777 1578 // <NEW PREDICTION>
ysr@777 1579
apetrusenko@1112 1580 if (update_stats) {
ysr@777 1581 double pause_time_ms = elapsed_ms;
ysr@777 1582
ysr@777 1583 size_t diff = 0;
ysr@777 1584 if (_max_pending_cards >= _pending_cards)
ysr@777 1585 diff = _max_pending_cards - _pending_cards;
ysr@777 1586 _pending_card_diff_seq->add((double) diff);
ysr@777 1587
ysr@777 1588 double cost_per_card_ms = 0.0;
ysr@777 1589 if (_pending_cards > 0) {
ysr@777 1590 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1591 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1592 }
ysr@777 1593
ysr@777 1594 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1595
ysr@777 1596 double cost_per_entry_ms = 0.0;
ysr@777 1597 if (cards_scanned > 10) {
ysr@777 1598 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
ysr@777 1599 if (_last_young_gc_full)
ysr@777 1600 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1601 else
ysr@777 1602 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1603 }
ysr@777 1604
ysr@777 1605 if (_max_rs_lengths > 0) {
ysr@777 1606 double cards_per_entry_ratio =
ysr@777 1607 (double) cards_scanned / (double) _max_rs_lengths;
ysr@777 1608 if (_last_young_gc_full)
ysr@777 1609 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1610 else
ysr@777 1611 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1612 }
ysr@777 1613
ysr@777 1614 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
ysr@777 1615 if (rs_length_diff >= 0)
ysr@777 1616 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1617
ysr@777 1618 size_t copied_bytes = surviving_bytes;
ysr@777 1619 double cost_per_byte_ms = 0.0;
ysr@777 1620 if (copied_bytes > 0) {
ysr@777 1621 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
ysr@777 1622 if (_in_marking_window)
ysr@777 1623 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
ysr@777 1624 else
ysr@777 1625 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
ysr@777 1626 }
ysr@777 1627
ysr@777 1628 double all_other_time_ms = pause_time_ms -
johnc@1829 1629 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1630 _mark_closure_time_ms + termination_time);
ysr@777 1631
ysr@777 1632 double young_other_time_ms = 0.0;
ysr@777 1633 if (_recorded_young_regions > 0) {
ysr@777 1634 young_other_time_ms =
ysr@777 1635 _recorded_young_cset_choice_time_ms +
ysr@777 1636 _recorded_young_free_cset_time_ms;
ysr@777 1637 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
ysr@777 1638 (double) _recorded_young_regions);
ysr@777 1639 }
ysr@777 1640 double non_young_other_time_ms = 0.0;
ysr@777 1641 if (_recorded_non_young_regions > 0) {
ysr@777 1642 non_young_other_time_ms =
ysr@777 1643 _recorded_non_young_cset_choice_time_ms +
ysr@777 1644 _recorded_non_young_free_cset_time_ms;
ysr@777 1645
ysr@777 1646 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
ysr@777 1647 (double) _recorded_non_young_regions);
ysr@777 1648 }
ysr@777 1649
ysr@777 1650 double constant_other_time_ms = all_other_time_ms -
ysr@777 1651 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1652 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1653
ysr@777 1654 double survival_ratio = 0.0;
ysr@777 1655 if (_bytes_in_collection_set_before_gc > 0) {
tonyp@3028 1656 survival_ratio = (double) _bytes_copied_during_gc /
tonyp@3028 1657 (double) _bytes_in_collection_set_before_gc;
ysr@777 1658 }
ysr@777 1659
ysr@777 1660 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1661 _scanned_cards_seq->add((double) cards_scanned);
ysr@777 1662 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1663
ysr@777 1664 double expensive_region_limit_ms =
johnc@1186 1665 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1666 if (expensive_region_limit_ms < 0.0) {
ysr@777 1667 // this means that the other time was predicted to be longer than
ysr@777 1668 // than the max pause time
johnc@1186 1669 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1670 }
ysr@777 1671 _expensive_region_limit_ms = expensive_region_limit_ms;
ysr@777 1672
ysr@777 1673 if (PREDICTIONS_VERBOSE) {
ysr@777 1674 gclog_or_tty->print_cr("");
ysr@777 1675 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
johnc@1829 1676 "REGIONS %d %d %d "
ysr@777 1677 "PENDING_CARDS %d %d "
ysr@777 1678 "CARDS_SCANNED %d %d "
ysr@777 1679 "RS_LENGTHS %d %d "
ysr@777 1680 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
ysr@777 1681 "SURVIVAL_RATIO %1.6lf %1.6lf "
ysr@777 1682 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
ysr@777 1683 "OTHER_YOUNG %1.6lf %1.6lf "
ysr@777 1684 "OTHER_NON_YOUNG %1.6lf %1.6lf "
ysr@777 1685 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
ysr@777 1686 "ELAPSED %1.6lf %1.6lf ",
ysr@777 1687 _cur_collection_start_sec,
ysr@777 1688 (!_last_young_gc_full) ? 2 :
ysr@777 1689 (last_pause_included_initial_mark) ? 1 : 0,
ysr@777 1690 _recorded_region_num,
ysr@777 1691 _recorded_young_regions,
ysr@777 1692 _recorded_non_young_regions,
ysr@777 1693 _predicted_pending_cards, _pending_cards,
ysr@777 1694 _predicted_cards_scanned, cards_scanned,
ysr@777 1695 _predicted_rs_lengths, _max_rs_lengths,
ysr@777 1696 _predicted_rs_update_time_ms, update_rs_time,
ysr@777 1697 _predicted_rs_scan_time_ms, scan_rs_time,
ysr@777 1698 _predicted_survival_ratio, survival_ratio,
ysr@777 1699 _predicted_object_copy_time_ms, obj_copy_time,
ysr@777 1700 _predicted_constant_other_time_ms, constant_other_time_ms,
ysr@777 1701 _predicted_young_other_time_ms, young_other_time_ms,
ysr@777 1702 _predicted_non_young_other_time_ms,
ysr@777 1703 non_young_other_time_ms,
ysr@777 1704 _vtime_diff_ms, termination_time,
ysr@777 1705 _predicted_pause_time_ms, elapsed_ms);
ysr@777 1706 }
ysr@777 1707
ysr@777 1708 if (G1PolicyVerbose > 0) {
ysr@777 1709 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
ysr@777 1710 _predicted_pause_time_ms,
ysr@777 1711 (_within_target) ? "within" : "outside",
ysr@777 1712 elapsed_ms);
ysr@777 1713 }
ysr@777 1714
ysr@777 1715 }
ysr@777 1716
ysr@777 1717 _in_marking_window = new_in_marking_window;
ysr@777 1718 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1719 _free_regions_at_end_of_collection = _g1->free_regions();
tonyp@3119 1720 update_young_list_target_length();
ysr@777 1721
iveresov@1546 1722 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1723 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1724 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
ysr@777 1725 // </NEW PREDICTION>
ysr@777 1726 }
ysr@777 1727
// Helpers for logging byte quantities in a human-readable unit.
// EXT_SIZE_FORMAT is the printf-style format fragment and
// EXT_SIZE_PARAMS expands to the matching (scaled value, unit string)
// argument pair, so the two must always be used together.
#define EXT_SIZE_FORMAT "%d%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((bytes)),                            \
  proper_unit_for_byte_size((bytes))
tonyp@2961 1732
// Print the before/after heap occupancy transition for the pause just
// completed. With PrintGCDetails we emit a per-space (Eden/Survivors/Heap)
// breakdown; with plain PrintGC we fall back to the generic
// used(capacity)->used(capacity) transition line.
void G1CollectorPolicy::print_heap_transition() {
  if (PrintGCDetails) {
    YoungList* young_list = _g1->young_list();
    size_t eden_bytes = young_list->eden_used_bytes();
    size_t survivor_bytes = young_list->survivor_used_bytes();
    size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
    size_t used = _g1->used();
    size_t capacity = _g1->capacity();
    // Eden capacity is derived from the young list target length minus
    // the space currently taken by survivor regions.
    size_t eden_capacity =
      (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;

    // NOTE: the argument order must track the EXT_SIZE_FORMAT slots in
    // the format string below (each EXT_SIZE_PARAMS expands to two args).
    gclog_or_tty->print_cr(
      "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
      "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
      "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
      EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
      EXT_SIZE_PARAMS(_eden_bytes_before_gc),
      EXT_SIZE_PARAMS(_prev_eden_capacity),
      EXT_SIZE_PARAMS(eden_bytes),
      EXT_SIZE_PARAMS(eden_capacity),
      EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
      EXT_SIZE_PARAMS(survivor_bytes),
      EXT_SIZE_PARAMS(used_before_gc),
      EXT_SIZE_PARAMS(_capacity_before_gc),
      EXT_SIZE_PARAMS(used),
      EXT_SIZE_PARAMS(capacity));

    // Remember the eden capacity so the next pause can report its
    // "before" value.
    _prev_eden_capacity = eden_capacity;
  } else if (PrintGC) {
    _g1->print_size_transition(gclog_or_tty,
                               _cur_collection_pause_used_at_start_bytes,
                               _g1->used(), _g1->capacity());
  }
}
tonyp@2961 1767
ysr@777 1768 // <NEW PREDICTION>
ysr@777 1769
// Feedback-adjust the concurrent refinement thread zones and the dirty
// card queue thresholds, based on how the last RS update phase performed
// against its time goal.
//
// update_rs_time:              time (ms) the last RS update phase took
// update_rs_processed_buffers: number of buffers it processed
// goal_ms:                     target time (ms) for the RS update phase
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    // Yellow and red zones are fixed multiples of the green zone.
    const int k_gy = 3, k_gr = 6;
    // Multiplicative factors to grow/shrink the green zone.
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      // RS update overshot its goal: shrink the green zone so refinement
      // threads kick in earlier and leave fewer buffers for the pause.
      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        // Under the goal while processing more buffers than the green
        // zone: grow it (by at least 1, so a zero zone can recover).
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    // Mutator help threshold: green zone plus a sigma()-scaled slack,
    // but never beyond the yellow zone.
    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  // If the completed-buffer queue has backed up to the yellow zone,
  // record the current backlog as padding; otherwise clear the padding.
  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}
iveresov@1546 1810
ysr@777 1811 double
ysr@777 1812 G1CollectorPolicy::
ysr@777 1813 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1814 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1815
ysr@777 1816 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1817 size_t young_num = g1h->young_list()->length();
ysr@777 1818 if (young_num == 0)
ysr@777 1819 return 0.0;
ysr@777 1820
ysr@777 1821 young_num += adjustment;
ysr@777 1822 size_t pending_cards = predict_pending_cards();
johnc@1829 1823 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1824 predict_rs_length_diff();
ysr@777 1825 size_t card_num;
ysr@777 1826 if (full_young_gcs())
ysr@777 1827 card_num = predict_young_card_num(rs_lengths);
ysr@777 1828 else
ysr@777 1829 card_num = predict_non_young_card_num(rs_lengths);
ysr@777 1830 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1831 double accum_yg_surv_rate =
ysr@777 1832 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1833
ysr@777 1834 size_t bytes_to_copy =
ysr@777 1835 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1836
ysr@777 1837 return
ysr@777 1838 predict_rs_update_time_ms(pending_cards) +
ysr@777 1839 predict_rs_scan_time_ms(card_num) +
ysr@777 1840 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1841 predict_young_other_time_ms(young_num) +
ysr@777 1842 predict_constant_other_time_ms();
ysr@777 1843 }
ysr@777 1844
ysr@777 1845 double
ysr@777 1846 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1847 size_t rs_length = predict_rs_length_diff();
ysr@777 1848 size_t card_num;
ysr@777 1849 if (full_young_gcs())
ysr@777 1850 card_num = predict_young_card_num(rs_length);
ysr@777 1851 else
ysr@777 1852 card_num = predict_non_young_card_num(rs_length);
ysr@777 1853 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1854 }
ysr@777 1855
ysr@777 1856 double
ysr@777 1857 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1858 size_t scanned_cards) {
ysr@777 1859 return
ysr@777 1860 predict_rs_update_time_ms(pending_cards) +
ysr@777 1861 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1862 predict_constant_other_time_ms();
ysr@777 1863 }
ysr@777 1864
ysr@777 1865 double
ysr@777 1866 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1867 bool young) {
ysr@777 1868 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1869 size_t card_num;
ysr@777 1870 if (full_young_gcs())
ysr@777 1871 card_num = predict_young_card_num(rs_length);
ysr@777 1872 else
ysr@777 1873 card_num = predict_non_young_card_num(rs_length);
ysr@777 1874 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1875
ysr@777 1876 double region_elapsed_time_ms =
ysr@777 1877 predict_rs_scan_time_ms(card_num) +
ysr@777 1878 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1879
ysr@777 1880 if (young)
ysr@777 1881 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1882 else
ysr@777 1883 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1884
ysr@777 1885 return region_elapsed_time_ms;
ysr@777 1886 }
ysr@777 1887
ysr@777 1888 size_t
ysr@777 1889 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1890 size_t bytes_to_copy;
ysr@777 1891 if (hr->is_marked())
ysr@777 1892 bytes_to_copy = hr->max_live_bytes();
ysr@777 1893 else {
ysr@777 1894 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1895 "invariant" );
ysr@777 1896 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1897 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1898 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1899 }
ysr@777 1900
ysr@777 1901 return bytes_to_copy;
ysr@777 1902 }
ysr@777 1903
ysr@777 1904 void
ysr@777 1905 G1CollectorPolicy::start_recording_regions() {
ysr@777 1906 _recorded_rs_lengths = 0;
ysr@777 1907 _recorded_young_regions = 0;
ysr@777 1908 _recorded_non_young_regions = 0;
ysr@777 1909
ysr@777 1910 #if PREDICTIONS_VERBOSE
ysr@777 1911 _recorded_marked_bytes = 0;
ysr@777 1912 _recorded_young_bytes = 0;
ysr@777 1913 _predicted_bytes_to_copy = 0;
johnc@1829 1914 _predicted_rs_lengths = 0;
johnc@1829 1915 _predicted_cards_scanned = 0;
ysr@777 1916 #endif // PREDICTIONS_VERBOSE
ysr@777 1917 }
ysr@777 1918
ysr@777 1919 void
johnc@1829 1920 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
ysr@777 1921 #if PREDICTIONS_VERBOSE
johnc@1829 1922 if (!young) {
ysr@777 1923 _recorded_marked_bytes += hr->max_live_bytes();
ysr@777 1924 }
ysr@777 1925 _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
ysr@777 1926 #endif // PREDICTIONS_VERBOSE
ysr@777 1927
ysr@777 1928 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1929 _recorded_rs_lengths += rs_length;
ysr@777 1930 }
ysr@777 1931
ysr@777 1932 void
johnc@1829 1933 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
johnc@1829 1934 assert(!hr->is_young(), "should not call this");
johnc@1829 1935 ++_recorded_non_young_regions;
johnc@1829 1936 record_cset_region_info(hr, false);
johnc@1829 1937 }
johnc@1829 1938
johnc@1829 1939 void
johnc@1829 1940 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
johnc@1829 1941 _recorded_young_regions = n_regions;
johnc@1829 1942 }
johnc@1829 1943
johnc@1829 1944 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
johnc@1829 1945 #if PREDICTIONS_VERBOSE
johnc@1829 1946 _recorded_young_bytes = bytes;
johnc@1829 1947 #endif // PREDICTIONS_VERBOSE
johnc@1829 1948 }
johnc@1829 1949
johnc@1829 1950 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
johnc@1829 1951 _recorded_rs_lengths = rs_lengths;
johnc@1829 1952 }
johnc@1829 1953
johnc@1829 1954 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 1955 _predicted_bytes_to_copy = bytes;
ysr@777 1956 }
ysr@777 1957
// Finalize collection-set recording: derive all the _predicted_* values
// for the upcoming pause from the recorded region/RS-length totals.
// Depends on start_recording_regions() having zeroed the accumulators.
void
G1CollectorPolicy::end_recording_regions() {
  // The _predicted_pause_time_ms field is referenced in code
  // not under PREDICTIONS_VERBOSE. Let's initialize it.
  _predicted_pause_time_ms = -1.0;

#if PREDICTIONS_VERBOSE
  _predicted_pending_cards = predict_pending_cards();
  _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  // '+=' (not '=') because _predicted_cards_scanned was zeroed in
  // start_recording_regions().
  if (full_young_gcs())
    _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  else
    _predicted_cards_scanned +=
      predict_non_young_card_num(_predicted_rs_lengths);
  _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;

  // Per-phase predictions, each from its own cost model.
  _predicted_rs_update_time_ms =
    predict_rs_update_time_ms(_g1->pending_card_num());
  _predicted_rs_scan_time_ms =
    predict_rs_scan_time_ms(_predicted_cards_scanned);
  _predicted_object_copy_time_ms =
    predict_object_copy_time_ms(_predicted_bytes_to_copy);
  _predicted_constant_other_time_ms =
    predict_constant_other_time_ms();
  _predicted_young_other_time_ms =
    predict_young_other_time_ms(_recorded_young_regions);
  _predicted_non_young_other_time_ms =
    predict_non_young_other_time_ms(_recorded_non_young_regions);

  // Total predicted pause time is the sum of the per-phase predictions.
  _predicted_pause_time_ms =
    _predicted_rs_update_time_ms +
    _predicted_rs_scan_time_ms +
    _predicted_object_copy_time_ms +
    _predicted_constant_other_time_ms +
    _predicted_young_other_time_ms +
    _predicted_non_young_other_time_ms;
#endif // PREDICTIONS_VERBOSE
}
ysr@777 1996
// Called with the predicted elapsed time of a region just added to the
// collection set. If a single region is predicted to cost more than
// _expensive_region_limit_ms, further partially-young GCs are unlikely
// to be profitable, so request a switch back to fully-young GCs.
void G1CollectorPolicy::check_if_region_is_too_expensive(double
                                                           predicted_time_ms) {
  // I don't think we need to do this when in young GC mode since
  // marking will be initiated next time we hit the soft limit anyway...
  if (predicted_time_ms > _expensive_region_limit_ms) {
    ergo_verbose2(ErgoPartiallyYoungGCs,
                  "request partially-young GCs end",
                  ergo_format_reason("predicted region time higher than threshold")
                  ergo_format_ms("predicted region time")
                  ergo_format_ms("threshold"),
                  predicted_time_ms, _expensive_region_limit_ms);
    // no point in doing another partial one
    _should_revert_to_full_young_gcs = true;
  }
}
ysr@777 2012
ysr@777 2013 // </NEW PREDICTION>
ysr@777 2014
ysr@777 2015
// Record the timing of a just-finished GC: its elapsed time and end
// timestamp feed the truncated sequences used for pause-time
// predictions and the recent pause-time-ratio calculation.
void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  // Remember this pause's end (converted to ms) as the start of the
  // next mutator interval.
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}
ysr@777 2022
ysr@777 2023 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
johnc@3021 2024 if (_recent_pause_times_ms->num() == 0) {
johnc@3021 2025 return (double) MaxGCPauseMillis;
johnc@3021 2026 }
johnc@3021 2027 return _recent_pause_times_ms->avg();
ysr@777 2028 }
ysr@777 2029
johnc@3021 2030 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
johnc@3021 2031 if (_recent_rs_scan_times_ms->num() == 0) {
johnc@1186 2032 return (double)MaxGCPauseMillis/3.0;
johnc@3021 2033 }
johnc@3021 2034 return _recent_rs_scan_times_ms->avg();
ysr@777 2035 }
ysr@777 2036
// Number of GCs currently recorded in the recent-history sequences.
// The asserts verify that all four sequences have stayed in lock-step,
// so any one of them could supply the count.
int G1CollectorPolicy::number_of_recent_gcs() {
  assert(_recent_rs_scan_times_ms->num() ==
         _recent_pause_times_ms->num(), "Sequence out of sync");
  assert(_recent_pause_times_ms->num() ==
         _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  assert(_recent_CS_bytes_used_before->num() ==
         _recent_CS_bytes_surviving->num(), "Sequence out of sync");

  return _recent_pause_times_ms->num();
}
ysr@777 2047
// Average survival fraction over the recent collection history:
// total bytes surviving / total CSet bytes before collection.
double G1CollectorPolicy::recent_avg_survival_fraction() {
  return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
                                           _recent_CS_bytes_used_before);
}
ysr@777 2052
// Survival fraction of only the most recent collection.
double G1CollectorPolicy::last_survival_fraction() {
  return last_survival_fraction_work(_recent_CS_bytes_surviving,
                                     _recent_CS_bytes_used_before);
}
ysr@777 2057
ysr@777 2058 double
ysr@777 2059 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 2060 TruncatedSeq* before) {
ysr@777 2061 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 2062 if (before->sum() > 0.0) {
ysr@777 2063 double recent_survival_rate = surviving->sum() / before->sum();
ysr@777 2064 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 2065 // fragmentation can produce negative collections.
ysr@777 2066 // Further, we're now always doing parallel collection. But I'm still
ysr@777 2067 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 2068 // (DLD, 10/05.)
jmasa@2188 2069 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
ysr@777 2070 _g1->evacuation_failed() ||
ysr@777 2071 recent_survival_rate <= 1.0, "Or bad frac");
ysr@777 2072 return recent_survival_rate;
ysr@777 2073 } else {
ysr@777 2074 return 1.0; // Be conservative.
ysr@777 2075 }
ysr@777 2076 }
ysr@777 2077
ysr@777 2078 double
ysr@777 2079 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 2080 TruncatedSeq* before) {
ysr@777 2081 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 2082 if (surviving->num() > 0 && before->last() > 0.0) {
ysr@777 2083 double last_survival_rate = surviving->last() / before->last();
ysr@777 2084 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 2085 // fragmentation can produce negative collections.
ysr@777 2086 // Further, we're now always doing parallel collection. But I'm still
ysr@777 2087 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 2088 // (DLD, 10/05.)
jmasa@2188 2089 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
ysr@777 2090 last_survival_rate <= 1.0, "Or bad frac");
ysr@777 2091 return last_survival_rate;
ysr@777 2092 } else {
ysr@777 2093 return 1.0;
ysr@777 2094 }
ysr@777 2095 }
ysr@777 2096
// Survival-rate heuristics: until survival_min_obs recent GCs have
// been observed, assume at least the pessimistic default rate from
// survival_min_obs_limits (indexed by the number of observations so
// far), and never predict below min_survival_rate.
static const int survival_min_obs = 5;
static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
static const double min_survival_rate = 0.1;
ysr@777 2100
ysr@777 2101 double
ysr@777 2102 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
ysr@777 2103 double latest) {
ysr@777 2104 double res = avg;
ysr@777 2105 if (number_of_recent_gcs() < survival_min_obs) {
ysr@777 2106 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
ysr@777 2107 }
ysr@777 2108 res = MAX2(res, latest);
ysr@777 2109 res = MAX2(res, min_survival_rate);
ysr@777 2110 // In the parallel case, LAB fragmentation can produce "negative
ysr@777 2111 // collections"; so can evac failure. Cap at 1.0
ysr@777 2112 res = MIN2(res, 1.0);
ysr@777 2113 return res;
ysr@777 2114 }
ysr@777 2115
// Returns the number of bytes the heap should be expanded by after a
// GC, or 0 when no expansion is called for. Expansion is attempted
// when the recent GC overhead (pause-time ratio as a percentage)
// exceeds the _gc_overhead_perc threshold.
size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    // Clamp: no more than a doubling, no less than the minimum, and
    // never more than what is actually uncommitted.
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}
ysr@777 2152
// Records the wall-clock time at which the concurrent mark thread
// started up.
void G1CollectorPolicy::note_start_of_mark_thread() {
  _mark_thread_startup_sec = os::elapsedTime();
}
ysr@777 2156
ysr@777 2157 class CountCSClosure: public HeapRegionClosure {
ysr@777 2158 G1CollectorPolicy* _g1_policy;
ysr@777 2159 public:
ysr@777 2160 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 2161 _g1_policy(g1_policy) {}
ysr@777 2162 bool doHeapRegion(HeapRegion* r) {
ysr@777 2163 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 2164 return false;
ysr@777 2165 }
ysr@777 2166 };
ysr@777 2167
ysr@777 2168 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 2169 CountCSClosure cs_closure(this);
ysr@777 2170 _g1->collection_set_iterate(&cs_closure);
ysr@777 2171 }
ysr@777 2172
ysr@777 2173 void G1CollectorPolicy::print_summary (int level,
ysr@777 2174 const char* str,
ysr@777 2175 NumberSeq* seq) const {
ysr@777 2176 double sum = seq->sum();
brutisso@2645 2177 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 2178 str, sum / 1000.0, seq->avg());
ysr@777 2179 }
ysr@777 2180
ysr@777 2181 void G1CollectorPolicy::print_summary_sd (int level,
ysr@777 2182 const char* str,
ysr@777 2183 NumberSeq* seq) const {
ysr@777 2184 print_summary(level, str, seq);
brutisso@2645 2185 LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 2186 seq->num(), seq->sd(), seq->maximum());
ysr@777 2187 }
ysr@777 2188
// Sanity-checks the recorded "Other" times against the value derived
// by subtracting the known sub-phases from the total
// (calc_other_times_ms). Prints warnings when the two disagree by more
// than 10%, or when either is meaningfully negative.
void G1CollectorPolicy::check_other_times(int level,
                                          NumberSeq* other_times_ms,
                                          NumberSeq* calc_other_times_ms) const {
  bool should_print = false;
  LineBuffer buf(level + 2);

  // Ratio of the larger to the smaller magnitude > 1.1 means the sums
  // disagree by more than 10%.
  double max_sum = MAX2(fabs(other_times_ms->sum()),
                        fabs(calc_other_times_ms->sum()));
  double min_sum = MIN2(fabs(other_times_ms->sum()),
                        fabs(calc_other_times_ms->sum()));
  double sum_ratio = max_sum / min_sum;
  if (sum_ratio > 1.1) {
    should_print = true;
    buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  }

  // Same 10% check applied to the averages.
  double max_avg = MAX2(fabs(other_times_ms->avg()),
                        fabs(calc_other_times_ms->avg()));
  double min_avg = MIN2(fabs(other_times_ms->avg()),
                        fabs(calc_other_times_ms->avg()));
  double avg_ratio = max_avg / min_avg;
  if (avg_ratio > 1.1) {
    should_print = true;
    buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  }

  // The -0.01 tolerance absorbs floating-point noise around zero.
  // NOTE(review): the two "RECORDED ... NEGATIVE" branches below do
  // not set should_print, unlike the "CALCULATED" branches — this
  // asymmetry looks unintentional; confirm whether it is deliberate.
  if (other_times_ms->sum() < -0.01) {
    buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  }

  if (other_times_ms->avg() < -0.01) {
    buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  }

  if (calc_other_times_ms->sum() < -0.01) {
    should_print = true;
    buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  }

  if (calc_other_times_ms->avg() < -0.01) {
    should_print = true;
    buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  }

  // Only print the derived summary when something looked suspicious.
  if (should_print)
    print_summary(level, "Other(Calc)", calc_other_times_ms);
}
ysr@777 2236
// Prints the full evacuation-pause summary: total pause times, the
// per-phase breakdown (with a parallel or serial layout depending on
// whether parallel GC threads are in use), and cross-checks of the
// recorded "Other" times against the derived values.
void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  MainBodySummary* body_summary = summary->main_body_summary();
  if (summary->get_total_seq()->num() > 0) {
    print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
    if (body_summary != NULL) {
      print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
      if (parallel) {
        // Parallel layout: per-phase times are nested under the
        // "Parallel Time" umbrella.
        print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
        print_summary(2, "Update RS", body_summary->get_update_rs_seq());
        print_summary(2, "Ext Root Scanning",
                      body_summary->get_ext_root_scan_seq());
        print_summary(2, "Mark Stack Scanning",
                      body_summary->get_mark_stack_scan_seq());
        print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
        print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
        print_summary(2, "Termination", body_summary->get_termination_seq());
        print_summary(2, "Other", body_summary->get_parallel_other_seq());
        {
          // Derive "Other" as parallel time minus the six sub-phases
          // and compare it with the recorded value.
          NumberSeq* other_parts[] = {
            body_summary->get_update_rs_seq(),
            body_summary->get_ext_root_scan_seq(),
            body_summary->get_mark_stack_scan_seq(),
            body_summary->get_scan_rs_seq(),
            body_summary->get_obj_copy_seq(),
            body_summary->get_termination_seq()
          };
          NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
                                        6, other_parts);
          check_other_times(2, body_summary->get_parallel_other_seq(),
                            &calc_other_times_ms);
        }
        print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
        print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
      } else {
        // Serial layout: the same phases, one level up.
        print_summary(1, "Update RS", body_summary->get_update_rs_seq());
        print_summary(1, "Ext Root Scanning",
                      body_summary->get_ext_root_scan_seq());
        print_summary(1, "Mark Stack Scanning",
                      body_summary->get_mark_stack_scan_seq());
        print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
        print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
      }
    }
    print_summary(1, "Other", summary->get_other_seq());
    {
      if (body_summary != NULL) {
        // Cross-check the top-level "Other" time (total minus the
        // major components) against the recorded value.
        NumberSeq calc_other_times_ms;
        if (parallel) {
          // parallel
          NumberSeq* other_parts[] = {
            body_summary->get_satb_drain_seq(),
            body_summary->get_parallel_seq(),
            body_summary->get_clear_ct_seq()
          };
          calc_other_times_ms = NumberSeq(summary->get_total_seq(),
                                          3, other_parts);
        } else {
          // serial
          NumberSeq* other_parts[] = {
            body_summary->get_satb_drain_seq(),
            body_summary->get_update_rs_seq(),
            body_summary->get_ext_root_scan_seq(),
            body_summary->get_mark_stack_scan_seq(),
            body_summary->get_scan_rs_seq(),
            body_summary->get_obj_copy_seq()
          };
          calc_other_times_ms = NumberSeq(summary->get_total_seq(),
                                          6, other_parts);
        }
        check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
      }
    }
  } else {
    LineBuffer(1).append_and_print_cr("none");
  }
  LineBuffer(0).append_and_print_cr("");
}
ysr@777 2315
// Dumps the policy's accumulated timing statistics to the GC log:
// under TraceGen0Time the pause summaries, evacuation-pause breakdown,
// stop-world/yield/aux times and region counts; under TraceGen1Time
// the full-GC totals.
void G1CollectorPolicy::print_tracing_info() const {
  if (TraceGen0Time) {
    gclog_or_tty->print_cr("ALL PAUSES");
    print_summary_sd(0, "Total", _all_pause_times_ms);
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
    gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
    gclog_or_tty->print_cr("");

    gclog_or_tty->print_cr("EVACUATION PAUSES");
    print_summary(_summary);

    gclog_or_tty->print_cr("MISC");
    print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
    print_summary_sd(0, "Yields", _all_yield_times_ms);
    // Only print aux timers that actually recorded something.
    for (int i = 0; i < _aux_num; ++i) {
      if (_all_aux_times_ms[i].num() > 0) {
        char buffer[96];
        sprintf(buffer, "Aux%d", i);
        print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
      }
    }

    // NOTE(review): all_region_num is a size_t printed with %d, and if
    // no regions were ever allocated the percentages divide by zero —
    // confirm whether this debug-only path can be reached with zero
    // regions and whether the format specifiers matter on LP64.
    size_t all_region_num = _region_num_young + _region_num_tenured;
    gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
               "Tenured %8d (%6.2lf%%)",
               all_region_num,
               _region_num_young,
               (double) _region_num_young / (double) all_region_num * 100.0,
               _region_num_tenured,
               (double) _region_num_tenured / (double) all_region_num * 100.0);
  }
  if (TraceGen1Time) {
    if (_all_full_gc_times_ms->num() > 0) {
      gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                 _all_full_gc_times_ms->num(),
                 _all_full_gc_times_ms->sum() / 1000.0);
      gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
      gclog_or_tty->print_cr("    [std. dev = %8.2f ms, max = %8.2f ms]",
                    _all_full_gc_times_ms->sd(),
                    _all_full_gc_times_ms->maximum());
    }
  }
}
ysr@777 2361
// Prints the survival-rate summary of the short-lived (young) surv
// rate group. Compiled to a no-op in PRODUCT builds.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
ysr@777 2368
tonyp@3114 2369 void G1CollectorPolicy::update_region_num(bool young) {
tonyp@2315 2370 if (young) {
ysr@777 2371 ++_region_num_young;
ysr@777 2372 } else {
ysr@777 2373 ++_region_num_tenured;
ysr@777 2374 }
ysr@777 2375 }
ysr@777 2376
ysr@777 2377 #ifndef PRODUCT
ysr@777 2378 // for debugging, bit of a hack...
ysr@777 2379 static char*
ysr@777 2380 region_num_to_mbs(int length) {
ysr@777 2381 static char buffer[64];
ysr@777 2382 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2383 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2384 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2385 return buffer;
ysr@777 2386 }
ysr@777 2387 #endif // PRODUCT
ysr@777 2388
apetrusenko@980 2389 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2390 switch (purpose) {
ysr@777 2391 case GCAllocForSurvived:
apetrusenko@980 2392 return _max_survivor_regions;
ysr@777 2393 case GCAllocForTenured:
apetrusenko@980 2394 return REGIONS_UNLIMITED;
ysr@777 2395 default:
apetrusenko@980 2396 ShouldNotReachHere();
apetrusenko@980 2397 return REGIONS_UNLIMITED;
ysr@777 2398 };
ysr@777 2399 }
ysr@777 2400
tonyp@3119 2401 void G1CollectorPolicy::update_max_gc_locker_expansion() {
tonyp@2333 2402 size_t expansion_region_num = 0;
tonyp@2333 2403 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 2404 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 2405 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 2406 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 2407 // less than 1.0) we'll get 1.
tonyp@2333 2408 expansion_region_num = (size_t) ceil(expansion_region_num_d);
tonyp@2333 2409 } else {
tonyp@2333 2410 assert(expansion_region_num == 0, "sanity");
tonyp@2333 2411 }
tonyp@2333 2412 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 2413 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 2414 }
tonyp@2333 2415
// Calculates survivor space parameters.
// Derives _max_survivor_regions from the young-list target length and
// SurvivorRatio, then recomputes the tenuring threshold from the
// current survivor age table given that capacity.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
        HeapRegion::GrainWords * _max_survivor_regions);
}
apetrusenko@980 2427
#ifndef PRODUCT
// Debug-only closure: asserts that every region that is not a
// continuation of a humongous object is properly ordered in the
// collection set chooser.
class HRSortIndexIsOKClosure: public HeapRegionClosure {
  CollectionSetChooser* _chooser;
public:
  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
    _chooser(chooser) {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
    }
    return false; // keep iterating
  }
};

// Debug-only verification hook: walks the whole heap checking chooser
// ordering via the closure's asserts; always returns true so it can be
// used inside an assert() at the call site.
bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
  HRSortIndexIsOKClosure cl(_collectionSetChooser);
  _g1->heap_region_iterate(&cl);
  return true;
}
#endif
ysr@777 2449
// Requests that the next GC pause be an initial-mark pause (on behalf
// of the given GC cause) unless a concurrent marking cycle is already
// in progress. Returns true iff the request was registered.
bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                      GCCause::Cause gc_cause) {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    ergo_verbose1(ErgoConcCycles,
                  "request concurrent cycle initiation",
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    // The actual cycle start decision is made later, in
    // decide_on_conc_mark_initiation().
    set_initiate_conc_mark_if_possible();
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("concurrent cycle already in progress")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    return false;
  }
}
tonyp@2011 2470
// Decides, at the start of a pause, whether this pause should be an
// initial-mark pause: if a concurrent cycle was requested (via
// initiate_conc_mark_if_possible()) and no cycle is currently running,
// mark this pause as initial-mark and consume the request.
void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                  "initiate concurrent cycle",
                  ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
      ergo_verbose0(ErgoConcCycles,
                    "do not initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle already in progress"));
    }
  }
}
tonyp@1794 2521
// Simply delegates to the base class; this subclass adds no extra
// pause-start work of its own.
void
G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_start(double start_time_sec, size_t start_used) {
  G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
}
ysr@777 2527
ysr@777 2528 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2529 CollectionSetChooser* _hrSorted;
ysr@777 2530
ysr@777 2531 public:
ysr@777 2532 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2533 _hrSorted(hrSorted)
ysr@777 2534 {}
ysr@777 2535
ysr@777 2536 bool doHeapRegion(HeapRegion* r) {
ysr@777 2537 // We only include humongous regions in collection
ysr@777 2538 // sets when concurrent mark shows that their contained object is
ysr@777 2539 // unreachable.
ysr@777 2540
ysr@777 2541 // Do we have any marking information for this region?
ysr@777 2542 if (r->is_marked()) {
ysr@777 2543 // We don't include humongous regions in collection
ysr@777 2544 // sets because we collect them immediately at the end of a marking
ysr@777 2545 // cycle. We also don't include young regions because we *must*
ysr@777 2546 // include them in the next collection pause.
ysr@777 2547 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2548 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2549 }
ysr@777 2550 }
ysr@777 2551 return false;
ysr@777 2552 }
ysr@777 2553 };
ysr@777 2554
// Parallel counterpart of KnownGarbageClosure: each worker stores
// candidate regions into slots of the chooser's backing array, claimed
// in chunks via getParMarkedHeapRegionChunk() to avoid contending on a
// shared list.
class ParKnownGarbageHRClosure: public HeapRegionClosure {
  CollectionSetChooser* _hrSorted;
  jint _marked_regions_added; // regions this worker has added so far
  jint _chunk_size;           // slots claimed from the chooser at a time
  jint _cur_chunk_idx;
  jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
  int _worker;         // worker id (currently only stored, not used)
  int _invokes;        // times doHeapRegion() was called (for stats)

  // Claim the next chunk of array slots from the chooser.
  void get_new_chunk() {
    _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
    _cur_chunk_end = _cur_chunk_idx + _chunk_size;
  }
  // Store r in the next free slot, claiming a fresh chunk first if the
  // current one is exhausted.
  void add_region(HeapRegion* r) {
    if (_cur_chunk_idx == _cur_chunk_end) {
      get_new_chunk();
    }
    assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
    _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
    _marked_regions_added++;
    _cur_chunk_idx++;
  }

public:
  ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                           jint chunk_size,
                           int worker) :
    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
    _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
    _invokes(0)
  {}

  bool doHeapRegion(HeapRegion* r) {
    // We only include humongous regions in collection
    // sets when concurrent mark shows that their contained object is
    // unreachable.
    _invokes++;

    // Do we have any marking information for this region?
    if (r->is_marked()) {
      // We don't include humongous regions in collection
      // sets because we collect them immediately at the end of a marking
      // cycle.
      // We also do not include young regions in collection sets
      if (!r->isHumongous() && !r->is_young()) {
        add_region(r);
      }
    }
    return false;
  }
  jint marked_regions_added() { return _marked_regions_added; }
  int invokes() { return _invokes; }
};
ysr@777 2608
// Gang task that runs a ParKnownGarbageHRClosure per worker over the
// heap, then accumulates each worker's region count into the chooser.
// Workers divide the heap via the chunked, claim-value-based iterator.
class ParKnownGarbageTask: public AbstractGangTask {
  CollectionSetChooser* _hrSorted;
  jint _chunk_size; // slots each worker claims from the chooser at a time
  G1CollectedHeap* _g1;
public:
  ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
    AbstractGangTask("ParKnownGarbageTask"),
    _hrSorted(hrSorted), _chunk_size(chunk_size),
    _g1(G1CollectedHeap::heap())
  {}

  void work(int i) {
    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
    // Back to zero for the claim value.
    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
                                         HeapRegion::InitialClaimValue);
    jint regions_added = parKnownGarbageCl.marked_regions_added();
    _hrSorted->incNumMarkedHeapRegions(regions_added);
    if (G1PrintParCleanupStats) {
      gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
                 i, parKnownGarbageCl.invokes(), regions_added);
    }
  }
};
ysr@777 2633
// Called at the end of concurrent-mark cleanup. Rebuilds the
// collection set chooser's candidate list (marked, non-young,
// non-humongous regions) — in parallel when GC worker threads are in
// use — and sorts it by GC efficiency. Each step is timed and printed
// when G1PrintParCleanupStats is set.
void
G1CollectorPolicy_BestRegionsFirst::
record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                   size_t max_live_bytes) {
  // `start` (like the other timestamps below) is only written and read
  // when G1PrintParCleanupStats is enabled.
  double start;
  if (G1PrintParCleanupStats) start = os::elapsedTime();
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);

  _collectionSetChooser->clearMarkedHeapRegions();
  double clear_marked_end;
  if (G1PrintParCleanupStats) {
    clear_marked_end = os::elapsedTime();
    gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
                           (clear_marked_end - start)*1000.0);
  }
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    // Partition the chooser's array: roughly OverpartitionFactor
    // chunks per worker, but never fewer than MinWorkUnit slots each.
    const size_t OverpartitionFactor = 4;
    const size_t MinWorkUnit = 8;
    const size_t WorkUnit =
      MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
           MinWorkUnit);
    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
                                                             WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    // Serial fallback: a single pass over all regions.
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }
  double known_garbage_end;
  if (G1PrintParCleanupStats) {
    known_garbage_end = os::elapsedTime();
    gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
                           (known_garbage_end - clear_marked_end)*1000.0);
  }
  // Sort the candidates so the most GC-efficient regions come first.
  _collectionSetChooser->sortMarkedHeapRegions();
  double sort_end;
  if (G1PrintParCleanupStats) {
    sort_end = os::elapsedTime();
    gclog_or_tty->print_cr(" sorting: %8.3f ms.",
                           (sort_end - known_garbage_end)*1000.0);
  }

  record_concurrent_mark_cleanup_end_work2();
  double work2_end;
  if (G1PrintParCleanupStats) {
    work2_end = os::elapsedTime();
    gclog_or_tty->print_cr(" work2: %8.3f ms.",
                           (work2_end - sort_end)*1000.0);
  }
}
ysr@777 2689
// Add the heap region at the head of the non-incremental collection set
void G1CollectorPolicy::
add_to_collection_set(HeapRegion* hr) {
  assert(_inc_cset_build_state == Active, "Precondition");
  assert(!hr->is_young(), "non-incremental add of young region");

  // Concurrent mark needs to know about every region in the CSet.
  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->registerCSetRegion(hr);

  assert(!hr->in_collection_set(), "should not already be in the CSet");
  hr->set_in_collection_set(true);
  // Splice hr onto the head of the singly-linked collection set list
  // and update the size/bytes accounting.
  hr->set_next_in_collection_set(_collection_set);
  _collection_set = hr;
  _collection_set_size++;
  _collection_set_bytes_used_before += hr->used();
  // Keep the in-CSet fast-test lookup table in sync.
  _g1->register_region_with_in_cset_fast_test(hr);
}
ysr@777 2707
johnc@1829 2708 // Initialize the per-collection-set information
johnc@1829 2709 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2710 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2711
johnc@1829 2712 _inc_cset_head = NULL;
johnc@1829 2713 _inc_cset_tail = NULL;
johnc@1829 2714 _inc_cset_size = 0;
johnc@1829 2715 _inc_cset_bytes_used_before = 0;
johnc@1829 2716
brutisso@3065 2717 _inc_cset_young_index = 0;
johnc@1829 2718
johnc@1829 2719 _inc_cset_max_finger = 0;
johnc@1829 2720 _inc_cset_recorded_young_bytes = 0;
johnc@1829 2721 _inc_cset_recorded_rs_lengths = 0;
johnc@1829 2722 _inc_cset_predicted_elapsed_time_ms = 0;
johnc@1829 2723 _inc_cset_predicted_bytes_to_copy = 0;
johnc@1829 2724 _inc_cset_build_state = Active;
johnc@1829 2725 }
johnc@1829 2726
johnc@1829 2727 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2728 // This routine is used when:
johnc@1829 2729 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2730 // evacuation pause,
johnc@1829 2731 // * adding the current allocation region to the incremental cset
johnc@1829 2732 // when it is retired, and
johnc@1829 2733 // * updating existing policy information for a region in the
johnc@1829 2734 // incremental cset via young list RSet sampling.
johnc@1829 2735 // Therefore this routine may be called at a safepoint by the
johnc@1829 2736 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2737 // retiring the current allocation region) or a concurrent
johnc@1829 2738 // refine thread (RSet sampling).
johnc@1829 2739
johnc@1829 2740 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2741 size_t used_bytes = hr->used();
johnc@1829 2742
johnc@1829 2743 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2744 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2745
johnc@1829 2746 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2747
johnc@1829 2748 // Cache the values we have added to the aggregated informtion
johnc@1829 2749 // in the heap region in case we have to remove this region from
johnc@1829 2750 // the incremental collection set, or it is updated by the
johnc@1829 2751 // rset sampling code
johnc@1829 2752 hr->set_recorded_rs_length(rs_length);
johnc@1829 2753 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2754
johnc@1829 2755 #if PREDICTIONS_VERBOSE
johnc@1829 2756 size_t bytes_to_copy = predict_bytes_to_copy(hr);
johnc@1829 2757 _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
johnc@1829 2758
johnc@1829 2759 // Record the number of bytes used in this region
johnc@1829 2760 _inc_cset_recorded_young_bytes += used_bytes;
johnc@1829 2761
johnc@1829 2762 // Cache the values we have added to the aggregated informtion
johnc@1829 2763 // in the heap region in case we have to remove this region from
johnc@1829 2764 // the incremental collection set, or it is updated by the
johnc@1829 2765 // rset sampling code
johnc@1829 2766 hr->set_predicted_bytes_to_copy(bytes_to_copy);
johnc@1829 2767 #endif // PREDICTIONS_VERBOSE
johnc@1829 2768 }
johnc@1829 2769
johnc@1829 2770 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
johnc@1829 2771 // This routine is currently only called as part of the updating of
johnc@1829 2772 // existing policy information for regions in the incremental cset that
johnc@1829 2773 // is performed by the concurrent refine thread(s) as part of young list
johnc@1829 2774 // RSet sampling. Therefore we should not be at a safepoint.
johnc@1829 2775
johnc@1829 2776 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
johnc@1829 2777 assert(hr->is_young(), "it should be");
johnc@1829 2778
johnc@1829 2779 size_t used_bytes = hr->used();
johnc@1829 2780 size_t old_rs_length = hr->recorded_rs_length();
johnc@1829 2781 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
johnc@1829 2782
johnc@1829 2783 // Subtract the old recorded/predicted policy information for
johnc@1829 2784 // the given heap region from the collection set info.
johnc@1829 2785 _inc_cset_recorded_rs_lengths -= old_rs_length;
johnc@1829 2786 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
johnc@1829 2787
johnc@1829 2788 _inc_cset_bytes_used_before -= used_bytes;
johnc@1829 2789
johnc@1829 2790 // Clear the values cached in the heap region
johnc@1829 2791 hr->set_recorded_rs_length(0);
johnc@1829 2792 hr->set_predicted_elapsed_time_ms(0);
johnc@1829 2793
johnc@1829 2794 #if PREDICTIONS_VERBOSE
johnc@1829 2795 size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
johnc@1829 2796 _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
johnc@1829 2797
johnc@1829 2798 // Subtract the number of bytes used in this region
johnc@1829 2799 _inc_cset_recorded_young_bytes -= used_bytes;
johnc@1829 2800
johnc@1829 2801 // Clear the values cached in the heap region
johnc@1829 2802 hr->set_predicted_bytes_to_copy(0);
johnc@1829 2803 #endif // PREDICTIONS_VERBOSE
johnc@1829 2804 }
johnc@1829 2805
johnc@1829 2806 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
johnc@1829 2807 // Update the collection set information that is dependent on the new RS length
johnc@1829 2808 assert(hr->is_young(), "Precondition");
johnc@1829 2809
johnc@1829 2810 remove_from_incremental_cset_info(hr);
johnc@1829 2811 add_to_incremental_cset_info(hr, new_rs_length);
johnc@1829 2812 }
johnc@1829 2813
johnc@1829 2814 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
johnc@1829 2815 assert( hr->is_young(), "invariant");
johnc@1829 2816 assert( hr->young_index_in_cset() == -1, "invariant" );
johnc@1829 2817 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2818
johnc@1829 2819 // We need to clear and set the cached recorded/cached collection set
johnc@1829 2820 // information in the heap region here (before the region gets added
johnc@1829 2821 // to the collection set). An individual heap region's cached values
johnc@1829 2822 // are calculated, aggregated with the policy collection set info,
johnc@1829 2823 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2824 // by the Young List sampling code.
johnc@1829 2825
johnc@1829 2826 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2827 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2828
johnc@1829 2829 HeapWord* hr_end = hr->end();
johnc@1829 2830 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2831
johnc@1829 2832 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2833 hr->set_in_collection_set(true);
johnc@1829 2834 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2835
johnc@1829 2836 _inc_cset_size++;
johnc@1829 2837 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2838
johnc@1829 2839 hr->set_young_index_in_cset((int) _inc_cset_young_index);
johnc@1829 2840 ++_inc_cset_young_index;
johnc@1829 2841 }
johnc@1829 2842
johnc@1829 2843 // Add the region at the RHS of the incremental cset
johnc@1829 2844 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2845 // We should only ever be appending survivors at the end of a pause
johnc@1829 2846 assert( hr->is_survivor(), "Logic");
johnc@1829 2847
johnc@1829 2848 // Do the 'common' stuff
johnc@1829 2849 add_region_to_incremental_cset_common(hr);
johnc@1829 2850
johnc@1829 2851 // Now add the region at the right hand side
johnc@1829 2852 if (_inc_cset_tail == NULL) {
johnc@1829 2853 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2854 _inc_cset_head = hr;
johnc@1829 2855 } else {
johnc@1829 2856 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2857 }
johnc@1829 2858 _inc_cset_tail = hr;
johnc@1829 2859 }
johnc@1829 2860
johnc@1829 2861 // Add the region to the LHS of the incremental cset
johnc@1829 2862 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2863 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2864 assert(!hr->is_survivor(), "Logic");
johnc@1829 2865
johnc@1829 2866 // Do the 'common' stuff
johnc@1829 2867 add_region_to_incremental_cset_common(hr);
johnc@1829 2868
johnc@1829 2869 // Add the region at the left hand side
johnc@1829 2870 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2871 if (_inc_cset_head == NULL) {
johnc@1829 2872 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2873 _inc_cset_tail = hr;
johnc@1829 2874 }
johnc@1829 2875 _inc_cset_head = hr;
johnc@1829 2876 }
johnc@1829 2877
johnc@1829 2878 #ifndef PRODUCT
johnc@1829 2879 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2880 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2881
johnc@1829 2882 st->print_cr("\nCollection_set:");
johnc@1829 2883 HeapRegion* csr = list_head;
johnc@1829 2884 while (csr != NULL) {
johnc@1829 2885 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2886 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2887 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2888 "age: %4d, y: %d, surv: %d",
johnc@1829 2889 csr->bottom(), csr->end(),
johnc@1829 2890 csr->top(),
johnc@1829 2891 csr->prev_top_at_mark_start(),
johnc@1829 2892 csr->next_top_at_mark_start(),
johnc@1829 2893 csr->top_at_conc_mark_count(),
johnc@1829 2894 csr->age_in_surv_rate_group_cond(),
johnc@1829 2895 csr->is_young(),
johnc@1829 2896 csr->is_survivor());
johnc@1829 2897 csr = next;
johnc@1829 2898 }
johnc@1829 2899 }
johnc@1829 2900 #endif // !PRODUCT
johnc@1829 2901
// Build the collection set for the imminent evacuation pause. The young
// (incremental) cset, which has been built up region-by-region since the
// last pause, is adopted wholesale; if we are in partially-young mode,
// old (marked) regions are then appended while they fit in the remaining
// pause-time budget.
void
G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
                                                  double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();

  start_recording_regions();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  // Predict the fixed (region-independent) cost of the pause; whatever
  // is left of the target is the budget for adding regions.
  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;

  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                base_time_ms, time_remaining_ms, target_pause_time_ms);

  // If the base cost has already (nearly or fully) consumed the target,
  // fall back to an artificial budget so we still collect something.
  // the 10% and 50% values are arbitrary...
  double threshold = 0.10 * target_pause_time_ms;
  if (time_remaining_ms < threshold) {
    double prev_time_remaining_ms = time_remaining_ms;
    time_remaining_ms = 0.50 * target_pause_time_ms;
    _within_target = false;
    ergo_verbose3(ErgoCSetConstruction,
                  "adjust remaining time",
                  ergo_format_reason("remaining time lower than threshold")
                  ergo_format_ms("remaining time")
                  ergo_format_ms("threshold")
                  ergo_format_ms("adjusted remaining time"),
                  prev_time_remaining_ms, threshold, time_remaining_ms);
  } else {
    _within_target = true;
  }

  // NOTE(review): expansion_bytes appears to be computed but never used
  // in this method — candidate for removal; confirm against history.
  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;

  HeapRegion* hr;
  double young_start_time_sec = os::elapsedTime();

  _collection_set_bytes_used_before = 0;
  _collection_set_size = 0;
  _young_cset_length = 0;
  _last_young_gc_full = full_young_gcs() ? true : false;

  if (_last_young_gc_full) {
    ++_full_young_pause_num;
  } else {
    ++_partial_young_pause_num;
  }

  // The young list is laid out with the survivor regions from the
  // previous pause appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  size_t survivor_region_num = young_list->survivor_length();
  size_t eden_region_num = young_list->length() - survivor_region_num;
  size_t old_region_num = 0;
  // The survivors of the previous pause become plain young regions now.
  hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    hr->set_young();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  if (_g1->mark_in_progress())
    _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);

  // Adopt the incremental cset as the collection set for this pause and
  // charge its predicted cost against the pause-time budget.
  _young_cset_length = _inc_cset_young_index;
  _collection_set = _inc_cset_head;
  _collection_set_size = _inc_cset_size;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
  predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

  ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time"),
                eden_region_num, survivor_region_num,
                _inc_cset_predicted_elapsed_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_young_regions(_inc_cset_size);
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
  set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
#if PREDICTIONS_VERBOSE
  set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
#endif // PREDICTIONS_VERBOSE

  assert(_inc_cset_size == young_list->length(), "Invariant");

  double young_end_time_sec = os::elapsedTime();
  _recorded_young_cset_choice_time_ms =
    (young_end_time_sec - young_start_time_sec) * 1000.0;

  // We are doing young collections so reset this.
  non_young_start_time_sec = young_end_time_sec;

  // In partially-young mode, also append old (marked) regions, best
  // garbage first, until the budget runs out or the chooser runs dry.
  if (!full_young_gcs()) {
    bool should_continue = true;
    NumberSeq seq;
    double avg_prediction = 100000000000000000.0; // something very large

    size_t prev_collection_set_size = _collection_set_size;
    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
    do {
      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                      avg_prediction);
      if (hr != NULL) {
        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
        time_remaining_ms -= predicted_time_ms;
        predicted_pause_time_ms += predicted_time_ms;
        add_to_collection_set(hr);
        record_non_young_cset_region(hr);
        seq.add(predicted_time_ms);
        // Conservative estimate of the next region's cost: mean plus
        // one standard deviation of the costs seen so far.
        avg_prediction = seq.avg() + seq.sd();
      }

      should_continue = true;
      if (hr == NULL) {
        // No need for an ergo verbose message here,
        // getNextMarkRegion() does this when it returns NULL.
        should_continue = false;
      } else {
        if (adaptive_young_list_length()) {
          // Adaptive sizing: stop once the budget is exhausted.
          if (time_remaining_ms < 0.0) {
            ergo_verbose1(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("remaining time is lower than 0")
                          ergo_format_ms("remaining time"),
                          time_remaining_ms);
            should_continue = false;
          }
        } else {
          // Fixed young length: stop once the cset reaches that length.
          if (_collection_set_size >= _young_list_fixed_length) {
            ergo_verbose2(ErgoCSetConstruction,
                          "stop adding old regions to CSet",
                          ergo_format_reason("CSet length reached target")
                          ergo_format_region("CSet")
                          ergo_format_region("young target"),
                          _collection_set_size, _young_list_fixed_length);
            should_continue = false;
          }
        }
      }
    } while (should_continue);

    if (!adaptive_young_list_length() &&
        _collection_set_size < _young_list_fixed_length) {
      ergo_verbose2(ErgoCSetConstruction,
                    "request partially-young GCs end",
                    ergo_format_reason("CSet length lower than target")
                    ergo_format_region("CSet")
                    ergo_format_region("young target"),
                    _collection_set_size, _young_list_fixed_length);
      _should_revert_to_full_young_gcs = true;
    }

    old_region_num = _collection_set_size - prev_collection_set_size;

    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
                  "add old regions to CSet",
                  ergo_format_region("old")
                  ergo_format_ms("predicted old region time"),
                  old_region_num,
                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
  }

  stop_incremental_cset_building();

  count_CS_bytes_used();

  end_recording_regions();

  ergo_verbose5(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_region("old")
                ergo_format_ms("predicted pause time")
                ergo_format_ms("target pause time"),
                eden_region_num, survivor_region_num, old_region_num,
                predicted_pause_time_ms, target_pause_time_ms);

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}
ysr@777 3105
ysr@777 3106 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
ysr@777 3107 G1CollectorPolicy::record_full_collection_end();
ysr@777 3108 _collectionSetChooser->updateAfterFullCollection();
ysr@777 3109 }
ysr@777 3110
ysr@777 3111 void G1CollectorPolicy_BestRegionsFirst::
tonyp@2062 3112 record_collection_pause_end() {
tonyp@2062 3113 G1CollectorPolicy::record_collection_pause_end();
ysr@777 3114 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 3115 }

mercurial