src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Fri, 12 Aug 2011 11:31:06 -0400

author
tonyp
date
Fri, 12 Aug 2011 11:31:06 -0400
changeset 3028
f44782f04dd4
parent 3021
14a2fd14c0db
child 3065
ff53346271fe
permissions
-rw-r--r--

7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally
Summary: Refactor the allocation code during GC to use the G1AllocRegion abstraction. Use separate subclasses of G1AllocRegion for survivor and old regions. Avoid BOT updates and dirty survivor cards incrementally for the former.
Reviewed-by: brutisso, johnc, ysr

ysr@777 1 /*
tonyp@2472 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@2314 31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 32 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 33 #include "runtime/arguments.hpp"
stefank@2314 34 #include "runtime/java.hpp"
stefank@2314 35 #include "runtime/mutexLocker.hpp"
stefank@2314 36 #include "utilities/debug.hpp"
ysr@777 37
#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// Each array has 8 entries and is indexed by the clamped GC thread
// count: index 0 is used for 0 or 1 threads, index 7 for more than 8
// threads (see the index computation in the G1CollectorPolicy
// constructor). The values seed the corresponding TruncatedSeq
// predictors before any real pause data is available.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

// Cost (ms) of processing one card; cheaper with more threads.
static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

// Cost (ms) of scanning one RSet entry.
static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

// Cost (ms) of copying one byte during evacuation.
static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

// Fixed per-region overhead (ms) for young regions in the cset.
static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

// Fixed per-region overhead (ms) for non-young regions in the cset.
static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>
ysr@777 83
brutisso@2645 84 // Help class for avoiding interleaved logging
brutisso@2645 85 class LineBuffer: public StackObj {
brutisso@2645 86
brutisso@2645 87 private:
brutisso@2645 88 static const int BUFFER_LEN = 1024;
brutisso@2645 89 static const int INDENT_CHARS = 3;
brutisso@2645 90 char _buffer[BUFFER_LEN];
brutisso@2645 91 int _indent_level;
brutisso@2645 92 int _cur;
brutisso@2645 93
brutisso@2645 94 void vappend(const char* format, va_list ap) {
brutisso@2645 95 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
brutisso@2645 96 if (res != -1) {
brutisso@2645 97 _cur += res;
brutisso@2645 98 } else {
brutisso@2645 99 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
brutisso@2645 100 _buffer[BUFFER_LEN -1] = 0;
brutisso@2645 101 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
brutisso@2645 102 }
brutisso@2645 103 }
brutisso@2645 104
brutisso@2645 105 public:
brutisso@2645 106 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
brutisso@2645 107 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
brutisso@2645 108 _buffer[_cur] = ' ';
brutisso@2645 109 }
brutisso@2645 110 }
brutisso@2645 111
brutisso@2645 112 #ifndef PRODUCT
brutisso@2645 113 ~LineBuffer() {
brutisso@2645 114 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
brutisso@2645 115 }
brutisso@2645 116 #endif
brutisso@2645 117
brutisso@2645 118 void append(const char* format, ...) {
brutisso@2645 119 va_list ap;
brutisso@2645 120 va_start(ap, format);
brutisso@2645 121 vappend(format, ap);
brutisso@2645 122 va_end(ap);
brutisso@2645 123 }
brutisso@2645 124
brutisso@2645 125 void append_and_print_cr(const char* format, ...) {
brutisso@2645 126 va_list ap;
brutisso@2645 127 va_start(ap, format);
brutisso@2645 128 vappend(format, ap);
brutisso@2645 129 va_end(ap);
brutisso@2645 130 gclog_or_tty->print_cr("%s", _buffer);
brutisso@2645 131 _cur = _indent_level * INDENT_CHARS;
brutisso@2645 132 }
brutisso@2645 133 };
brutisso@2645 134
// Construct the G1 policy object. The initializer list seeds all the
// prediction sequences (TruncatedSeq/NumberSeq) and bookkeeping
// counters; the body then sets up region sizing, per-worker timing
// arrays, default prediction values, and validates/derives the pause
// time target and pause interval flags.
G1CollectorPolicy::G1CollectorPolicy() :
  // One timing slot per GC worker; 1 when not running parallel GC.
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _n_pauses(0),
  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

#ifndef PRODUCT
  // Card-count clearing statistics; -1.0 marks "no sample yet".
  _cur_clear_ct_time_ms(0.0),
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  // Auxiliary timing slots (fixed at 10).
  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>
  // Sequences feeding the pause-time prediction model.

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _in_young_gc_mode(false),
  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  // Real value derived from GCTimeRatio at the end of the body.
  _gc_overhead_perc(0.0)

{
  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  // Verify PLAB sizes: a PLAB larger than a region can never be
  // satisfied by region-based allocation.
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  // Seed the "previous pause end" data with the current time.
  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  // Per-GC-worker timing arrays, one slot per worker thread.
  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  // Pick the defaults row matching the GC thread count, clamped to
  // the 8 entries of the *_defaults arrays above.
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  // Feed the validated flag values into the MMU tracker (seconds).
  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_init_times_ms->add(0.05);
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  // if G1FixedSurvivorSpaceSize is 0 which means the size is not
  // fixed, then _max_survivor_regions will be calculated at
  // calculate_young_list_target_length during initialization
  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  initialize_all();
}
ysr@777 420
// Advance "i" to the next index in a circular range of size "len",
// wrapping back to 0 after len - 1.
static void inc_mod(int& i, int len) {
  if (++i == len) {
    i = 0;
  }
}
ysr@777 425
// Validate and set up the heap alignment flags: regions are the unit
// of min alignment, and the remembered set implementation dictates
// the max alignment constraint. Delegates the rest to the base class.
void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  // A SurvivorRatio of 0 would make survivor sizing meaningless.
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
ysr@777 434
tonyp@1720 435 // The easiest way to deal with the parsing of the NewSize /
tonyp@1720 436 // MaxNewSize / etc. parameteres is to re-use the code in the
tonyp@1720 437 // TwoGenerationCollectorPolicy class. This is similar to what
tonyp@1720 438 // ParallelScavenge does with its GenerationSizer class (see
tonyp@1720 439 // ParallelScavengeHeap::initialize()). We might change this in the
tonyp@1720 440 // future, but it's a good start.
tonyp@1720 441 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
tonyp@1720 442 size_t size_to_region_num(size_t byte_size) {
tonyp@1720 443 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
tonyp@1720 444 }
tonyp@1720 445
tonyp@1720 446 public:
tonyp@1720 447 G1YoungGenSizer() {
tonyp@1720 448 initialize_flags();
tonyp@1720 449 initialize_size_info();
tonyp@1720 450 }
tonyp@1720 451
tonyp@1720 452 size_t min_young_region_num() {
tonyp@1720 453 return size_to_region_num(_min_gen0_size);
tonyp@1720 454 }
tonyp@1720 455 size_t initial_young_region_num() {
tonyp@1720 456 return size_to_region_num(_initial_gen0_size);
tonyp@1720 457 }
tonyp@1720 458 size_t max_young_region_num() {
tonyp@1720 459 return size_to_region_num(_max_gen0_size);
tonyp@1720 460 }
tonyp@1720 461 };
tonyp@1720 462
// Second-phase initialization, called once the heap exists (the
// constructor runs before the heap is created). Caches the heap
// pointer, creates the jstat counters, configures young list sizing
// (adaptive or fixed) and starts incremental cset building.
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (G1Gen) {
    _in_young_gc_mode = true;

    // Parse NewSize/MaxNewSize etc. via the two-generation policy code.
    G1YoungGenSizer sizer;
    size_t initial_region_num = sizer.initial_young_region_num();

    if (UseAdaptiveSizePolicy) {
      // Adaptive: the target length is recomputed from predictions.
      set_adaptive_young_list_length(true);
      _young_list_fixed_length = 0;
    } else {
      // Fixed: pin the young list to the initial sizing.
      set_adaptive_young_list_length(false);
      _young_list_fixed_length = initial_region_num;
    }
    _free_regions_at_end_of_collection = _g1->free_regions();
    calculate_young_list_min_length();
    guarantee( _young_list_min_length == 0, "invariant, not enough info" );
    calculate_young_list_target_length();
  } else {
    _young_list_fixed_length = 0;
    _in_young_gc_mode = false;
  }

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}
ysr@777 497
// Create the jstat counters for the policy.
// The generation count reported is 2 + G1Gen (i.e. one more when G1
// runs in generational mode).
void G1CollectorPolicy::initialize_gc_policy_counters()
{
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
}
apetrusenko@980 503
ysr@777 504 void G1CollectorPolicy::calculate_young_list_min_length() {
ysr@777 505 _young_list_min_length = 0;
ysr@777 506
ysr@777 507 if (!adaptive_young_list_length())
ysr@777 508 return;
ysr@777 509
ysr@777 510 if (_alloc_rate_ms_seq->num() > 3) {
ysr@777 511 double now_sec = os::elapsedTime();
ysr@777 512 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
ysr@777 513 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@2315 514 size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@2315 515 size_t current_region_num = _g1->young_list()->length();
ysr@777 516 _young_list_min_length = min_regions + current_region_num;
ysr@777 517 }
ysr@777 518 }
ysr@777 519
johnc@1829 520 void G1CollectorPolicy::calculate_young_list_target_length() {
ysr@777 521 if (adaptive_young_list_length()) {
ysr@777 522 size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
johnc@1829 523 calculate_young_list_target_length(rs_lengths);
ysr@777 524 } else {
ysr@777 525 if (full_young_gcs())
ysr@777 526 _young_list_target_length = _young_list_fixed_length;
ysr@777 527 else
ysr@777 528 _young_list_target_length = _young_list_fixed_length / 2;
ysr@777 529 }
tonyp@2315 530
tonyp@2315 531 // Make sure we allow the application to allocate at least one
tonyp@2315 532 // region before we need to do a collection again.
tonyp@2315 533 size_t min_length = _g1->young_list()->length() + 1;
tonyp@2315 534 _young_list_target_length = MAX2(_young_list_target_length, min_length);
tonyp@2333 535 calculate_max_gc_locker_expansion();
apetrusenko@980 536 calculate_survivors_policy();
ysr@777 537 }
ysr@777 538
johnc@1829 539 void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
ysr@777 540 guarantee( adaptive_young_list_length(), "pre-condition" );
johnc@1829 541 guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
ysr@777 542
ysr@777 543 double start_time_sec = os::elapsedTime();
tonyp@1717 544 size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
ysr@777 545 min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
ysr@777 546 size_t reserve_regions =
ysr@777 547 (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
ysr@777 548
ysr@777 549 if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
ysr@777 550 // we are in fully-young mode and there are free regions in the heap
ysr@777 551
apetrusenko@980 552 double survivor_regions_evac_time =
apetrusenko@980 553 predict_survivor_regions_evac_time();
apetrusenko@980 554
ysr@777 555 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
ysr@777 556 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
ysr@777 557 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
johnc@1829 558 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
apetrusenko@980 559 double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
apetrusenko@980 560 + survivor_regions_evac_time;
johnc@1829 561
ysr@777 562 // the result
ysr@777 563 size_t final_young_length = 0;
johnc@1829 564
johnc@1829 565 size_t init_free_regions =
johnc@1829 566 MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
johnc@1829 567
johnc@1829 568 // if we're still under the pause target...
johnc@1829 569 if (base_time_ms <= target_pause_time_ms) {
johnc@1829 570 // We make sure that the shortest young length that makes sense
johnc@1829 571 // fits within the target pause time.
johnc@1829 572 size_t min_young_length = 1;
johnc@1829 573
johnc@1829 574 if (predict_will_fit(min_young_length, base_time_ms,
johnc@1829 575 init_free_regions, target_pause_time_ms)) {
johnc@1829 576 // The shortest young length will fit within the target pause time;
johnc@1829 577 // we'll now check whether the absolute maximum number of young
johnc@1829 578 // regions will fit in the target pause time. If not, we'll do
johnc@1829 579 // a binary search between min_young_length and max_young_length
johnc@1829 580 size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
johnc@1829 581 size_t max_young_length = abs_max_young_length;
johnc@1829 582
johnc@1829 583 if (max_young_length > min_young_length) {
johnc@1829 584 // Let's check if the initial max young length will fit within the
johnc@1829 585 // target pause. If so then there is no need to search for a maximal
johnc@1829 586 // young length - we'll return the initial maximum
johnc@1829 587
johnc@1829 588 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 589 init_free_regions, target_pause_time_ms)) {
johnc@1829 590 // The maximum young length will satisfy the target pause time.
johnc@1829 591 // We are done so set min young length to this maximum length.
johnc@1829 592 // The code after the loop will then set final_young_length using
johnc@1829 593 // the value cached in the minimum length.
johnc@1829 594 min_young_length = max_young_length;
johnc@1829 595 } else {
johnc@1829 596 // The maximum possible number of young regions will not fit within
johnc@1829 597 // the target pause time so let's search....
johnc@1829 598
johnc@1829 599 size_t diff = (max_young_length - min_young_length) / 2;
johnc@1829 600 max_young_length = min_young_length + diff;
johnc@1829 601
johnc@1829 602 while (max_young_length > min_young_length) {
johnc@1829 603 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 604 init_free_regions, target_pause_time_ms)) {
johnc@1829 605
johnc@1829 606 // The current max young length will fit within the target
johnc@1829 607 // pause time. Note we do not exit the loop here. By setting
johnc@1829 608 // min = max, and then increasing the max below means that
johnc@1829 609 // we will continue searching for an upper bound in the
johnc@1829 610 // range [max..max+diff]
johnc@1829 611 min_young_length = max_young_length;
johnc@1829 612 }
johnc@1829 613 diff = (max_young_length - min_young_length) / 2;
johnc@1829 614 max_young_length = min_young_length + diff;
johnc@1829 615 }
johnc@1829 616 // the above loop found a maximal young length that will fit
johnc@1829 617 // within the target pause time.
johnc@1829 618 }
johnc@1829 619 assert(min_young_length <= abs_max_young_length, "just checking");
johnc@1829 620 }
johnc@1829 621 final_young_length = min_young_length;
johnc@1829 622 }
ysr@777 623 }
johnc@1829 624 // and we're done!
ysr@777 625
ysr@777 626 // we should have at least one region in the target young length
apetrusenko@980 627 _young_list_target_length =
tonyp@2315 628 final_young_length + _recorded_survivor_regions;
ysr@777 629
ysr@777 630 // let's keep an eye of how long we spend on this calculation
ysr@777 631 // right now, I assume that we'll print it when we need it; we
ysr@777 632 // should really adde it to the breakdown of a pause
ysr@777 633 double end_time_sec = os::elapsedTime();
ysr@777 634 double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
ysr@777 635
johnc@1829 636 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 637 // leave this in for debugging, just in case
johnc@1829 638 gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
johnc@1829 639 "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
ysr@777 640 target_pause_time_ms,
johnc@1829 641 _young_list_target_length
ysr@777 642 elapsed_time_ms,
ysr@777 643 full_young_gcs() ? "full" : "partial",
tonyp@1794 644 during_initial_mark_pause() ? " i-m" : "",
apetrusenko@980 645 _in_marking_window,
apetrusenko@980 646 _in_marking_window_im);
johnc@1829 647 #endif // TRACE_CALC_YOUNG_LENGTH
ysr@777 648
ysr@777 649 if (_young_list_target_length < _young_list_min_length) {
johnc@1829 650 // bummer; this means that, if we do a pause when the maximal
johnc@1829 651 // length dictates, we'll violate the pause spacing target (the
ysr@777 652 // min length was calculate based on the application's current
ysr@777 653 // alloc rate);
ysr@777 654
ysr@777 655 // so, we have to bite the bullet, and allocate the minimum
ysr@777 656 // number. We'll violate our target, but we just can't meet it.
ysr@777 657
johnc@1829 658 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 659 // leave this in for debugging, just in case
ysr@777 660 gclog_or_tty->print_cr("adjusted target length from "
johnc@1829 661 SIZE_FORMAT " to " SIZE_FORMAT,
johnc@1829 662 _young_list_target_length, _young_list_min_length);
johnc@1829 663 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 664
johnc@1829 665 _young_list_target_length = _young_list_min_length;
ysr@777 666 }
ysr@777 667 } else {
ysr@777 668 // we are in a partially-young mode or we've run out of regions (due
ysr@777 669 // to evacuation failure)
ysr@777 670
johnc@1829 671 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 672 // leave this in for debugging, just in case
ysr@777 673 gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
johnc@1829 674 _young_list_min_length);
johnc@1829 675 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 676 // we'll do the pause as soon as possible by choosing the minimum
tonyp@2315 677 _young_list_target_length = _young_list_min_length;
ysr@777 678 }
ysr@777 679
ysr@777 680 _rs_lengths_prediction = rs_lengths;
ysr@777 681 }
ysr@777 682
johnc@1829 683 // This is used by: calculate_young_list_target_length(rs_length). It
johnc@1829 684 // returns true iff:
johnc@1829 685 // the predicted pause time for the given young list will not overflow
johnc@1829 686 // the target pause time
johnc@1829 687 // and:
johnc@1829 688 // the predicted amount of surviving data will not overflow the
johnc@1829 689 // the amount of free space available for survivor regions.
johnc@1829 690 //
ysr@777 691 bool
johnc@1829 692 G1CollectorPolicy::predict_will_fit(size_t young_length,
johnc@1829 693 double base_time_ms,
johnc@1829 694 size_t init_free_regions,
johnc@1829 695 double target_pause_time_ms) {
ysr@777 696
ysr@777 697 if (young_length >= init_free_regions)
ysr@777 698 // end condition 1: not enough space for the young regions
ysr@777 699 return false;
ysr@777 700
ysr@777 701 double accum_surv_rate_adj = 0.0;
ysr@777 702 double accum_surv_rate =
ysr@777 703 accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
johnc@1829 704
ysr@777 705 size_t bytes_to_copy =
ysr@777 706 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
johnc@1829 707
ysr@777 708 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
johnc@1829 709
ysr@777 710 double young_other_time_ms =
johnc@1829 711 predict_young_other_time_ms(young_length);
johnc@1829 712
ysr@777 713 double pause_time_ms =
johnc@1829 714 base_time_ms + copy_time_ms + young_other_time_ms;
ysr@777 715
ysr@777 716 if (pause_time_ms > target_pause_time_ms)
ysr@777 717 // end condition 2: over the target pause time
ysr@777 718 return false;
ysr@777 719
ysr@777 720 size_t free_bytes =
ysr@777 721 (init_free_regions - young_length) * HeapRegion::GrainBytes;
ysr@777 722
ysr@777 723 if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
ysr@777 724 // end condition 3: out of to-space (conservatively)
ysr@777 725 return false;
ysr@777 726
ysr@777 727 // success!
ysr@777 728 return true;
ysr@777 729 }
ysr@777 730
apetrusenko@980 731 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 732 double survivor_regions_evac_time = 0.0;
apetrusenko@980 733 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 734 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 735 r = r->get_next_young_region()) {
apetrusenko@980 736 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 737 }
apetrusenko@980 738 return survivor_regions_evac_time;
apetrusenko@980 739 }
apetrusenko@980 740
ysr@777 741 void G1CollectorPolicy::check_prediction_validity() {
ysr@777 742 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 743
johnc@1829 744 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 745 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 746 // add 10% to avoid having to recalculate often
ysr@777 747 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
johnc@1829 748 calculate_young_list_target_length(rs_lengths_prediction);
ysr@777 749 }
ysr@777 750 }
ysr@777 751
// Generation-style allocation entry point; G1 does not use this policy
// feature, so reaching it is a programming error (hence the
// unconditional guarantee).
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 758
// This method controls how a collector handles one or more
// of its generations being fully allocated.
// G1 does not use this policy feature, so reaching it is a programming
// error (hence the unconditional guarantee).
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 766
ysr@777 767
ysr@777 768 #ifndef PRODUCT
ysr@777 769 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 770 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 771 return
ysr@777 772 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 773 // also call verify_young_ages on any additional surv rate groups
ysr@777 774 }
ysr@777 775
ysr@777 776 bool
ysr@777 777 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 778 SurvRateGroup *surv_rate_group) {
ysr@777 779 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 780
ysr@777 781 const char* name = surv_rate_group->name();
ysr@777 782 bool ret = true;
ysr@777 783 int prev_age = -1;
ysr@777 784
ysr@777 785 for (HeapRegion* curr = head;
ysr@777 786 curr != NULL;
ysr@777 787 curr = curr->get_next_young_region()) {
ysr@777 788 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 789 if (group == NULL && !curr->is_survivor()) {
ysr@777 790 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 791 ret = false;
ysr@777 792 }
ysr@777 793
ysr@777 794 if (surv_rate_group == group) {
ysr@777 795 int age = curr->age_in_surv_rate_group();
ysr@777 796
ysr@777 797 if (age < 0) {
ysr@777 798 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 799 ret = false;
ysr@777 800 }
ysr@777 801
ysr@777 802 if (age <= prev_age) {
ysr@777 803 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 804 "(%d, %d)", name, age, prev_age);
ysr@777 805 ret = false;
ysr@777 806 }
ysr@777 807 prev_age = age;
ysr@777 808 }
ysr@777 809 }
ysr@777 810
ysr@777 811 return ret;
ysr@777 812 }
ysr@777 813 #endif // PRODUCT
ysr@777 814
void G1CollectorPolicy::record_full_collection_start() {
  // Timestamp the start so record_full_collection_end() can compute
  // the full GC duration.
  _cur_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}
ysr@777 820
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // Forget any previously recorded survivor regions (the full GC has
  // emptied the young generation).
  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  // Recompute the young list sizing now that the heap has been compacted.
  calculate_young_list_min_length();
  calculate_young_list_target_length();
}
ysr@777 861
// Remember when the world was stopped so the subsequent pause start (or
// concurrent-thread yield) can account for the stop-world latency.
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
ysr@777 865
// Called at the start of an evacuation pause: prints the pause header,
// snapshots heap/card/young-list state consumed later by
// record_collection_pause_end(), and resets the per-pause fields.
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    if (in_young_gc_mode())
      gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Time the mutator spent stopped before this pause actually started.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_collection_set_before_gc = 0;
  _bytes_copied_during_gc = 0;

  // Snapshot eden/survivor/capacity sizes for the heap transition printout.
  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly
  // NOTE(review): HotSpot debug-only code is usually guarded by ASSERT;
  // confirm DEBUG is actually defined in debug builds.

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  if (in_young_gc_mode())
    _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
ysr@777 933
// Record the time (ms) spent in the mark closure during this pause; it
// is folded into the pause summary by record_collection_pause_end().
void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}
ysr@777 937
ysr@777 938 void G1CollectorPolicy::record_concurrent_mark_init_start() {
ysr@777 939 _mark_init_start_sec = os::elapsedTime();
ysr@777 940 guarantee(!in_young_gc_mode(), "should not do be here in young GC mode");
ysr@777 941 }
ysr@777 942
// Bookkeeping shared by the standalone initial-mark path
// (record_concurrent_mark_init_end) and an evacuation pause that
// included initial-mark (record_collection_pause_end calls this with
// an elapsed time of 0.0).
void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                           mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
ysr@777 950
ysr@777 951 void G1CollectorPolicy::record_concurrent_mark_init_end() {
ysr@777 952 double end_time_sec = os::elapsedTime();
ysr@777 953 double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
ysr@777 954 _concurrent_mark_init_times_ms->add(elapsed_time_ms);
ysr@777 955 record_concurrent_mark_init_end_pre(elapsed_time_ms);
ysr@777 956
ysr@777 957 _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
ysr@777 958 }
ysr@777 959
void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  // Timestamp the remark pause; _during_marking is turned off here
  // (it was set by record_concurrent_mark_init_end_pre).
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}
ysr@777 964
ysr@777 965 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 966 double end_time_sec = os::elapsedTime();
ysr@777 967 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 968 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 969 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 970 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 971
ysr@777 972 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 973 }
ysr@777 974
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  // Timestamp the start of the cleanup pause; consumed by
  // record_concurrent_mark_cleanup_end_work2().
  _mark_cleanup_start_sec = os::elapsedTime();
}
ysr@777 978
// Split into two helpers: work1 does the (verbose) accounting of the
// marking results, work2 does the timing/MMU bookkeeping.
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                      size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}
ysr@777 985
ysr@777 986 void
ysr@777 987 G1CollectorPolicy::
ysr@777 988 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 989 size_t max_live_bytes) {
ysr@777 990 if (_n_marks < 2) _n_marks++;
ysr@777 991 if (G1PolicyVerbose > 0)
ysr@777 992 gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
ysr@777 993 " (of " SIZE_FORMAT " MB heap).",
ysr@777 994 max_live_bytes/M, _g1->capacity()/M);
ysr@777 995 }
ysr@777 996
ysr@777 997 // The important thing about this is that it includes "os::elapsedTime".
ysr@777 998 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
ysr@777 999 double end_time_sec = os::elapsedTime();
ysr@777 1000 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
ysr@777 1001 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
ysr@777 1002 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 1003 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 1004
ysr@777 1005 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
ysr@777 1006
ysr@777 1007 _num_markings++;
ysr@777 1008
ysr@777 1009 // We did a marking, so reset the "since_last_mark" variables.
ysr@777 1010 double considerConcMarkCost = 1.0;
ysr@777 1011 // If there are available processors, concurrent activity is free...
ysr@777 1012 if (Threads::number_of_non_daemon_threads() * 2 <
ysr@777 1013 os::active_processor_count()) {
ysr@777 1014 considerConcMarkCost = 0.0;
ysr@777 1015 }
ysr@777 1016 _n_pauses_at_mark_end = _n_pauses;
ysr@777 1017 _n_marks_since_last_pause++;
ysr@777 1018 }
ysr@777 1019
ysr@777 1020 void
ysr@777 1021 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
ysr@777 1022 if (in_young_gc_mode()) {
ysr@777 1023 _should_revert_to_full_young_gcs = false;
ysr@777 1024 _last_full_young_gc = true;
ysr@777 1025 _in_marking_window = false;
ysr@777 1026 if (adaptive_young_list_length())
johnc@1829 1027 calculate_young_list_target_length();
ysr@777 1028 }
ysr@777 1029 }
ysr@777 1030
ysr@777 1031 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 1032 if (_stop_world_start > 0.0) {
ysr@777 1033 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 1034 _all_yield_times_ms->add(yield_ms);
ysr@777 1035 }
ysr@777 1036 }
ysr@777 1037
void G1CollectorPolicy::record_concurrent_pause_end() {
  // Intentionally empty: nothing to record at the end of a concurrent
  // pause at present.
}
ysr@777 1040
// Sums n consecutive entries of the circular buffer sum_arr (capacity
// N), starting at index start; indices wrap around modulo N.
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T total = (T)0;
  int idx = start;
  for (int i = 0; i < n; i++) {
    total += sum_arr[idx % N];
    idx++;
  }
  return total;
}
ysr@777 1050
tonyp@1966 1051 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1052 const char* str,
brutisso@2712 1053 double* data) {
ysr@777 1054 double min = data[0], max = data[0];
ysr@777 1055 double total = 0.0;
brutisso@2645 1056 LineBuffer buf(level);
brutisso@2645 1057 buf.append("[%s (ms):", str);
ysr@777 1058 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1059 double val = data[i];
ysr@777 1060 if (val < min)
ysr@777 1061 min = val;
ysr@777 1062 if (val > max)
ysr@777 1063 max = val;
ysr@777 1064 total += val;
brutisso@2645 1065 buf.append(" %3.1lf", val);
ysr@777 1066 }
brutisso@2712 1067 buf.append_and_print_cr("");
brutisso@2712 1068 double avg = total / (double) ParallelGCThreads;
brutisso@2712 1069 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
brutisso@2712 1070 avg, min, max, max - min);
ysr@777 1071 }
ysr@777 1072
tonyp@1966 1073 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1074 const char* str,
brutisso@2712 1075 double* data) {
ysr@777 1076 double min = data[0], max = data[0];
ysr@777 1077 double total = 0.0;
brutisso@2645 1078 LineBuffer buf(level);
brutisso@2645 1079 buf.append("[%s :", str);
ysr@777 1080 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1081 double val = data[i];
ysr@777 1082 if (val < min)
ysr@777 1083 min = val;
ysr@777 1084 if (val > max)
ysr@777 1085 max = val;
ysr@777 1086 total += val;
brutisso@2645 1087 buf.append(" %d", (int) val);
ysr@777 1088 }
brutisso@2712 1089 buf.append_and_print_cr("");
brutisso@2712 1090 double avg = total / (double) ParallelGCThreads;
brutisso@2712 1091 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
brutisso@2712 1092 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
ysr@777 1093 }
ysr@777 1094
// Print a single named timing value (ms), indented to the given level.
void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     double value) {
  LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
}
ysr@777 1100
// Print a single named integer value, indented to the given level.
void G1CollectorPolicy::print_stats (int level,
                                     const char* str,
                                     int value) {
  LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}
ysr@777 1106
ysr@777 1107 double G1CollectorPolicy::avg_value (double* data) {
jmasa@2188 1108 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1109 double ret = 0.0;
ysr@777 1110 for (uint i = 0; i < ParallelGCThreads; ++i)
ysr@777 1111 ret += data[i];
ysr@777 1112 return ret / (double) ParallelGCThreads;
ysr@777 1113 } else {
ysr@777 1114 return data[0];
ysr@777 1115 }
ysr@777 1116 }
ysr@777 1117
ysr@777 1118 double G1CollectorPolicy::max_value (double* data) {
jmasa@2188 1119 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1120 double ret = data[0];
ysr@777 1121 for (uint i = 1; i < ParallelGCThreads; ++i)
ysr@777 1122 if (data[i] > ret)
ysr@777 1123 ret = data[i];
ysr@777 1124 return ret;
ysr@777 1125 } else {
ysr@777 1126 return data[0];
ysr@777 1127 }
ysr@777 1128 }
ysr@777 1129
ysr@777 1130 double G1CollectorPolicy::sum_of_values (double* data) {
jmasa@2188 1131 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1132 double sum = 0.0;
ysr@777 1133 for (uint i = 0; i < ParallelGCThreads; i++)
ysr@777 1134 sum += data[i];
ysr@777 1135 return sum;
ysr@777 1136 } else {
ysr@777 1137 return data[0];
ysr@777 1138 }
ysr@777 1139 }
ysr@777 1140
ysr@777 1141 double G1CollectorPolicy::max_sum (double* data1,
ysr@777 1142 double* data2) {
ysr@777 1143 double ret = data1[0] + data2[0];
ysr@777 1144
jmasa@2188 1145 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1146 for (uint i = 1; i < ParallelGCThreads; ++i) {
ysr@777 1147 double data = data1[i] + data2[i];
ysr@777 1148 if (data > ret)
ysr@777 1149 ret = data;
ysr@777 1150 }
ysr@777 1151 }
ysr@777 1152 return ret;
ysr@777 1153 }
ysr@777 1154
ysr@777 1155 // Anything below that is considered to be zero
ysr@777 1156 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1157
tonyp@2062 1158 void G1CollectorPolicy::record_collection_pause_end() {
ysr@777 1159 double end_time_sec = os::elapsedTime();
ysr@777 1160 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1161 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 1162 size_t rs_size =
ysr@777 1163 _cur_collection_pause_used_regions_at_start - collection_set_size();
ysr@777 1164 size_t cur_used_bytes = _g1->used();
ysr@777 1165 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1166 bool last_pause_included_initial_mark = false;
tonyp@2062 1167 bool update_stats = !_g1->evacuation_failed();
ysr@777 1168
ysr@777 1169 #ifndef PRODUCT
ysr@777 1170 if (G1YoungSurvRateVerbose) {
ysr@777 1171 gclog_or_tty->print_cr("");
ysr@777 1172 _short_lived_surv_rate_group->print();
ysr@777 1173 // do that for any other surv rate groups too
ysr@777 1174 }
ysr@777 1175 #endif // PRODUCT
ysr@777 1176
ysr@777 1177 if (in_young_gc_mode()) {
tonyp@1794 1178 last_pause_included_initial_mark = during_initial_mark_pause();
ysr@777 1179 if (last_pause_included_initial_mark)
ysr@777 1180 record_concurrent_mark_init_end_pre(0.0);
ysr@777 1181
ysr@777 1182 size_t min_used_targ =
tonyp@1718 1183 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
ysr@777 1184
tonyp@1794 1185
tonyp@1794 1186 if (!_g1->mark_in_progress() && !_last_full_young_gc) {
tonyp@1794 1187 assert(!last_pause_included_initial_mark, "invariant");
tonyp@1794 1188 if (cur_used_bytes > min_used_targ &&
tonyp@1794 1189 cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
tonyp@1794 1190 assert(!during_initial_mark_pause(), "we should not see this here");
tonyp@1794 1191
tonyp@1794 1192 // Note: this might have already been set, if during the last
tonyp@1794 1193 // pause we decided to start a cycle but at the beginning of
tonyp@1794 1194 // this pause we decided to postpone it. That's OK.
tonyp@1794 1195 set_initiate_conc_mark_if_possible();
ysr@777 1196 }
ysr@777 1197 }
ysr@777 1198
ysr@777 1199 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
ysr@777 1200 }
ysr@777 1201
ysr@777 1202 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1203 end_time_sec, false);
ysr@777 1204
ysr@777 1205 guarantee(_cur_collection_pause_used_regions_at_start >=
ysr@777 1206 collection_set_size(),
ysr@777 1207 "Negative RS size?");
ysr@777 1208
ysr@777 1209 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1210 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1211 // can lead to more memory being used during collection than was used
ysr@777 1212 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1213 // Pauses in which evacuation failed can also lead to negative
ysr@777 1214 // collections, since no space is reclaimed from a region containing an
ysr@777 1215 // object whose evacuation failed.
ysr@777 1216 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1217 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1218 // (DLD, 10/05.)
ysr@777 1219 assert((true || parallel) // Always using GC LABs now.
ysr@777 1220 || _g1->evacuation_failed()
ysr@777 1221 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1222 "Negative collection");
ysr@777 1223
ysr@777 1224 size_t freed_bytes =
ysr@777 1225 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1226 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1227
ysr@777 1228 double survival_fraction =
ysr@777 1229 (double)surviving_bytes/
ysr@777 1230 (double)_collection_set_bytes_used_before;
ysr@777 1231
ysr@777 1232 _n_pauses++;
ysr@777 1233
johnc@3021 1234 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
johnc@3021 1235 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
johnc@3021 1236 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
johnc@3021 1237 double update_rs_processed_buffers =
johnc@3021 1238 sum_of_values(_par_last_update_rs_processed_buffers);
johnc@3021 1239 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
johnc@3021 1240 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
johnc@3021 1241 double termination_time = avg_value(_par_last_termination_times_ms);
johnc@3021 1242
johnc@3021 1243 double parallel_known_time = update_rs_time +
johnc@3021 1244 ext_root_scan_time +
johnc@3021 1245 mark_stack_scan_time +
johnc@3021 1246 scan_rs_time +
johnc@3021 1247 obj_copy_time +
johnc@3021 1248 termination_time;
johnc@3021 1249
johnc@3021 1250 double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
johnc@3021 1251
johnc@3021 1252 PauseSummary* summary = _summary;
johnc@3021 1253
tonyp@1030 1254 if (update_stats) {
johnc@3021 1255 _recent_rs_scan_times_ms->add(scan_rs_time);
ysr@777 1256 _recent_pause_times_ms->add(elapsed_ms);
ysr@777 1257 _recent_rs_sizes->add(rs_size);
ysr@777 1258
johnc@3021 1259 MainBodySummary* body_summary = summary->main_body_summary();
johnc@3021 1260 guarantee(body_summary != NULL, "should not be null!");
johnc@3021 1261
johnc@3021 1262 if (_satb_drain_time_set)
johnc@3021 1263 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
johnc@3021 1264 else
johnc@3021 1265 body_summary->record_satb_drain_time_ms(0.0);
johnc@3021 1266
johnc@3021 1267 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
johnc@3021 1268 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
johnc@3021 1269 body_summary->record_update_rs_time_ms(update_rs_time);
johnc@3021 1270 body_summary->record_scan_rs_time_ms(scan_rs_time);
johnc@3021 1271 body_summary->record_obj_copy_time_ms(obj_copy_time);
johnc@3021 1272 if (parallel) {
johnc@3021 1273 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
johnc@3021 1274 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
johnc@3021 1275 body_summary->record_termination_time_ms(termination_time);
johnc@3021 1276 body_summary->record_parallel_other_time_ms(parallel_other_time);
johnc@3021 1277 }
johnc@3021 1278 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
johnc@3021 1279
ysr@777 1280 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1281 // fragmentation can produce negative collections. Same with evac
ysr@777 1282 // failure.
ysr@777 1283 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1284 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1285 // (DLD, 10/05.
ysr@777 1286 assert((true || parallel)
ysr@777 1287 || _g1->evacuation_failed()
ysr@777 1288 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1289 "Or else negative collection!");
ysr@777 1290 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
ysr@777 1291 _recent_CS_bytes_surviving->add(surviving_bytes);
ysr@777 1292
ysr@777 1293 // this is where we update the allocation rate of the application
ysr@777 1294 double app_time_ms =
ysr@777 1295 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1296 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1297 // This usually happens due to the timer not having the required
ysr@777 1298 // granularity. Some Linuxes are the usual culprits.
ysr@777 1299 // We'll just set it to something (arbitrarily) small.
ysr@777 1300 app_time_ms = 1.0;
ysr@777 1301 }
ysr@777 1302 size_t regions_allocated =
ysr@777 1303 (_region_num_young - _prev_region_num_young) +
ysr@777 1304 (_region_num_tenured - _prev_region_num_tenured);
ysr@777 1305 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1306 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1307 _prev_region_num_young = _region_num_young;
ysr@777 1308 _prev_region_num_tenured = _region_num_tenured;
ysr@777 1309
ysr@777 1310 double interval_ms =
ysr@777 1311 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1312 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1313 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1314 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1315 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1316 #ifndef PRODUCT
ysr@1521 1317 // Dump info to allow post-facto debugging
ysr@1521 1318 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1319 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1320 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1321 _recent_gc_times_ms->dump();
ysr@1521 1322 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1323 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1324 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1325 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1326 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1327 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1328 #endif // !PRODUCT
ysr@1522 1329 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1330 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1331 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1332 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1333 } else {
ysr@1521 1334 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1335 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1336 }
ysr@1521 1337 }
ysr@777 1338 }
ysr@777 1339
ysr@777 1340 if (G1PolicyVerbose > 1) {
ysr@777 1341 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
ysr@777 1342 }
ysr@777 1343
ysr@777 1344 if (G1PolicyVerbose > 1) {
ysr@777 1345 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1346 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1347 " |RS|: " SIZE_FORMAT,
ysr@777 1348 elapsed_ms, recent_avg_time_for_pauses_ms(),
johnc@3021 1349 scan_rs_time, recent_avg_time_for_rs_scan_ms(),
ysr@777 1350 rs_size);
ysr@777 1351
ysr@777 1352 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
ysr@777 1353 " At end " SIZE_FORMAT "K\n"
ysr@777 1354 " garbage : " SIZE_FORMAT "K"
ysr@777 1355 " of " SIZE_FORMAT "K\n"
ysr@777 1356 " survival : %6.2f%% (%6.2f%% avg)",
ysr@777 1357 _cur_collection_pause_used_at_start_bytes/K,
ysr@777 1358 _g1->used()/K, freed_bytes/K,
ysr@777 1359 _collection_set_bytes_used_before/K,
ysr@777 1360 survival_fraction*100.0,
ysr@777 1361 recent_avg_survival_fraction()*100.0);
ysr@777 1362 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
ysr@777 1363 recent_avg_pause_time_ratio() * 100.0);
ysr@777 1364 }
ysr@777 1365
ysr@777 1366 double other_time_ms = elapsed_ms;
ysr@777 1367
tonyp@2062 1368 if (_satb_drain_time_set) {
tonyp@2062 1369 other_time_ms -= _cur_satb_drain_time_ms;
ysr@777 1370 }
ysr@777 1371
tonyp@2062 1372 if (parallel) {
tonyp@2062 1373 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
tonyp@2062 1374 } else {
tonyp@2062 1375 other_time_ms -=
tonyp@2062 1376 update_rs_time +
tonyp@2062 1377 ext_root_scan_time + mark_stack_scan_time +
tonyp@2062 1378 scan_rs_time + obj_copy_time;
tonyp@2062 1379 }
tonyp@2062 1380
ysr@777 1381 if (PrintGCDetails) {
tonyp@2062 1382 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1383 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1384 elapsed_ms / 1000.0);
ysr@777 1385
tonyp@2062 1386 if (_satb_drain_time_set) {
tonyp@2062 1387 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
tonyp@2062 1388 }
tonyp@2062 1389 if (_last_satb_drain_processed_buffers >= 0) {
tonyp@2062 1390 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
tonyp@2062 1391 }
tonyp@2062 1392 if (parallel) {
tonyp@2062 1393 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
brutisso@2712 1394 print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
tonyp@2062 1395 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
brutisso@2712 1396 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
brutisso@2712 1397 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
brutisso@2712 1398 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
tonyp@2062 1399 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1400 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1401 print_par_stats(2, "Termination", _par_last_termination_times_ms);
brutisso@2712 1402 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
brutisso@2712 1403 print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
brutisso@2712 1404
brutisso@2712 1405 for (int i = 0; i < _parallel_gc_threads; i++) {
brutisso@2712 1406 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
brutisso@2712 1407 }
brutisso@2712 1408 print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
brutisso@2712 1409
johnc@3021 1410 print_stats(2, "Parallel Other", parallel_other_time);
tonyp@2062 1411 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
tonyp@2062 1412 } else {
tonyp@2062 1413 print_stats(1, "Update RS", update_rs_time);
tonyp@2062 1414 print_stats(2, "Processed Buffers",
tonyp@2062 1415 (int)update_rs_processed_buffers);
tonyp@2062 1416 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
tonyp@2062 1417 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
tonyp@2062 1418 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1419 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1420 }
johnc@1325 1421 #ifndef PRODUCT
johnc@1325 1422 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1423 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1424 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1425 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1426 if (_num_cc_clears > 0) {
johnc@1325 1427 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1428 }
johnc@1325 1429 #endif
ysr@777 1430 print_stats(1, "Other", other_time_ms);
johnc@1829 1431 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
johnc@1829 1432
ysr@777 1433 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1434 if (_cur_aux_times_set[i]) {
ysr@777 1435 char buffer[96];
ysr@777 1436 sprintf(buffer, "Aux%d", i);
ysr@777 1437 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1438 }
ysr@777 1439 }
ysr@777 1440 }
ysr@777 1441
ysr@777 1442 _all_pause_times_ms->add(elapsed_ms);
tonyp@1083 1443 if (update_stats) {
tonyp@1083 1444 summary->record_total_time_ms(elapsed_ms);
tonyp@1083 1445 summary->record_other_time_ms(other_time_ms);
tonyp@1083 1446 }
ysr@777 1447 for (int i = 0; i < _aux_num; ++i)
ysr@777 1448 if (_cur_aux_times_set[i])
ysr@777 1449 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
ysr@777 1450
ysr@777 1451 // Reset marks-between-pauses counter.
ysr@777 1452 _n_marks_since_last_pause = 0;
ysr@777 1453
ysr@777 1454 // Update the efficiency-since-mark vars.
ysr@777 1455 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1456 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1457 // This usually happens due to the timer not having the required
ysr@777 1458 // granularity. Some Linuxes are the usual culprits.
ysr@777 1459 // We'll just set it to something (arbitrarily) small.
ysr@777 1460 proc_ms = 1.0;
ysr@777 1461 }
ysr@777 1462 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1463
ysr@777 1464 bool new_in_marking_window = _in_marking_window;
ysr@777 1465 bool new_in_marking_window_im = false;
tonyp@1794 1466 if (during_initial_mark_pause()) {
ysr@777 1467 new_in_marking_window = true;
ysr@777 1468 new_in_marking_window_im = true;
ysr@777 1469 }
ysr@777 1470
ysr@777 1471 if (in_young_gc_mode()) {
ysr@777 1472 if (_last_full_young_gc) {
ysr@777 1473 set_full_young_gcs(false);
ysr@777 1474 _last_full_young_gc = false;
ysr@777 1475 }
ysr@777 1476
ysr@777 1477 if ( !_last_young_gc_full ) {
ysr@777 1478 if ( _should_revert_to_full_young_gcs ||
ysr@777 1479 _known_garbage_ratio < 0.05 ||
ysr@777 1480 (adaptive_young_list_length() &&
ysr@777 1481 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
ysr@777 1482 set_full_young_gcs(true);
ysr@777 1483 }
ysr@777 1484 }
ysr@777 1485 _should_revert_to_full_young_gcs = false;
ysr@777 1486
ysr@777 1487 if (_last_young_gc_full && !_during_marking)
ysr@777 1488 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1489 }
ysr@777 1490
ysr@777 1491 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1492 // do that for any other surv rate groupsx
ysr@777 1493
ysr@777 1494 // <NEW PREDICTION>
ysr@777 1495
apetrusenko@1112 1496 if (update_stats) {
ysr@777 1497 double pause_time_ms = elapsed_ms;
ysr@777 1498
ysr@777 1499 size_t diff = 0;
ysr@777 1500 if (_max_pending_cards >= _pending_cards)
ysr@777 1501 diff = _max_pending_cards - _pending_cards;
ysr@777 1502 _pending_card_diff_seq->add((double) diff);
ysr@777 1503
ysr@777 1504 double cost_per_card_ms = 0.0;
ysr@777 1505 if (_pending_cards > 0) {
ysr@777 1506 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1507 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1508 }
ysr@777 1509
ysr@777 1510 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1511
ysr@777 1512 double cost_per_entry_ms = 0.0;
ysr@777 1513 if (cards_scanned > 10) {
ysr@777 1514 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
ysr@777 1515 if (_last_young_gc_full)
ysr@777 1516 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1517 else
ysr@777 1518 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1519 }
ysr@777 1520
ysr@777 1521 if (_max_rs_lengths > 0) {
ysr@777 1522 double cards_per_entry_ratio =
ysr@777 1523 (double) cards_scanned / (double) _max_rs_lengths;
ysr@777 1524 if (_last_young_gc_full)
ysr@777 1525 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1526 else
ysr@777 1527 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1528 }
ysr@777 1529
ysr@777 1530 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
ysr@777 1531 if (rs_length_diff >= 0)
ysr@777 1532 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1533
ysr@777 1534 size_t copied_bytes = surviving_bytes;
ysr@777 1535 double cost_per_byte_ms = 0.0;
ysr@777 1536 if (copied_bytes > 0) {
ysr@777 1537 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
ysr@777 1538 if (_in_marking_window)
ysr@777 1539 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
ysr@777 1540 else
ysr@777 1541 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
ysr@777 1542 }
ysr@777 1543
ysr@777 1544 double all_other_time_ms = pause_time_ms -
johnc@1829 1545 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1546 _mark_closure_time_ms + termination_time);
ysr@777 1547
ysr@777 1548 double young_other_time_ms = 0.0;
ysr@777 1549 if (_recorded_young_regions > 0) {
ysr@777 1550 young_other_time_ms =
ysr@777 1551 _recorded_young_cset_choice_time_ms +
ysr@777 1552 _recorded_young_free_cset_time_ms;
ysr@777 1553 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
ysr@777 1554 (double) _recorded_young_regions);
ysr@777 1555 }
ysr@777 1556 double non_young_other_time_ms = 0.0;
ysr@777 1557 if (_recorded_non_young_regions > 0) {
ysr@777 1558 non_young_other_time_ms =
ysr@777 1559 _recorded_non_young_cset_choice_time_ms +
ysr@777 1560 _recorded_non_young_free_cset_time_ms;
ysr@777 1561
ysr@777 1562 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
ysr@777 1563 (double) _recorded_non_young_regions);
ysr@777 1564 }
ysr@777 1565
ysr@777 1566 double constant_other_time_ms = all_other_time_ms -
ysr@777 1567 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1568 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1569
ysr@777 1570 double survival_ratio = 0.0;
ysr@777 1571 if (_bytes_in_collection_set_before_gc > 0) {
tonyp@3028 1572 survival_ratio = (double) _bytes_copied_during_gc /
tonyp@3028 1573 (double) _bytes_in_collection_set_before_gc;
ysr@777 1574 }
ysr@777 1575
ysr@777 1576 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1577 _scanned_cards_seq->add((double) cards_scanned);
ysr@777 1578 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1579
ysr@777 1580 double expensive_region_limit_ms =
johnc@1186 1581 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1582 if (expensive_region_limit_ms < 0.0) {
ysr@777 1583 // this means that the other time was predicted to be longer than
ysr@777 1584 // than the max pause time
johnc@1186 1585 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1586 }
ysr@777 1587 _expensive_region_limit_ms = expensive_region_limit_ms;
ysr@777 1588
ysr@777 1589 if (PREDICTIONS_VERBOSE) {
ysr@777 1590 gclog_or_tty->print_cr("");
ysr@777 1591 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
johnc@1829 1592 "REGIONS %d %d %d "
ysr@777 1593 "PENDING_CARDS %d %d "
ysr@777 1594 "CARDS_SCANNED %d %d "
ysr@777 1595 "RS_LENGTHS %d %d "
ysr@777 1596 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
ysr@777 1597 "SURVIVAL_RATIO %1.6lf %1.6lf "
ysr@777 1598 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
ysr@777 1599 "OTHER_YOUNG %1.6lf %1.6lf "
ysr@777 1600 "OTHER_NON_YOUNG %1.6lf %1.6lf "
ysr@777 1601 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
ysr@777 1602 "ELAPSED %1.6lf %1.6lf ",
ysr@777 1603 _cur_collection_start_sec,
ysr@777 1604 (!_last_young_gc_full) ? 2 :
ysr@777 1605 (last_pause_included_initial_mark) ? 1 : 0,
ysr@777 1606 _recorded_region_num,
ysr@777 1607 _recorded_young_regions,
ysr@777 1608 _recorded_non_young_regions,
ysr@777 1609 _predicted_pending_cards, _pending_cards,
ysr@777 1610 _predicted_cards_scanned, cards_scanned,
ysr@777 1611 _predicted_rs_lengths, _max_rs_lengths,
ysr@777 1612 _predicted_rs_update_time_ms, update_rs_time,
ysr@777 1613 _predicted_rs_scan_time_ms, scan_rs_time,
ysr@777 1614 _predicted_survival_ratio, survival_ratio,
ysr@777 1615 _predicted_object_copy_time_ms, obj_copy_time,
ysr@777 1616 _predicted_constant_other_time_ms, constant_other_time_ms,
ysr@777 1617 _predicted_young_other_time_ms, young_other_time_ms,
ysr@777 1618 _predicted_non_young_other_time_ms,
ysr@777 1619 non_young_other_time_ms,
ysr@777 1620 _vtime_diff_ms, termination_time,
ysr@777 1621 _predicted_pause_time_ms, elapsed_ms);
ysr@777 1622 }
ysr@777 1623
ysr@777 1624 if (G1PolicyVerbose > 0) {
ysr@777 1625 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
ysr@777 1626 _predicted_pause_time_ms,
ysr@777 1627 (_within_target) ? "within" : "outside",
ysr@777 1628 elapsed_ms);
ysr@777 1629 }
ysr@777 1630
ysr@777 1631 }
ysr@777 1632
ysr@777 1633 _in_marking_window = new_in_marking_window;
ysr@777 1634 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1635 _free_regions_at_end_of_collection = _g1->free_regions();
ysr@777 1636 calculate_young_list_min_length();
johnc@1829 1637 calculate_young_list_target_length();
ysr@777 1638
iveresov@1546 1639 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1640 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1641 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
ysr@777 1642 // </NEW PREDICTION>
ysr@777 1643 }
ysr@777 1644
// Helpers for printing a byte count scaled to a human-readable unit:
// EXT_SIZE_PARAMS(bytes) expands to the two arguments (scaled value,
// unit string) that the EXT_SIZE_FORMAT format string expects.
#define EXT_SIZE_FORMAT "%d%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((bytes)),                            \
  proper_unit_for_byte_size((bytes))
tonyp@2961 1649
// Prints the heap occupancy transition for the pause that just ended.
// With -XX:+PrintGCDetails: eden, survivor and total heap usage
// before -> after the pause (heap capacity in parentheses). With only
// -XX:+PrintGC: a single overall size transition line.
void G1CollectorPolicy::print_heap_transition() {
  if (PrintGCDetails) {
    YoungList* young_list = _g1->young_list();
    // Current (post-pause) occupancies; the *_before_gc fields were
    // captured when the pause started.
    size_t eden_bytes = young_list->eden_used_bytes();
    size_t survivor_bytes = young_list->survivor_used_bytes();
    size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
    size_t used = _g1->used();
    size_t capacity = _g1->capacity();

    gclog_or_tty->print_cr(
      " [Eden: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
      "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
      "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
      EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
      EXT_SIZE_PARAMS(_eden_bytes_before_gc),
      EXT_SIZE_PARAMS(eden_bytes),
      EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
      EXT_SIZE_PARAMS(survivor_bytes),
      EXT_SIZE_PARAMS(used_before_gc),
      EXT_SIZE_PARAMS(_capacity_before_gc),
      EXT_SIZE_PARAMS(used),
      EXT_SIZE_PARAMS(capacity));
  } else if (PrintGC) {
    _g1->print_size_transition(gclog_or_tty,
                               _cur_collection_pause_used_at_start_bytes,
                               _g1->used(), _g1->capacity());
  }
}
tonyp@2961 1678
ysr@777 1679 // <NEW PREDICTION>
ysr@777 1680
// Re-tunes the concurrent refinement machinery after a pause, based
// on how long the last remembered-set update took (update_rs_time)
// relative to its budget (goal_ms), and on how many buffers it had to
// process (update_rs_processed_buffers).
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    // Multipliers deriving the yellow/red zones from the green zone.
    const int k_gy = 3, k_gr = 6;
    // Growth/shrink factors applied to the green zone itself.
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      // RS update overran its budget: shrink the green zone so fewer
      // buffers are left for the pause.
      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        // Under budget but processing more than the green zone holds:
        // grow the zone (by at least one buffer).
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    // Mutators start helping once the queue grows sigma() noise above
    // the green zone (but no later than the yellow zone).
    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  // If the completed-buffer queue has backed up past the yellow zone,
  // pad it by its current size; otherwise clear any padding.
  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}
iveresov@1546 1721
// Predicts the elapsed time (ms) of a young collection if it happened
// now, with the young list inflated by 'adjustment' (0 or 1) extra
// regions — i.e. "what if we allocated one more eden region first".
// Returns 0.0 when the young list is empty.
double
G1CollectorPolicy::
predict_young_collection_elapsed_time_ms(size_t adjustment) {
  guarantee( adjustment == 0 || adjustment == 1, "invariant" );

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  size_t young_num = g1h->young_list()->length();
  if (young_num == 0)
    return 0.0;

  young_num += adjustment;
  size_t pending_cards = predict_pending_cards();
  size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
                      predict_rs_length_diff();
  // Pick the cards-per-RS-entry model matching the current GC mode.
  size_t card_num;
  if (full_young_gcs())
    card_num = predict_young_card_num(rs_lengths);
  else
    card_num = predict_non_young_card_num(rs_lengths);
  // NOTE(review): young_byte_size appears unused in this function.
  size_t young_byte_size = young_num * HeapRegion::GrainBytes;
  double accum_yg_surv_rate =
    _short_lived_surv_rate_group->accum_surv_rate(adjustment);

  // accum_surv_rate() accumulates per-region survival rates, so
  // scaling by the region size gives total predicted surviving bytes.
  size_t bytes_to_copy =
    (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);

  // Predicted pause = RS update + RS scan + object copying +
  // per-young-region overhead + constant overhead.
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy) +
    predict_young_other_time_ms(young_num) +
    predict_constant_other_time_ms();
}
ysr@777 1755
ysr@777 1756 double
ysr@777 1757 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1758 size_t rs_length = predict_rs_length_diff();
ysr@777 1759 size_t card_num;
ysr@777 1760 if (full_young_gcs())
ysr@777 1761 card_num = predict_young_card_num(rs_length);
ysr@777 1762 else
ysr@777 1763 card_num = predict_non_young_card_num(rs_length);
ysr@777 1764 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1765 }
ysr@777 1766
ysr@777 1767 double
ysr@777 1768 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1769 size_t scanned_cards) {
ysr@777 1770 return
ysr@777 1771 predict_rs_update_time_ms(pending_cards) +
ysr@777 1772 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1773 predict_constant_other_time_ms();
ysr@777 1774 }
ysr@777 1775
ysr@777 1776 double
ysr@777 1777 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1778 bool young) {
ysr@777 1779 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1780 size_t card_num;
ysr@777 1781 if (full_young_gcs())
ysr@777 1782 card_num = predict_young_card_num(rs_length);
ysr@777 1783 else
ysr@777 1784 card_num = predict_non_young_card_num(rs_length);
ysr@777 1785 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1786
ysr@777 1787 double region_elapsed_time_ms =
ysr@777 1788 predict_rs_scan_time_ms(card_num) +
ysr@777 1789 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1790
ysr@777 1791 if (young)
ysr@777 1792 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1793 else
ysr@777 1794 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1795
ysr@777 1796 return region_elapsed_time_ms;
ysr@777 1797 }
ysr@777 1798
ysr@777 1799 size_t
ysr@777 1800 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1801 size_t bytes_to_copy;
ysr@777 1802 if (hr->is_marked())
ysr@777 1803 bytes_to_copy = hr->max_live_bytes();
ysr@777 1804 else {
ysr@777 1805 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1806 "invariant" );
ysr@777 1807 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1808 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1809 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1810 }
ysr@777 1811
ysr@777 1812 return bytes_to_copy;
ysr@777 1813 }
ysr@777 1814
// Resets the counters accumulated while regions are added to the
// collection set; paired with end_recording_regions().
void
G1CollectorPolicy::start_recording_regions() {
  _recorded_rs_lengths = 0;
  _recorded_young_regions = 0;
  _recorded_non_young_regions = 0;

#if PREDICTIONS_VERBOSE
  // Extra bookkeeping used only for prediction-verbose output.
  _recorded_marked_bytes = 0;
  _recorded_young_bytes = 0;
  _predicted_bytes_to_copy = 0;
  _predicted_rs_lengths = 0;
  _predicted_cards_scanned = 0;
#endif // PREDICTIONS_VERBOSE
}
ysr@777 1829
// Common bookkeeping for a region added to the collection set:
// accumulates its remembered-set length into _recorded_rs_lengths,
// and (under PREDICTIONS_VERBOSE) the bytes we predict will be copied
// out of it.
void
G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
#if PREDICTIONS_VERBOSE
  if (!young) {
    // Only non-young regions contribute their marked live bytes.
    _recorded_marked_bytes += hr->max_live_bytes();
  }
  _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
#endif // PREDICTIONS_VERBOSE

  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
}
ysr@777 1842
// Adds a non-young region to the recorded collection-set statistics.
void
G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
  assert(!hr->is_young(), "should not call this");
  ++_recorded_non_young_regions;
  record_cset_region_info(hr, false);
}
johnc@1829 1849
// Records the number of young regions chosen for the collection set.
void
G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  _recorded_young_regions = n_regions;
}
johnc@1829 1854
// Records the total bytes in the young portion of the collection set
// (prediction-verbose bookkeeping only; a no-op otherwise).
void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
#if PREDICTIONS_VERBOSE
  _recorded_young_bytes = bytes;
#endif // PREDICTIONS_VERBOSE
}
johnc@1829 1860
// Overrides the recorded remembered-set length total for the
// collection set.
void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}
johnc@1829 1864
// Overrides the predicted bytes-to-copy total for the collection set.
void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  _predicted_bytes_to_copy = bytes;
}
ysr@777 1868
// Finalizes the predictions for the collection set that was just
// assembled: derives each pause-time component from the recorded
// region counts and RS-length totals, and sums them into
// _predicted_pause_time_ms.
void
G1CollectorPolicy::end_recording_regions() {
  // The _predicted_pause_time_ms field is referenced in code
  // not under PREDICTIONS_VERBOSE. Let's initialize it.
  _predicted_pause_time_ms = -1.0;

#if PREDICTIONS_VERBOSE
  _predicted_pending_cards = predict_pending_cards();
  _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  // Choose the cards-per-entry model matching the current GC mode.
  if (full_young_gcs())
    _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  else
    _predicted_cards_scanned +=
      predict_non_young_card_num(_predicted_rs_lengths);
  _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;

  // Individual predicted pause components.
  _predicted_rs_update_time_ms =
    predict_rs_update_time_ms(_g1->pending_card_num());
  _predicted_rs_scan_time_ms =
    predict_rs_scan_time_ms(_predicted_cards_scanned);
  _predicted_object_copy_time_ms =
    predict_object_copy_time_ms(_predicted_bytes_to_copy);
  _predicted_constant_other_time_ms =
    predict_constant_other_time_ms();
  _predicted_young_other_time_ms =
    predict_young_other_time_ms(_recorded_young_regions);
  _predicted_non_young_other_time_ms =
    predict_non_young_other_time_ms(_recorded_non_young_regions);

  // The total prediction is simply the sum of the components above.
  _predicted_pause_time_ms =
    _predicted_rs_update_time_ms +
    _predicted_rs_scan_time_ms +
    _predicted_object_copy_time_ms +
    _predicted_constant_other_time_ms +
    _predicted_young_other_time_ms +
    _predicted_non_young_other_time_ms;
#endif // PREDICTIONS_VERBOSE
}
ysr@777 1907
// Called with the predicted incremental cost of a candidate region.
// If that cost exceeds _expensive_region_limit_ms, stop adding
// non-young regions: in young GC mode, flag that we should revert to
// full-young GCs.
void G1CollectorPolicy::check_if_region_is_too_expensive(double
                                                         predicted_time_ms) {
  // I don't think we need to do this when in young GC mode since
  // marking will be initiated next time we hit the soft limit anyway...
  if (predicted_time_ms > _expensive_region_limit_ms) {
    if (!in_young_gc_mode()) {
      set_full_young_gcs(true);
      // We might want to do something different here. However,
      // right now we don't support the non-generational G1 mode
      // (and in fact we are planning to remove the associated code,
      // see CR 6814390). So, let's leave it as is and this will be
      // removed some time in the future
      ShouldNotReachHere();
      set_during_initial_mark_pause();
    } else
      // no point in doing another partial one
      _should_revert_to_full_young_gcs = true;
  }
}
ysr@777 1927
ysr@777 1928 // </NEW PREDICTION>
ysr@777 1929
ysr@777 1930
// Folds the just-completed pause (ending at end_time_sec, lasting
// elapsed_ms) into the recent-GC-time sequences and remembers when it
// ended (in ms) for interval calculations.
void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}
ysr@777 1937
ysr@777 1938 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
johnc@3021 1939 if (_recent_pause_times_ms->num() == 0) {
johnc@3021 1940 return (double) MaxGCPauseMillis;
johnc@3021 1941 }
johnc@3021 1942 return _recent_pause_times_ms->avg();
ysr@777 1943 }
ysr@777 1944
johnc@3021 1945 double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
johnc@3021 1946 if (_recent_rs_scan_times_ms->num() == 0) {
johnc@1186 1947 return (double)MaxGCPauseMillis/3.0;
johnc@3021 1948 }
johnc@3021 1949 return _recent_rs_scan_times_ms->avg();
ysr@777 1950 }
ysr@777 1951
// Number of recent GCs on record. The four "recent" sequences are
// populated in lock step, so their lengths must agree.
int G1CollectorPolicy::number_of_recent_gcs() {
  assert(_recent_rs_scan_times_ms->num() ==
         _recent_pause_times_ms->num(), "Sequence out of sync");
  assert(_recent_pause_times_ms->num() ==
         _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  assert(_recent_CS_bytes_used_before->num() ==
         _recent_CS_bytes_surviving->num(), "Sequence out of sync");

  return _recent_pause_times_ms->num();
}
ysr@777 1962
// Average survival fraction over the recent collections (surviving
// bytes / collection-set bytes before GC).
double G1CollectorPolicy::recent_avg_survival_fraction() {
  return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
                                           _recent_CS_bytes_used_before);
}
ysr@777 1967
// Survival fraction of the most recent collection only.
double G1CollectorPolicy::last_survival_fraction() {
  return last_survival_fraction_work(_recent_CS_bytes_surviving,
                                     _recent_CS_bytes_used_before);
}
ysr@777 1972
// Computes sum(surviving) / sum(before) over the two sequences.
// Returns a conservative 1.0 when there is no data yet.
double
G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                                     TruncatedSeq* before) {
  assert(surviving->num() == before->num(), "Sequence out of sync");
  if (before->sum() > 0.0) {
    double recent_survival_rate = surviving->sum() / before->sum();
    // We exempt parallel collection from this check because Alloc Buffer
    // fragmentation can produce negative collections.
    // Further, we're now always doing parallel collection. But I'm still
    // leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
    // NOTE(review): the leading "true ||" makes this assert trivially
    // pass — it is the deliberately-disabled check described above.
    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
           _g1->evacuation_failed() ||
           recent_survival_rate <= 1.0, "Or bad frac");
    return recent_survival_rate;
  } else {
    return 1.0; // Be conservative.
  }
}
ysr@777 1992
// Computes last(surviving) / last(before) — the survival fraction of
// the most recent sample only. Returns a conservative 1.0 when there
// is no data yet.
double
G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
                                               TruncatedSeq* before) {
  assert(surviving->num() == before->num(), "Sequence out of sync");
  if (surviving->num() > 0 && before->last() > 0.0) {
    double last_survival_rate = surviving->last() / before->last();
    // We exempt parallel collection from this check because Alloc Buffer
    // fragmentation can produce negative collections.
    // Further, we're now always doing parallel collection. But I'm still
    // leaving this here as a placeholder for a more precise assertion later.
    // (DLD, 10/05.)
    // NOTE(review): the leading "true ||" makes this assert trivially
    // pass — it is the deliberately-disabled check described above.
    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
           last_survival_rate <= 1.0, "Or bad frac");
    return last_survival_rate;
  } else {
    return 1.0;
  }
}
ysr@777 2011
// With fewer than survival_min_obs recent GCs on record, the survival
// estimate is floored at survival_min_obs_limits[number_of_recent_gcs()]
// (a decreasing schedule); min_survival_rate is the absolute floor.
// See conservative_avg_survival_fraction_work().
static const int survival_min_obs = 5;
static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
static const double min_survival_rate = 0.1;
ysr@777 2015
ysr@777 2016 double
ysr@777 2017 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
ysr@777 2018 double latest) {
ysr@777 2019 double res = avg;
ysr@777 2020 if (number_of_recent_gcs() < survival_min_obs) {
ysr@777 2021 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
ysr@777 2022 }
ysr@777 2023 res = MAX2(res, latest);
ysr@777 2024 res = MAX2(res, min_survival_rate);
ysr@777 2025 // In the parallel case, LAB fragmentation can produce "negative
ysr@777 2026 // collections"; so can evac failure. Cap at 1.0
ysr@777 2027 res = MIN2(res, 1.0);
ysr@777 2028 return res;
ysr@777 2029 }
ysr@777 2030
// Decides how many bytes (if any) to expand the heap by. Expansion
// triggers when the recent GC overhead (pause-time ratio, as a
// percentage) exceeds _gc_overhead_perc. The amount is the smaller of
// doubling the committed space and G1ExpandByPercentOfAvailable% of
// the uncommitted space, bounded below by min_expand_bytes and above
// by what is actually uncommitted. Returns 0 when no expansion is
// warranted.
size_t G1CollectorPolicy::expansion_amount() {
  if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
    if (G1PolicyVerbose > 1) {
      gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
                          "committed = %d%s, uncommited = %d%s, via pct = %d%s.\n"
                          " Answer = %d.\n",
                          recent_avg_pause_time_ratio(),
                          byte_size_in_proper_unit(committed_bytes),
                          proper_unit_for_byte_size(committed_bytes),
                          byte_size_in_proper_unit(uncommitted_bytes),
                          proper_unit_for_byte_size(uncommitted_bytes),
                          byte_size_in_proper_unit(expand_bytes_via_pct),
                          proper_unit_for_byte_size(expand_bytes_via_pct),
                          byte_size_in_proper_unit(expand_bytes),
                          proper_unit_for_byte_size(expand_bytes));
    }
    return expand_bytes;
  } else {
    return 0;
  }
}
ysr@777 2066
ysr@777 2067 void G1CollectorPolicy::note_start_of_mark_thread() {
ysr@777 2068 _mark_thread_startup_sec = os::elapsedTime();
ysr@777 2069 }
ysr@777 2070
ysr@777 2071 class CountCSClosure: public HeapRegionClosure {
ysr@777 2072 G1CollectorPolicy* _g1_policy;
ysr@777 2073 public:
ysr@777 2074 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 2075 _g1_policy(g1_policy) {}
ysr@777 2076 bool doHeapRegion(HeapRegion* r) {
ysr@777 2077 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 2078 return false;
ysr@777 2079 }
ysr@777 2080 };
ysr@777 2081
ysr@777 2082 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 2083 CountCSClosure cs_closure(this);
ysr@777 2084 _g1->collection_set_iterate(&cs_closure);
ysr@777 2085 }
ysr@777 2086
ysr@777 2087 void G1CollectorPolicy::print_summary (int level,
ysr@777 2088 const char* str,
ysr@777 2089 NumberSeq* seq) const {
ysr@777 2090 double sum = seq->sum();
brutisso@2645 2091 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 2092 str, sum / 1000.0, seq->avg());
ysr@777 2093 }
ysr@777 2094
// Like print_summary(), but also prints a second line with the sample
// count, standard deviation, and maximum of the sequence.
void G1CollectorPolicy::print_summary_sd (int level,
                                          const char* str,
                                          NumberSeq* seq) const {
  print_summary(level, str, seq);
  LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                seq->num(), seq->sd(), seq->maximum());
}
ysr@777 2102
// Sanity check the "Other" times: compare the directly recorded values
// against the values calculated by subtracting the known sub-phases
// from the total. Print a warning if they diverge by more than ~10% or
// if either is (impossibly) negative, and in that case also print a
// summary of the calculated sequence.
void G1CollectorPolicy::check_other_times(int level,
                                          NumberSeq* other_times_ms,
                                          NumberSeq* calc_other_times_ms) const {
  bool should_print = false;
  LineBuffer buf(level + 2);

  // Compare the sums: ratio of the larger magnitude to the smaller.
  double max_sum = MAX2(fabs(other_times_ms->sum()),
                        fabs(calc_other_times_ms->sum()));
  double min_sum = MIN2(fabs(other_times_ms->sum()),
                        fabs(calc_other_times_ms->sum()));
  double sum_ratio = max_sum / min_sum;
  if (sum_ratio > 1.1) {
    should_print = true;
    buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
  }

  // Same check for the averages.
  double max_avg = MAX2(fabs(other_times_ms->avg()),
                        fabs(calc_other_times_ms->avg()));
  double min_avg = MIN2(fabs(other_times_ms->avg()),
                        fabs(calc_other_times_ms->avg()));
  double avg_ratio = max_avg / min_avg;
  if (avg_ratio > 1.1) {
    should_print = true;
    buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
  }

  // Negative times indicate a bookkeeping error somewhere; the -0.01
  // threshold tolerates floating-point rounding noise.
  // NOTE(review): the two RECORDED cases below do not set should_print,
  // unlike the CALCULATED ones — confirm whether that asymmetry is
  // intentional.
  if (other_times_ms->sum() < -0.01) {
    buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
  }

  if (other_times_ms->avg() < -0.01) {
    buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
  }

  if (calc_other_times_ms->sum() < -0.01) {
    should_print = true;
    buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
  }

  if (calc_other_times_ms->avg() < -0.01) {
    should_print = true;
    buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
  }

  if (should_print)
    print_summary(level, "Other(Calc)", calc_other_times_ms);
}
ysr@777 2150
// Print the full evacuation-pause summary: the total pause times plus a
// per-phase breakdown. The breakdown differs between the parallel and
// serial collector configurations, and the recorded "Other" times are
// cross-checked against values calculated from the sub-phases.
void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
  MainBodySummary* body_summary = summary->main_body_summary();
  if (summary->get_total_seq()->num() > 0) {
    print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
    if (body_summary != NULL) {
      print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
      if (parallel) {
        // Parallel GC: worker phases are nested under "Parallel Time".
        print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
        print_summary(2, "Update RS", body_summary->get_update_rs_seq());
        print_summary(2, "Ext Root Scanning",
                      body_summary->get_ext_root_scan_seq());
        print_summary(2, "Mark Stack Scanning",
                      body_summary->get_mark_stack_scan_seq());
        print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
        print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
        print_summary(2, "Termination", body_summary->get_termination_seq());
        print_summary(2, "Other", body_summary->get_parallel_other_seq());
        {
          // Verify that parallel "Other" equals parallel time minus the
          // six accounted sub-phases.
          NumberSeq* other_parts[] = {
            body_summary->get_update_rs_seq(),
            body_summary->get_ext_root_scan_seq(),
            body_summary->get_mark_stack_scan_seq(),
            body_summary->get_scan_rs_seq(),
            body_summary->get_obj_copy_seq(),
            body_summary->get_termination_seq()
          };
          NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
                                        6, other_parts);
          check_other_times(2, body_summary->get_parallel_other_seq(),
                            &calc_other_times_ms);
        }
        print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
        print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
      } else {
        // Serial GC: the same phases printed one indentation level up.
        print_summary(1, "Update RS", body_summary->get_update_rs_seq());
        print_summary(1, "Ext Root Scanning",
                      body_summary->get_ext_root_scan_seq());
        print_summary(1, "Mark Stack Scanning",
                      body_summary->get_mark_stack_scan_seq());
        print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
        print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
      }
    }
    print_summary(1, "Other", summary->get_other_seq());
    {
      if (body_summary != NULL) {
        // Cross-check the top-level "Other" time too; the parts that
        // make up the total differ between parallel and serial mode.
        NumberSeq calc_other_times_ms;
        if (parallel) {
          // parallel
          NumberSeq* other_parts[] = {
            body_summary->get_satb_drain_seq(),
            body_summary->get_parallel_seq(),
            body_summary->get_clear_ct_seq()
          };
          calc_other_times_ms = NumberSeq(summary->get_total_seq(),
                                          3, other_parts);
        } else {
          // serial
          NumberSeq* other_parts[] = {
            body_summary->get_satb_drain_seq(),
            body_summary->get_update_rs_seq(),
            body_summary->get_ext_root_scan_seq(),
            body_summary->get_mark_stack_scan_seq(),
            body_summary->get_scan_rs_seq(),
            body_summary->get_obj_copy_seq()
          };
          calc_other_times_ms = NumberSeq(summary->get_total_seq(),
                                          6, other_parts);
        }
        check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
      }
    }
  } else {
    // No pauses have been recorded at all.
    LineBuffer(1).append_and_print_cr("none");
  }
  LineBuffer(0).append_and_print_cr("");
}
ysr@777 2229
// Emit the accumulated GC timing statistics: all pauses, the evacuation
// pause breakdown, stop-world/yield times, aux timers, and the
// young/tenured region allocation split (under TraceGen0Time), plus
// full GC times (under TraceGen1Time).
void G1CollectorPolicy::print_tracing_info() const {
  if (TraceGen0Time) {
    gclog_or_tty->print_cr("ALL PAUSES");
    print_summary_sd(0, "Total", _all_pause_times_ms);
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr("");
    gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
    gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
    gclog_or_tty->print_cr("");

    gclog_or_tty->print_cr("EVACUATION PAUSES");
    print_summary(_summary);

    gclog_or_tty->print_cr("MISC");
    print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
    print_summary_sd(0, "Yields", _all_yield_times_ms);
    // Aux timers: only print the slots that were actually used.
    for (int i = 0; i < _aux_num; ++i) {
      if (_all_aux_times_ms[i].num() > 0) {
        char buffer[96];
        sprintf(buffer, "Aux%d", i);
        print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
      }
    }

    // NOTE(review): if no regions were ever allocated, all_region_num is
    // zero and the percentage computations divide by zero (printing
    // nan/inf) — confirm this is acceptable for tracing-only output.
    size_t all_region_num = _region_num_young + _region_num_tenured;
    gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
                           "Tenured %8d (%6.2lf%%)",
                           all_region_num,
                           _region_num_young,
                           (double) _region_num_young / (double) all_region_num * 100.0,
                           _region_num_tenured,
                           (double) _region_num_tenured / (double) all_region_num * 100.0);
  }
  if (TraceGen1Time) {
    if (_all_full_gc_times_ms->num() > 0) {
      gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                          _all_full_gc_times_ms->num(),
                          _all_full_gc_times_ms->sum() / 1000.0);
      gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
      gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
                             _all_full_gc_times_ms->sd(),
                             _all_full_gc_times_ms->maximum());
    }
  }
}
ysr@777 2275
// Debug-only: print the survivor rate summary for the short-lived
// surv-rate group. No-op in PRODUCT builds.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
ysr@777 2282
tonyp@2315 2283 void
tonyp@2315 2284 G1CollectorPolicy::update_region_num(bool young) {
tonyp@2315 2285 if (young) {
ysr@777 2286 ++_region_num_young;
ysr@777 2287 } else {
ysr@777 2288 ++_region_num_tenured;
ysr@777 2289 }
ysr@777 2290 }
ysr@777 2291
ysr@777 2292 #ifndef PRODUCT
ysr@777 2293 // for debugging, bit of a hack...
ysr@777 2294 static char*
ysr@777 2295 region_num_to_mbs(int length) {
ysr@777 2296 static char buffer[64];
ysr@777 2297 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2298 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2299 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2300 return buffer;
ysr@777 2301 }
ysr@777 2302 #endif // PRODUCT
ysr@777 2303
apetrusenko@980 2304 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2305 switch (purpose) {
ysr@777 2306 case GCAllocForSurvived:
apetrusenko@980 2307 return _max_survivor_regions;
ysr@777 2308 case GCAllocForTenured:
apetrusenko@980 2309 return REGIONS_UNLIMITED;
ysr@777 2310 default:
apetrusenko@980 2311 ShouldNotReachHere();
apetrusenko@980 2312 return REGIONS_UNLIMITED;
ysr@777 2313 };
ysr@777 2314 }
ysr@777 2315
tonyp@2333 2316 void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
tonyp@2333 2317 size_t expansion_region_num = 0;
tonyp@2333 2318 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 2319 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 2320 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 2321 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 2322 // less than 1.0) we'll get 1.
tonyp@2333 2323 expansion_region_num = (size_t) ceil(expansion_region_num_d);
tonyp@2333 2324 } else {
tonyp@2333 2325 assert(expansion_region_num == 0, "sanity");
tonyp@2333 2326 }
tonyp@2333 2327 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 2328 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 2329 }
tonyp@2333 2330
apetrusenko@980 2331 // Calculates survivor space parameters.
apetrusenko@980 2332 void G1CollectorPolicy::calculate_survivors_policy()
apetrusenko@980 2333 {
apetrusenko@980 2334 if (G1FixedSurvivorSpaceSize == 0) {
apetrusenko@980 2335 _max_survivor_regions = _young_list_target_length / SurvivorRatio;
apetrusenko@980 2336 } else {
apetrusenko@982 2337 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
apetrusenko@980 2338 }
apetrusenko@980 2339
apetrusenko@980 2340 if (G1FixedTenuringThreshold) {
apetrusenko@980 2341 _tenuring_threshold = MaxTenuringThreshold;
apetrusenko@980 2342 } else {
apetrusenko@980 2343 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2344 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2345 }
apetrusenko@980 2346 }
apetrusenko@980 2347
#ifndef PRODUCT
// Debug-only closure: asserts, for every region that is not the
// continuation of a humongous object, that its ordering in the
// collection set chooser is consistent.
class HRSortIndexIsOKClosure: public HeapRegionClosure {
  CollectionSetChooser* _chooser;
public:
  HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
    _chooser(chooser) {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
    }
    return false; // never abort the iteration
  }
};
ysr@777 2362
// Debug-only sanity check: walks all heap regions verifying chooser
// ordering (via asserts in the closure). Always returns true so that it
// can be used directly inside an assert().
bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
  HRSortIndexIsOKClosure cl(_collectionSetChooser);
  _g1->heap_region_iterate(&cl);
  return true;
}
#endif
ysr@777 2369
tonyp@2011 2370 bool
tonyp@2011 2371 G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
tonyp@2011 2372 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2373 if (!during_cycle) {
tonyp@2011 2374 set_initiate_conc_mark_if_possible();
tonyp@2011 2375 return true;
tonyp@2011 2376 } else {
tonyp@2011 2377 return false;
tonyp@2011 2378 }
tonyp@2011 2379 }
tonyp@2011 2380
// Called when a pause is about to start: decides whether this pause
// should be an initial-mark pause, i.e. whether it should kick off a
// new concurrent marking cycle.
void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we start one right now the two cycles
      // overlap. In particular, the concurrent marking thread might
      // be in the process of clearing the next marking bitmap (which
      // we will use for the next cycle if we start one). Starting a
      // cycle now will be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
    }
  }
}
tonyp@1794 2424
// Simply delegates to the base-class implementation; this subclass does
// no extra bookkeeping at pause start.
void
G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_start(double start_time_sec, size_t start_used) {
  G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
}
ysr@777 2430
ysr@777 2431 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2432 CollectionSetChooser* _hrSorted;
ysr@777 2433
ysr@777 2434 public:
ysr@777 2435 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2436 _hrSorted(hrSorted)
ysr@777 2437 {}
ysr@777 2438
ysr@777 2439 bool doHeapRegion(HeapRegion* r) {
ysr@777 2440 // We only include humongous regions in collection
ysr@777 2441 // sets when concurrent mark shows that their contained object is
ysr@777 2442 // unreachable.
ysr@777 2443
ysr@777 2444 // Do we have any marking information for this region?
ysr@777 2445 if (r->is_marked()) {
ysr@777 2446 // We don't include humongous regions in collection
ysr@777 2447 // sets because we collect them immediately at the end of a marking
ysr@777 2448 // cycle. We also don't include young regions because we *must*
ysr@777 2449 // include them in the next collection pause.
ysr@777 2450 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2451 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2452 }
ysr@777 2453 }
ysr@777 2454 return false;
ysr@777 2455 }
ysr@777 2456 };
ysr@777 2457
ysr@777 2458 class ParKnownGarbageHRClosure: public HeapRegionClosure {
ysr@777 2459 CollectionSetChooser* _hrSorted;
ysr@777 2460 jint _marked_regions_added;
ysr@777 2461 jint _chunk_size;
ysr@777 2462 jint _cur_chunk_idx;
ysr@777 2463 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
ysr@777 2464 int _worker;
ysr@777 2465 int _invokes;
ysr@777 2466
ysr@777 2467 void get_new_chunk() {
ysr@777 2468 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
ysr@777 2469 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
ysr@777 2470 }
ysr@777 2471 void add_region(HeapRegion* r) {
ysr@777 2472 if (_cur_chunk_idx == _cur_chunk_end) {
ysr@777 2473 get_new_chunk();
ysr@777 2474 }
ysr@777 2475 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
ysr@777 2476 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
ysr@777 2477 _marked_regions_added++;
ysr@777 2478 _cur_chunk_idx++;
ysr@777 2479 }
ysr@777 2480
ysr@777 2481 public:
ysr@777 2482 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
ysr@777 2483 jint chunk_size,
ysr@777 2484 int worker) :
ysr@777 2485 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
ysr@777 2486 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
ysr@777 2487 _invokes(0)
ysr@777 2488 {}
ysr@777 2489
ysr@777 2490 bool doHeapRegion(HeapRegion* r) {
ysr@777 2491 // We only include humongous regions in collection
ysr@777 2492 // sets when concurrent mark shows that their contained object is
ysr@777 2493 // unreachable.
ysr@777 2494 _invokes++;
ysr@777 2495
ysr@777 2496 // Do we have any marking information for this region?
ysr@777 2497 if (r->is_marked()) {
ysr@777 2498 // We don't include humongous regions in collection
ysr@777 2499 // sets because we collect them immediately at the end of a marking
ysr@777 2500 // cycle.
ysr@777 2501 // We also do not include young regions in collection sets
ysr@777 2502 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2503 add_region(r);
ysr@777 2504 }
ysr@777 2505 }
ysr@777 2506 return false;
ysr@777 2507 }
ysr@777 2508 jint marked_regions_added() { return _marked_regions_added; }
ysr@777 2509 int invokes() { return _invokes; }
ysr@777 2510 };
ysr@777 2511
ysr@777 2512 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 2513 CollectionSetChooser* _hrSorted;
ysr@777 2514 jint _chunk_size;
ysr@777 2515 G1CollectedHeap* _g1;
ysr@777 2516 public:
ysr@777 2517 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ysr@777 2518 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 2519 _hrSorted(hrSorted), _chunk_size(chunk_size),
ysr@777 2520 _g1(G1CollectedHeap::heap())
ysr@777 2521 {}
ysr@777 2522
ysr@777 2523 void work(int i) {
ysr@777 2524 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
ysr@777 2525 // Back to zero for the claim value.
tonyp@790 2526 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
tonyp@790 2527 HeapRegion::InitialClaimValue);
ysr@777 2528 jint regions_added = parKnownGarbageCl.marked_regions_added();
ysr@777 2529 _hrSorted->incNumMarkedHeapRegions(regions_added);
ysr@777 2530 if (G1PrintParCleanupStats) {
brutisso@2645 2531 gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
ysr@777 2532 i, parKnownGarbageCl.invokes(), regions_added);
ysr@777 2533 }
ysr@777 2534 }
ysr@777 2535 };
ysr@777 2536
// Rebuild the collection set chooser at the end of concurrent mark
// cleanup: run the first batch of post-cleanup work, clear the old
// candidate list, repopulate it with marked non-young non-humongous
// regions (in parallel when worker threads are available), sort it,
// and run the second batch of post-cleanup work. Each stage is timed
// individually when G1PrintParCleanupStats is enabled.
void
G1CollectorPolicy_BestRegionsFirst::
record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                   size_t max_live_bytes) {
  double start;
  if (G1PrintParCleanupStats) start = os::elapsedTime();
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);

  _collectionSetChooser->clearMarkedHeapRegions();
  double clear_marked_end;
  if (G1PrintParCleanupStats) {
    clear_marked_end = os::elapsedTime();
    gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
                           (clear_marked_end - start)*1000.0);
  }
  if (G1CollectedHeap::use_parallel_gc_threads()) {
    // Over-partition (~4x relative to the worker count) for load
    // balancing, with a floor of MinWorkUnit regions per chunk.
    const size_t OverpartitionFactor = 4;
    const size_t MinWorkUnit = 8;
    const size_t WorkUnit =
      MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
           MinWorkUnit);
    _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
                                                             WorkUnit);
    ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
                                            (int) WorkUnit);
    _g1->workers()->run_task(&parKnownGarbageTask);

    assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
           "sanity check");
  } else {
    // No worker gang: populate the chooser with the serial closure.
    KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
    _g1->heap_region_iterate(&knownGarbagecl);
  }
  double known_garbage_end;
  if (G1PrintParCleanupStats) {
    known_garbage_end = os::elapsedTime();
    gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
                           (known_garbage_end - clear_marked_end)*1000.0);
  }
  _collectionSetChooser->sortMarkedHeapRegions();
  double sort_end;
  if (G1PrintParCleanupStats) {
    sort_end = os::elapsedTime();
    gclog_or_tty->print_cr(" sorting: %8.3f ms.",
                           (sort_end - known_garbage_end)*1000.0);
  }

  record_concurrent_mark_cleanup_end_work2();
  double work2_end;
  if (G1PrintParCleanupStats) {
    work2_end = os::elapsedTime();
    gclog_or_tty->print_cr(" work2: %8.3f ms.",
                           (work2_end - sort_end)*1000.0);
  }
}
ysr@777 2592
johnc@1829 2593 // Add the heap region at the head of the non-incremental collection set
ysr@777 2594 void G1CollectorPolicy::
ysr@777 2595 add_to_collection_set(HeapRegion* hr) {
johnc@1829 2596 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2597 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2598
ysr@777 2599 if (_g1->mark_in_progress())
ysr@777 2600 _g1->concurrent_mark()->registerCSetRegion(hr);
ysr@777 2601
johnc@1829 2602 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2603 hr->set_in_collection_set(true);
ysr@777 2604 hr->set_next_in_collection_set(_collection_set);
ysr@777 2605 _collection_set = hr;
ysr@777 2606 _collection_set_size++;
ysr@777 2607 _collection_set_bytes_used_before += hr->used();
tonyp@961 2608 _g1->register_region_with_in_cset_fast_test(hr);
ysr@777 2609 }
ysr@777 2610
// Initialize the per-collection-set information
// Resets the incremental cset list, its aggregate statistics and the
// predicted-cost accumulators, and marks the build state Active so that
// regions may be added.
void G1CollectorPolicy::start_incremental_cset_building() {
  assert(_inc_cset_build_state == Inactive, "Precondition");

  // Empty list and zeroed aggregate counters.
  _inc_cset_head = NULL;
  _inc_cset_tail = NULL;
  _inc_cset_size = 0;
  _inc_cset_bytes_used_before = 0;

  if (in_young_gc_mode()) {
    _inc_cset_young_index = 0;
  }

  // Reset the prediction-related accumulators.
  _inc_cset_max_finger = 0;
  _inc_cset_recorded_young_bytes = 0;
  _inc_cset_recorded_rs_lengths = 0;
  _inc_cset_predicted_elapsed_time_ms = 0;
  _inc_cset_predicted_bytes_to_copy = 0;
  _inc_cset_build_state = Active;
}
johnc@1829 2631
// Aggregate a region's policy information (RSet length, predicted
// elapsed time, used bytes) into the incremental cset totals and cache
// the same values in the region itself.
void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
  // This routine is used when:
  // * adding survivor regions to the incremental cset at the end of an
  //   evacuation pause,
  // * adding the current allocation region to the incremental cset
  //   when it is retired, and
  // * updating existing policy information for a region in the
  //   incremental cset via young list RSet sampling.
  // Therefore this routine may be called at a safepoint by the
  // VM thread, or in-between safepoints by mutator threads (when
  // retiring the current allocation region) or a concurrent
  // refine thread (RSet sampling).

  double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
  size_t used_bytes = hr->used();

  _inc_cset_recorded_rs_lengths += rs_length;
  _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;

  _inc_cset_bytes_used_before += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_recorded_rs_length(rs_length);
  hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);

#if PREDICTIONS_VERBOSE
  size_t bytes_to_copy = predict_bytes_to_copy(hr);
  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;

  // Record the number of bytes used in this region
  _inc_cset_recorded_young_bytes += used_bytes;

  // Cache the values we have added to the aggregated information
  // in the heap region in case we have to remove this region from
  // the incremental collection set, or it is updated by the
  // rset sampling code
  hr->set_predicted_bytes_to_copy(bytes_to_copy);
#endif // PREDICTIONS_VERBOSE
}
johnc@1829 2674
// Exact inverse of add_to_incremental_cset_info(): subtract the
// region's cached policy values from the incremental cset totals and
// clear the cached values in the region.
void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
  // This routine is currently only called as part of the updating of
  // existing policy information for regions in the incremental cset that
  // is performed by the concurrent refine thread(s) as part of young list
  // RSet sampling. Therefore we should not be at a safepoint.

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(hr->is_young(), "it should be");

  size_t used_bytes = hr->used();
  size_t old_rs_length = hr->recorded_rs_length();
  double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();

  // Subtract the old recorded/predicted policy information for
  // the given heap region from the collection set info.
  _inc_cset_recorded_rs_lengths -= old_rs_length;
  _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;

  _inc_cset_bytes_used_before -= used_bytes;

  // Clear the values cached in the heap region
  hr->set_recorded_rs_length(0);
  hr->set_predicted_elapsed_time_ms(0);

#if PREDICTIONS_VERBOSE
  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;

  // Subtract the number of bytes used in this region
  _inc_cset_recorded_young_bytes -= used_bytes;

  // Clear the values cached in the heap region
  hr->set_predicted_bytes_to_copy(0);
#endif // PREDICTIONS_VERBOSE
}
johnc@1829 2710
// Refresh a young region's contribution to the incremental cset totals
// after its remembered-set length has changed (RSet sampling): remove
// the old cached values, then re-add with the new length.
void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
  // Update the collection set information that is dependent on the new RS length
  assert(hr->is_young(), "Precondition");

  remove_from_incremental_cset_info(hr);
  add_to_incremental_cset_info(hr, new_rs_length);
}
johnc@1829 2718
// Bookkeeping shared by the LHS and RHS incremental-cset add paths:
// aggregates the region's policy info, marks it as in the cset, and
// assigns it the next young index.
void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
  assert( hr->is_young(), "invariant");
  assert( hr->young_index_in_cset() == -1, "invariant" );
  assert(_inc_cset_build_state == Active, "Precondition");

  // We need to clear and set the cached recorded/cached collection set
  // information in the heap region here (before the region gets added
  // to the collection set). An individual heap region's cached values
  // are calculated, aggregated with the policy collection set info,
  // and cached in the heap region here (initially) and (subsequently)
  // by the Young List sampling code.

  size_t rs_length = hr->rem_set()->occupied();
  add_to_incremental_cset_info(hr, rs_length);

  // Track the highest end address of any incremental-cset region.
  // NOTE(review): presumably this "finger" bounds concurrent mark's
  // scan of cset regions — confirm against its consumers.
  HeapWord* hr_end = hr->end();
  _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);

  assert(!hr->in_collection_set(), "invariant");
  hr->set_in_collection_set(true);
  assert( hr->next_in_collection_set() == NULL, "invariant");

  _inc_cset_size++;
  _g1->register_region_with_in_cset_fast_test(hr);

  // Hand out young indices in insertion order.
  hr->set_young_index_in_cset((int) _inc_cset_young_index);
  ++_inc_cset_young_index;
}
johnc@1829 2747
johnc@1829 2748 // Add the region at the RHS of the incremental cset
johnc@1829 2749 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2750 // We should only ever be appending survivors at the end of a pause
johnc@1829 2751 assert( hr->is_survivor(), "Logic");
johnc@1829 2752
johnc@1829 2753 // Do the 'common' stuff
johnc@1829 2754 add_region_to_incremental_cset_common(hr);
johnc@1829 2755
johnc@1829 2756 // Now add the region at the right hand side
johnc@1829 2757 if (_inc_cset_tail == NULL) {
johnc@1829 2758 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2759 _inc_cset_head = hr;
johnc@1829 2760 } else {
johnc@1829 2761 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2762 }
johnc@1829 2763 _inc_cset_tail = hr;
johnc@1829 2764 }
johnc@1829 2765
johnc@1829 2766 // Add the region to the LHS of the incremental cset
johnc@1829 2767 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2768 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2769 assert(!hr->is_survivor(), "Logic");
johnc@1829 2770
johnc@1829 2771 // Do the 'common' stuff
johnc@1829 2772 add_region_to_incremental_cset_common(hr);
johnc@1829 2773
johnc@1829 2774 // Add the region at the left hand side
johnc@1829 2775 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2776 if (_inc_cset_head == NULL) {
johnc@1829 2777 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2778 _inc_cset_tail = hr;
johnc@1829 2779 }
johnc@1829 2780 _inc_cset_head = hr;
johnc@1829 2781 }
johnc@1829 2782
johnc@1829 2783 #ifndef PRODUCT
johnc@1829 2784 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2785 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2786
johnc@1829 2787 st->print_cr("\nCollection_set:");
johnc@1829 2788 HeapRegion* csr = list_head;
johnc@1829 2789 while (csr != NULL) {
johnc@1829 2790 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2791 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2792 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2793 "age: %4d, y: %d, surv: %d",
johnc@1829 2794 csr->bottom(), csr->end(),
johnc@1829 2795 csr->top(),
johnc@1829 2796 csr->prev_top_at_mark_start(),
johnc@1829 2797 csr->next_top_at_mark_start(),
johnc@1829 2798 csr->top_at_conc_mark_count(),
johnc@1829 2799 csr->age_in_surv_rate_group_cond(),
johnc@1829 2800 csr->is_young(),
johnc@1829 2801 csr->is_survivor());
johnc@1829 2802 csr = next;
johnc@1829 2803 }
johnc@1829 2804 }
johnc@1829 2805 #endif // !PRODUCT
johnc@1829 2806
// Build the collection set for the upcoming pause. The young portion
// has already been constructed incrementally as regions were allocated;
// here it is adopted wholesale, and - when not doing fully-young GCs -
// marked non-young regions chosen by the CollectionSetChooser are
// appended for as long as the pause-time budget allows. Also records
// the time spent choosing the young and non-young portions.
void
G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
                                                  double target_pause_time_ms) {
  // Set this here - in case we're not doing young collections.
  double non_young_start_time_sec = os::elapsedTime();

  start_recording_regions();

  guarantee(target_pause_time_ms > 0.0,
            err_msg("target_pause_time_ms = %1.6lf should be positive",
                    target_pause_time_ms));
  guarantee(_collection_set == NULL, "Precondition");

  // Predicted fixed cost of the pause before any region is added
  // (e.g. processing the pending cards).
  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double predicted_pause_time_ms = base_time_ms;

  double time_remaining_ms = target_pause_time_ms - base_time_ms;

  // the 10% and 50% values are arbitrary...
  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
    // The base cost alone (nearly) exhausts the budget; grant a
    // minimum budget anyway and note that we expect to overshoot.
    time_remaining_ms = 0.50 * target_pause_time_ms;
    _within_target = false;
  } else {
    _within_target = true;
  }

  // We figure out the number of bytes available for future to-space.
  // For new regions without marking information, we must assume the
  // worst-case of complete survival. If we have marking information for a
  // region, we can bound the amount of live data. We can add a number of
  // such regions, as long as the sum of the live data bounds does not
  // exceed the available evacuation space.
  size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;

  size_t expansion_bytes =
    _g1->expansion_regions() * HeapRegion::GrainBytes;

  _collection_set_bytes_used_before = 0;
  _collection_set_size = 0;

  // Adjust for expansion and slop.
  max_live_bytes = max_live_bytes + expansion_bytes;

  HeapRegion* hr;
  if (in_young_gc_mode()) {
    double young_start_time_sec = os::elapsedTime();

    if (G1PolicyVerbose > 0) {
      gclog_or_tty->print_cr("Adding %d young regions to the CSet",
                             _g1->young_list()->length());
    }

    _young_cset_length = 0;
    _last_young_gc_full = full_young_gcs() ? true : false;

    if (_last_young_gc_full)
      ++_full_young_pause_num;
    else
      ++_partial_young_pause_num;

    // The young list is laid with the survivor regions from the previous
    // pause are appended to the RHS of the young list, i.e.
    //   [Newly Young Regions ++ Survivors from last pause].

    // Turn last pause's survivors into plain young regions - they will
    // be collected in this pause like any other young region.
    hr = _g1->young_list()->first_survivor_region();
    while (hr != NULL) {
      assert(hr->is_survivor(), "badly formed young list");
      hr->set_young();
      hr = hr->get_next_young_region();
    }

    // Clear the fields that point to the survivor list - they are
    // all young now.
    _g1->young_list()->clear_survivors();

    if (_g1->mark_in_progress())
      _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);

    // Adopt the incrementally-built young collection set wholesale.
    _young_cset_length = _inc_cset_young_index;
    _collection_set = _inc_cset_head;
    _collection_set_size = _inc_cset_size;
    _collection_set_bytes_used_before = _inc_cset_bytes_used_before;

    // For young regions in the collection set, we assume the worst
    // case of complete survival
    max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;

    // Charge the (pre-aggregated) predicted young cset cost against
    // the pause-time budget.
    time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
    predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;

    // The number of recorded young regions is the incremental
    // collection set's current size
    set_recorded_young_regions(_inc_cset_size);
    set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
    set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
#if PREDICTIONS_VERBOSE
    set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
#endif // PREDICTIONS_VERBOSE

    if (G1PolicyVerbose > 0) {
      gclog_or_tty->print_cr(" Added " PTR_FORMAT " Young Regions to CS.",
                             _inc_cset_size);
      gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
                             max_live_bytes/K);
    }

    assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");

    double young_end_time_sec = os::elapsedTime();
    _recorded_young_cset_choice_time_ms =
      (young_end_time_sec - young_start_time_sec) * 1000.0;

    // We are doing young collections so reset this.
    non_young_start_time_sec = young_end_time_sec;

    // Note we can use either _collection_set_size or
    // _young_cset_length here
    if (_collection_set_size > 0 && _last_young_gc_full) {
      // don't bother adding more regions...
      goto choose_collection_set_end;
    }
  }

  if (!in_young_gc_mode() || !full_young_gcs()) {
    bool should_continue = true;
    NumberSeq seq;
    double avg_prediction = 100000000000000000.0; // something very large

    // Greedily pull marked non-young regions from the chooser while we
    // have pause-time budget left (adaptive case) or until the fixed
    // young list length is reached (non-adaptive case).
    do {
      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                      avg_prediction);
      if (hr != NULL) {
        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
        time_remaining_ms -= predicted_time_ms;
        predicted_pause_time_ms += predicted_time_ms;
        add_to_collection_set(hr);
        record_non_young_cset_region(hr);
        max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
        if (G1PolicyVerbose > 0) {
          gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
                                 max_live_bytes/K);
        }
        seq.add(predicted_time_ms);
        avg_prediction = seq.avg() + seq.sd();
      }
      should_continue =
        ( hr != NULL) &&
        ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
          : _collection_set_size < _young_list_fixed_length );
    } while (should_continue);

    // Ran out of marked regions before reaching the fixed length:
    // revert to fully-young pauses.
    if (!adaptive_young_list_length() &&
        _collection_set_size < _young_list_fixed_length)
      _should_revert_to_full_young_gcs = true;
  }

choose_collection_set_end:
  stop_incremental_cset_building();

  count_CS_bytes_used();

  end_recording_regions();

  double non_young_end_time_sec = os::elapsedTime();
  _recorded_non_young_cset_choice_time_ms =
    (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
}
ysr@777 2974
void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
  G1CollectorPolicy::record_full_collection_end();
  // A full collection changes region liveness wholesale, so refresh the
  // collection set chooser's view of the heap afterwards.
  _collectionSetChooser->updateAfterFullCollection();
}
ysr@777 2979
ysr@777 2980 void G1CollectorPolicy_BestRegionsFirst::
ysr@777 2981 expand_if_possible(size_t numRegions) {
ysr@777 2982 size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
ysr@777 2983 _g1->expand(expansion_bytes);
ysr@777 2984 }
ysr@777 2985
void G1CollectorPolicy_BestRegionsFirst::
record_collection_pause_end() {
  G1CollectorPolicy::record_collection_pause_end();
  // Debug-only sanity check of the per-region marked-bytes accounting.
  assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
}

mercurial