src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

Thu, 07 Apr 2011 09:53:20 -0700

author
johnc
date
Thu, 07 Apr 2011 09:53:20 -0700
changeset 2781
e1162778c1c8
parent 2712
5c0b591e1074
child 2961
053d84a76d3d
permissions
-rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes

ysr@777 1 /*
tonyp@2472 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@2314 31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 32 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 33 #include "runtime/arguments.hpp"
stefank@2314 34 #include "runtime/java.hpp"
stefank@2314 35 #include "runtime/mutexLocker.hpp"
stefank@2314 36 #include "utilities/debug.hpp"
ysr@777 37
#define PREDICTIONS_VERBOSE 0

// <NEW PREDICTION>

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// Each table below has 8 entries and is indexed by the clamped parallel
// GC thread count (see the index calculation in the G1CollectorPolicy
// constructor: 0 when ParallelGCThreads == 0, 7 when ParallelGCThreads > 8,
// otherwise ParallelGCThreads - 1). Each selected value seeds the
// corresponding *_seq predictor in the constructor.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

// Seed values for _cost_per_card_ms_seq.
static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double fully_young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

// Seed values for _cost_per_entry_ms_seq.
static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

// Seed values for _cost_per_byte_ms_seq.
static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

// Seed values for _young_other_cost_per_region_ms_seq.
static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

// Seed values for _non_young_other_cost_per_region_ms_seq.
static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

// </NEW PREDICTION>
ysr@777 83
brutisso@2645 84 // Help class for avoiding interleaved logging
brutisso@2645 85 class LineBuffer: public StackObj {
brutisso@2645 86
brutisso@2645 87 private:
brutisso@2645 88 static const int BUFFER_LEN = 1024;
brutisso@2645 89 static const int INDENT_CHARS = 3;
brutisso@2645 90 char _buffer[BUFFER_LEN];
brutisso@2645 91 int _indent_level;
brutisso@2645 92 int _cur;
brutisso@2645 93
brutisso@2645 94 void vappend(const char* format, va_list ap) {
brutisso@2645 95 int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
brutisso@2645 96 if (res != -1) {
brutisso@2645 97 _cur += res;
brutisso@2645 98 } else {
brutisso@2645 99 DEBUG_ONLY(warning("buffer too small in LineBuffer");)
brutisso@2645 100 _buffer[BUFFER_LEN -1] = 0;
brutisso@2645 101 _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
brutisso@2645 102 }
brutisso@2645 103 }
brutisso@2645 104
brutisso@2645 105 public:
brutisso@2645 106 explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
brutisso@2645 107 for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
brutisso@2645 108 _buffer[_cur] = ' ';
brutisso@2645 109 }
brutisso@2645 110 }
brutisso@2645 111
brutisso@2645 112 #ifndef PRODUCT
brutisso@2645 113 ~LineBuffer() {
brutisso@2645 114 assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
brutisso@2645 115 }
brutisso@2645 116 #endif
brutisso@2645 117
brutisso@2645 118 void append(const char* format, ...) {
brutisso@2645 119 va_list ap;
brutisso@2645 120 va_start(ap, format);
brutisso@2645 121 vappend(format, ap);
brutisso@2645 122 va_end(ap);
brutisso@2645 123 }
brutisso@2645 124
brutisso@2645 125 void append_and_print_cr(const char* format, ...) {
brutisso@2645 126 va_list ap;
brutisso@2645 127 va_start(ap, format);
brutisso@2645 128 vappend(format, ap);
brutisso@2645 129 va_end(ap);
brutisso@2645 130 gclog_or_tty->print_cr("%s", _buffer);
brutisso@2645 131 _cur = _indent_level * INDENT_CHARS;
brutisso@2645 132 }
brutisso@2645 133 };
brutisso@2645 134
// The policy object is created before the heap (see the region-size setup
// note in the body), so only heap-independent work happens here. Nearly
// every field is set in the long initializer list below; the body then
// sizes the per-worker timing arrays, seeds the predictor sequences with
// the per-thread-count defaults, and validates the pause-time flags.
G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),


  _n_pauses(0),
  _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _all_pause_times_ms(new NumberSeq()),
  _stop_world_start(0.0),
  _all_stop_world_times_ms(new NumberSeq()),
  _all_yield_times_ms(new NumberSeq()),

  _all_mod_union_times_ms(new NumberSeq()),

  _summary(new Summary()),

#ifndef PRODUCT
  _cur_clear_ct_time_ms(0.0),
  _min_clear_cc_time_ms(-1.0),
  _max_clear_cc_time_ms(-1.0),
  _cur_clear_cc_time_ms(0.0),
  _cum_clear_cc_time_ms(0.0),
  _num_cc_clears(0L),
#endif

  _region_num_young(0),
  _region_num_tenured(0),
  _prev_region_num_young(0),
  _prev_region_num_tenured(0),

  // Auxiliary timers: 10 slots, arrays sized by _aux_num.
  _aux_num(10),
  _all_aux_times_ms(new NumberSeq[_aux_num]),
  _cur_aux_start_times_ms(new double[_aux_num]),
  _cur_aux_times_ms(new double[_aux_num]),
  _cur_aux_times_set(new bool[_aux_num]),

  _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  // <NEW PREDICTION>

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cards_per_entry_ratio_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  // </NEW PREDICTION>

  _in_young_gc_mode(false),
  _full_young_gcs(true),
  _full_young_pause_num(0),
  _partial_young_pause_num(0),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _known_garbage_ratio(0.0),
  _known_garbage_bytes(0),

  _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),

  _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),
  _num_markings(0),
  _n_marks(0),
  _n_pauses_at_mark_end(0),

  _all_full_gc_times_ms(new NumberSeq()),

  // G1PausesBtwnConcMark defaults to -1
  // so the hack is to do the cast QQQ FIXME
  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
  _n_marks_since_last_pause(0),
  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _should_revert_to_full_young_gcs(false),
  _last_full_young_gc(false),

  _prev_collection_pause_used_at_end_bytes(0),

  _collection_set(NULL),
  _collection_set_size(0),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_size(0),
  _inc_cset_young_index(0),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_young_bytes(0),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_bytes_to_copy(0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0)

{
  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  // Verify PLAB sizes
  const uint region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  // Per-GC-worker timing arrays, sized by the parallel GC thread count.
  _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
  _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
  _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];

  _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
  _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];

  _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];

  _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];

  _par_last_termination_times_ms = new double[_parallel_gc_threads];
  _par_last_termination_attempts = new double[_parallel_gc_threads];
  _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
  _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];

  // start conservatively
  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;

  // <NEW PREDICTION>

  // Clamp the GC thread count into the 8-entry defaults tables defined
  // at the top of this file.
  int index;
  if (ParallelGCThreads == 0)
    index = 0;
  else if (ParallelGCThreads > 8)
    index = 7;
  else
    index = ParallelGCThreads - 1;

  // Seed each predictor sequence with its per-thread-count default.
  _pending_card_diff_seq->add(0.0);
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _fully_young_cards_per_entry_ratio_seq->add(
                            fully_young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                             young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                         non_young_other_cost_per_region_ms_defaults[index]);

  // </NEW PREDICTION>

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  // Hand the (now consistent) pause goal and interval to the MMU tracker.
  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  // Confidence factor for the predictors, as a fraction of
  // G1ConfidencePercent.
  _sigma = (double) G1ConfidencePercent / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_init_times_ms->add(0.05);
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;

  // if G1FixedSurvivorSpaceSize is 0 which means the size is not
  // fixed, then _max_survivor_regions will be calculated at
  // calculate_young_list_target_length during initialization
  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  initialize_all();
}
ysr@777 419
// Advance "i" by one, wrapping back to zero when it reaches "len".
static void inc_mod(int& i, int len) {
  i = (i + 1 == len) ? 0 : i + 1;
}
ysr@777 424
// Flag/alignment setup run during CollectorPolicy initialization:
// alignments are derived from the G1 region size, and SurvivorRatio is
// validated before delegating to the generic flag processing.
void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
ysr@777 433
tonyp@1720 434 // The easiest way to deal with the parsing of the NewSize /
tonyp@1720 435 // MaxNewSize / etc. parameters is to re-use the code in the
tonyp@1720 436 // TwoGenerationCollectorPolicy class. This is similar to what
tonyp@1720 437 // ParallelScavenge does with its GenerationSizer class (see
tonyp@1720 438 // ParallelScavengeHeap::initialize()). We might change this in the
tonyp@1720 439 // future, but it's a good start.
tonyp@1720 440 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
tonyp@1720 441 size_t size_to_region_num(size_t byte_size) {
tonyp@1720 442 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
tonyp@1720 443 }
tonyp@1720 444
tonyp@1720 445 public:
tonyp@1720 446 G1YoungGenSizer() {
tonyp@1720 447 initialize_flags();
tonyp@1720 448 initialize_size_info();
tonyp@1720 449 }
tonyp@1720 450
tonyp@1720 451 size_t min_young_region_num() {
tonyp@1720 452 return size_to_region_num(_min_gen0_size);
tonyp@1720 453 }
tonyp@1720 454 size_t initial_young_region_num() {
tonyp@1720 455 return size_to_region_num(_initial_gen0_size);
tonyp@1720 456 }
tonyp@1720 457 size_t max_young_region_num() {
tonyp@1720 458 return size_to_region_num(_max_gen0_size);
tonyp@1720 459 }
tonyp@1720 460 };
tonyp@1720 461
// Second-stage initialization, called once the heap exists (the
// constructor runs before the heap is created). Sets up the young list
// sizing mode and starts incremental collection-set building.
void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (G1Gen) {
    _in_young_gc_mode = true;

    // Parse NewSize / MaxNewSize etc. via the two-generation sizer.
    G1YoungGenSizer sizer;
    size_t initial_region_num = sizer.initial_young_region_num();

    if (UseAdaptiveSizePolicy) {
      // Adaptive mode: the young list length is recomputed from the
      // pause-time predictions; no fixed length is used.
      set_adaptive_young_list_length(true);
      _young_list_fixed_length = 0;
    } else {
      set_adaptive_young_list_length(false);
      _young_list_fixed_length = initial_region_num;
    }
    _free_regions_at_end_of_collection = _g1->free_regions();
    calculate_young_list_min_length();
    // No allocation-rate samples exist yet, so the minimum must be 0.
    guarantee( _young_list_min_length == 0, "invariant, not enough info" );
    calculate_young_list_target_length();
  } else {
    _young_list_fixed_length = 0;
    _in_young_gc_mode = false;
  }

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}
ysr@777 496
// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters()
{
  // NOTE(review): the third argument is the generation count reported to
  // jstat — presumably 2 or 3 depending on G1Gen; confirm against the
  // GCPolicyCounters constructor.
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
}
apetrusenko@980 502
ysr@777 503 void G1CollectorPolicy::calculate_young_list_min_length() {
ysr@777 504 _young_list_min_length = 0;
ysr@777 505
ysr@777 506 if (!adaptive_young_list_length())
ysr@777 507 return;
ysr@777 508
ysr@777 509 if (_alloc_rate_ms_seq->num() > 3) {
ysr@777 510 double now_sec = os::elapsedTime();
ysr@777 511 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
ysr@777 512 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@2315 513 size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@2315 514 size_t current_region_num = _g1->young_list()->length();
ysr@777 515 _young_list_min_length = min_regions + current_region_num;
ysr@777 516 }
ysr@777 517 }
ysr@777 518
johnc@1829 519 void G1CollectorPolicy::calculate_young_list_target_length() {
ysr@777 520 if (adaptive_young_list_length()) {
ysr@777 521 size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
johnc@1829 522 calculate_young_list_target_length(rs_lengths);
ysr@777 523 } else {
ysr@777 524 if (full_young_gcs())
ysr@777 525 _young_list_target_length = _young_list_fixed_length;
ysr@777 526 else
ysr@777 527 _young_list_target_length = _young_list_fixed_length / 2;
ysr@777 528 }
tonyp@2315 529
tonyp@2315 530 // Make sure we allow the application to allocate at least one
tonyp@2315 531 // region before we need to do a collection again.
tonyp@2315 532 size_t min_length = _g1->young_list()->length() + 1;
tonyp@2315 533 _young_list_target_length = MAX2(_young_list_target_length, min_length);
tonyp@2333 534 calculate_max_gc_locker_expansion();
apetrusenko@980 535 calculate_survivors_policy();
ysr@777 536 }
ysr@777 537
johnc@1829 538 void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
ysr@777 539 guarantee( adaptive_young_list_length(), "pre-condition" );
johnc@1829 540 guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
ysr@777 541
ysr@777 542 double start_time_sec = os::elapsedTime();
tonyp@1717 543 size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
ysr@777 544 min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
ysr@777 545 size_t reserve_regions =
ysr@777 546 (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
ysr@777 547
ysr@777 548 if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
ysr@777 549 // we are in fully-young mode and there are free regions in the heap
ysr@777 550
apetrusenko@980 551 double survivor_regions_evac_time =
apetrusenko@980 552 predict_survivor_regions_evac_time();
apetrusenko@980 553
ysr@777 554 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
ysr@777 555 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
ysr@777 556 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
johnc@1829 557 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
apetrusenko@980 558 double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
apetrusenko@980 559 + survivor_regions_evac_time;
johnc@1829 560
ysr@777 561 // the result
ysr@777 562 size_t final_young_length = 0;
johnc@1829 563
johnc@1829 564 size_t init_free_regions =
johnc@1829 565 MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
johnc@1829 566
johnc@1829 567 // if we're still under the pause target...
johnc@1829 568 if (base_time_ms <= target_pause_time_ms) {
johnc@1829 569 // We make sure that the shortest young length that makes sense
johnc@1829 570 // fits within the target pause time.
johnc@1829 571 size_t min_young_length = 1;
johnc@1829 572
johnc@1829 573 if (predict_will_fit(min_young_length, base_time_ms,
johnc@1829 574 init_free_regions, target_pause_time_ms)) {
johnc@1829 575 // The shortest young length will fit within the target pause time;
johnc@1829 576 // we'll now check whether the absolute maximum number of young
johnc@1829 577 // regions will fit in the target pause time. If not, we'll do
johnc@1829 578 // a binary search between min_young_length and max_young_length
johnc@1829 579 size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
johnc@1829 580 size_t max_young_length = abs_max_young_length;
johnc@1829 581
johnc@1829 582 if (max_young_length > min_young_length) {
johnc@1829 583 // Let's check if the initial max young length will fit within the
johnc@1829 584 // target pause. If so then there is no need to search for a maximal
johnc@1829 585 // young length - we'll return the initial maximum
johnc@1829 586
johnc@1829 587 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 588 init_free_regions, target_pause_time_ms)) {
johnc@1829 589 // The maximum young length will satisfy the target pause time.
johnc@1829 590 // We are done so set min young length to this maximum length.
johnc@1829 591 // The code after the loop will then set final_young_length using
johnc@1829 592 // the value cached in the minimum length.
johnc@1829 593 min_young_length = max_young_length;
johnc@1829 594 } else {
johnc@1829 595 // The maximum possible number of young regions will not fit within
johnc@1829 596 // the target pause time so let's search....
johnc@1829 597
johnc@1829 598 size_t diff = (max_young_length - min_young_length) / 2;
johnc@1829 599 max_young_length = min_young_length + diff;
johnc@1829 600
johnc@1829 601 while (max_young_length > min_young_length) {
johnc@1829 602 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 603 init_free_regions, target_pause_time_ms)) {
johnc@1829 604
johnc@1829 605 // The current max young length will fit within the target
johnc@1829 606 // pause time. Note we do not exit the loop here. By setting
johnc@1829 607 // min = max, and then increasing the max below means that
johnc@1829 608 // we will continue searching for an upper bound in the
johnc@1829 609 // range [max..max+diff]
johnc@1829 610 min_young_length = max_young_length;
johnc@1829 611 }
johnc@1829 612 diff = (max_young_length - min_young_length) / 2;
johnc@1829 613 max_young_length = min_young_length + diff;
johnc@1829 614 }
johnc@1829 615 // the above loop found a maximal young length that will fit
johnc@1829 616 // within the target pause time.
johnc@1829 617 }
johnc@1829 618 assert(min_young_length <= abs_max_young_length, "just checking");
johnc@1829 619 }
johnc@1829 620 final_young_length = min_young_length;
johnc@1829 621 }
ysr@777 622 }
johnc@1829 623 // and we're done!
ysr@777 624
ysr@777 625 // we should have at least one region in the target young length
apetrusenko@980 626 _young_list_target_length =
tonyp@2315 627 final_young_length + _recorded_survivor_regions;
ysr@777 628
ysr@777 629 // let's keep an eye of how long we spend on this calculation
ysr@777 630 // right now, I assume that we'll print it when we need it; we
ysr@777 631 // should really adde it to the breakdown of a pause
ysr@777 632 double end_time_sec = os::elapsedTime();
ysr@777 633 double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
ysr@777 634
johnc@1829 635 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 636 // leave this in for debugging, just in case
johnc@1829 637 gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
johnc@1829 638 "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
ysr@777 639 target_pause_time_ms,
johnc@1829 640 _young_list_target_length
ysr@777 641 elapsed_time_ms,
ysr@777 642 full_young_gcs() ? "full" : "partial",
tonyp@1794 643 during_initial_mark_pause() ? " i-m" : "",
apetrusenko@980 644 _in_marking_window,
apetrusenko@980 645 _in_marking_window_im);
johnc@1829 646 #endif // TRACE_CALC_YOUNG_LENGTH
ysr@777 647
ysr@777 648 if (_young_list_target_length < _young_list_min_length) {
johnc@1829 649 // bummer; this means that, if we do a pause when the maximal
johnc@1829 650 // length dictates, we'll violate the pause spacing target (the
ysr@777 651 // min length was calculate based on the application's current
ysr@777 652 // alloc rate);
ysr@777 653
ysr@777 654 // so, we have to bite the bullet, and allocate the minimum
ysr@777 655 // number. We'll violate our target, but we just can't meet it.
ysr@777 656
johnc@1829 657 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 658 // leave this in for debugging, just in case
ysr@777 659 gclog_or_tty->print_cr("adjusted target length from "
johnc@1829 660 SIZE_FORMAT " to " SIZE_FORMAT,
johnc@1829 661 _young_list_target_length, _young_list_min_length);
johnc@1829 662 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 663
johnc@1829 664 _young_list_target_length = _young_list_min_length;
ysr@777 665 }
ysr@777 666 } else {
ysr@777 667 // we are in a partially-young mode or we've run out of regions (due
ysr@777 668 // to evacuation failure)
ysr@777 669
johnc@1829 670 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 671 // leave this in for debugging, just in case
ysr@777 672 gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
johnc@1829 673 _young_list_min_length);
johnc@1829 674 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 675 // we'll do the pause as soon as possible by choosing the minimum
tonyp@2315 676 _young_list_target_length = _young_list_min_length;
ysr@777 677 }
ysr@777 678
ysr@777 679 _rs_lengths_prediction = rs_lengths;
ysr@777 680 }
ysr@777 681
// This is used by: calculate_young_list_target_length(rs_length). It
// returns true iff:
//   the predicted pause time for the given young list will not overflow
//   the target pause time
// and:
//   the predicted amount of surviving data will not overflow the
//   amount of free space available for survivor regions.
//
ysr@777 690 bool
johnc@1829 691 G1CollectorPolicy::predict_will_fit(size_t young_length,
johnc@1829 692 double base_time_ms,
johnc@1829 693 size_t init_free_regions,
johnc@1829 694 double target_pause_time_ms) {
ysr@777 695
ysr@777 696 if (young_length >= init_free_regions)
ysr@777 697 // end condition 1: not enough space for the young regions
ysr@777 698 return false;
ysr@777 699
ysr@777 700 double accum_surv_rate_adj = 0.0;
ysr@777 701 double accum_surv_rate =
ysr@777 702 accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
johnc@1829 703
ysr@777 704 size_t bytes_to_copy =
ysr@777 705 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
johnc@1829 706
ysr@777 707 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
johnc@1829 708
ysr@777 709 double young_other_time_ms =
johnc@1829 710 predict_young_other_time_ms(young_length);
johnc@1829 711
ysr@777 712 double pause_time_ms =
johnc@1829 713 base_time_ms + copy_time_ms + young_other_time_ms;
ysr@777 714
ysr@777 715 if (pause_time_ms > target_pause_time_ms)
ysr@777 716 // end condition 2: over the target pause time
ysr@777 717 return false;
ysr@777 718
ysr@777 719 size_t free_bytes =
ysr@777 720 (init_free_regions - young_length) * HeapRegion::GrainBytes;
ysr@777 721
ysr@777 722 if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
ysr@777 723 // end condition 3: out of to-space (conservatively)
ysr@777 724 return false;
ysr@777 725
ysr@777 726 // success!
ysr@777 727 return true;
ysr@777 728 }
ysr@777 729
apetrusenko@980 730 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 731 double survivor_regions_evac_time = 0.0;
apetrusenko@980 732 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 733 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 734 r = r->get_next_young_region()) {
apetrusenko@980 735 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 736 }
apetrusenko@980 737 return survivor_regions_evac_time;
apetrusenko@980 738 }
apetrusenko@980 739
ysr@777 740 void G1CollectorPolicy::check_prediction_validity() {
ysr@777 741 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 742
johnc@1829 743 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 744 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 745 // add 10% to avoid having to recalculate often
ysr@777 746 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
johnc@1829 747 calculate_young_list_target_length(rs_lengths_prediction);
ysr@777 748 }
ysr@777 749 }
ysr@777 750
// Unused by G1 (see the guarantee below): G1's allocation path does not
// go through this policy entry point.
HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 757
// This method controls how a collector handles one or more
// of its generations being fully allocated.
// Unused by G1 (see the guarantee below): failed allocations are handled
// elsewhere, not through this policy entry point.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
ysr@777 765
ysr@777 766
ysr@777 767 #ifndef PRODUCT
ysr@777 768 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 769 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 770 return
ysr@777 771 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 772 // also call verify_young_ages on any additional surv rate groups
ysr@777 773 }
ysr@777 774
ysr@777 775 bool
ysr@777 776 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 777 SurvRateGroup *surv_rate_group) {
ysr@777 778 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 779
ysr@777 780 const char* name = surv_rate_group->name();
ysr@777 781 bool ret = true;
ysr@777 782 int prev_age = -1;
ysr@777 783
ysr@777 784 for (HeapRegion* curr = head;
ysr@777 785 curr != NULL;
ysr@777 786 curr = curr->get_next_young_region()) {
ysr@777 787 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 788 if (group == NULL && !curr->is_survivor()) {
ysr@777 789 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 790 ret = false;
ysr@777 791 }
ysr@777 792
ysr@777 793 if (surv_rate_group == group) {
ysr@777 794 int age = curr->age_in_surv_rate_group();
ysr@777 795
ysr@777 796 if (age < 0) {
ysr@777 797 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 798 ret = false;
ysr@777 799 }
ysr@777 800
ysr@777 801 if (age <= prev_age) {
ysr@777 802 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 803 "(%d, %d)", name, age, prev_age);
ysr@777 804 ret = false;
ysr@777 805 }
ysr@777 806 prev_age = age;
ysr@777 807 }
ysr@777 808 }
ysr@777 809
ysr@777 810 return ret;
ysr@777 811 }
ysr@777 812 #endif // PRODUCT
ysr@777 813
ysr@777 814 void G1CollectorPolicy::record_full_collection_start() {
ysr@777 815 _cur_collection_start_sec = os::elapsedTime();
ysr@777 816 // Release the future to-space so that it is available for compaction into.
ysr@777 817 _g1->set_full_collection();
ysr@777 818 }
ysr@777 819
// Bookkeeping after a full (stop-the-world) collection: record its
// duration and reset the young-GC heuristics to a fresh state.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _cur_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  // Accumulate for the end-of-run summary output.
  _all_full_gc_times_ms->add(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the fully/partially young GC
  // transitions and make sure we start with fully young GCs after the
  // Full GC.
  set_full_young_gcs(true);
  _last_full_young_gc = false;
  _should_revert_to_full_young_gcs = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  // Previous garbage estimates no longer apply after the heap changed.
  _known_garbage_bytes = 0;
  _known_garbage_ratio = 0.0;
  // Clear the marking-window flags used by the pause-time predictions.
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // No recorded survivor regions remain after a full collection.
  record_survivor_regions(0, NULL, NULL);

  _prev_region_num_young = _region_num_young;
  _prev_region_num_tenured = _region_num_tenured;

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  // Re-derive young list sizing from the post-GC heap state.
  calculate_young_list_min_length();
  calculate_young_list_target_length();
}
ysr@777 860
ysr@777 861 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
ysr@777 862 _bytes_in_to_space_before_gc += bytes;
ysr@777 863 }
ysr@777 864
ysr@777 865 void G1CollectorPolicy::record_after_bytes(size_t bytes) {
ysr@777 866 _bytes_in_to_space_after_gc += bytes;
ysr@777 867 }
ysr@777 868
ysr@777 869 void G1CollectorPolicy::record_stop_world_start() {
ysr@777 870 _stop_world_start = os::elapsedTime();
ysr@777 871 }
ysr@777 872
// Bookkeeping at the start of an evacuation pause: log the pause header,
// record timing/usage baselines, and reset the per-pause statistics.
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  if (PrintGCDetails) {
    gclog_or_tty->stamp(PrintGCTimeStamps);
    gclog_or_tty->print("[GC pause");
    if (in_young_gc_mode())
      gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

  // The cached used() accounting must agree with a full recount.
  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Time between the stop-world request and the pause actually starting.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);
  _stop_world_start = 0.0;

  // Baselines used by record_collection_pause_end() to compute deltas.
  _cur_collection_start_sec = start_time_sec;
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();
  _max_pending_cards = _g1->max_pending_card_num();

  _bytes_in_to_space_before_gc = 0;
  _bytes_in_to_space_after_gc = 0;
  _bytes_in_collection_set_before_gc = 0;

#ifdef DEBUG
  // initialise these to something well known so that we can spot
  // if they are not set properly

  for (int i = 0; i < _parallel_gc_threads; ++i) {
    _par_last_gc_worker_start_times_ms[i] = -1234.0;
    _par_last_ext_root_scan_times_ms[i] = -1234.0;
    _par_last_mark_stack_scan_times_ms[i] = -1234.0;
    _par_last_update_rs_times_ms[i] = -1234.0;
    _par_last_update_rs_processed_buffers[i] = -1234.0;
    _par_last_scan_rs_times_ms[i] = -1234.0;
    _par_last_obj_copy_times_ms[i] = -1234.0;
    _par_last_termination_times_ms[i] = -1234.0;
    _par_last_termination_attempts[i] = -1234.0;
    _par_last_gc_worker_end_times_ms[i] = -1234.0;
    _par_last_gc_worker_times_ms[i] = -1234.0;
  }
#endif

  // Reset the per-pause auxiliary timing slots.
  for (int i = 0; i < _aux_num; ++i) {
    _cur_aux_times_ms[i] = 0.0;
    _cur_aux_times_set[i] = false;
  }

  _satb_drain_time_set = false;
  _last_satb_drain_processed_buffers = -1;

  if (in_young_gc_mode())
    _last_young_gc_full = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
ysr@777 936
// Record the mark closure time for this pause; it is folded into the
// pause summary in record_collection_pause_end().
void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
  _mark_closure_time_ms = mark_closure_time_ms;
}
ysr@777 940
// Note when the concurrent-mark initialization started; the matching
// record_concurrent_mark_init_end() measures from this timestamp.
void G1CollectorPolicy::record_concurrent_mark_init_start() {
  _mark_init_start_sec = os::elapsedTime();
  guarantee(!in_young_gc_mode(), "should not do be here in young GC mode");
}
ysr@777 945
// Common bookkeeping once concurrent-mark initialization has completed:
// flip into the "during marking" state and record the stop-world cost.
void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}
ysr@777 953
ysr@777 954 void G1CollectorPolicy::record_concurrent_mark_init_end() {
ysr@777 955 double end_time_sec = os::elapsedTime();
ysr@777 956 double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
ysr@777 957 _concurrent_mark_init_times_ms->add(elapsed_time_ms);
ysr@777 958 record_concurrent_mark_init_end_pre(elapsed_time_ms);
ysr@777 959
ysr@777 960 _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
ysr@777 961 }
ysr@777 962
ysr@777 963 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
ysr@777 964 _mark_remark_start_sec = os::elapsedTime();
ysr@777 965 _during_marking = false;
ysr@777 966 }
ysr@777 967
ysr@777 968 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 969 double end_time_sec = os::elapsedTime();
ysr@777 970 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 971 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 972 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 973 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 974
ysr@777 975 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 976 }
ysr@777 977
ysr@777 978 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
ysr@777 979 _mark_cleanup_start_sec = os::elapsedTime();
ysr@777 980 }
ysr@777 981
// Delegates to two helpers: work1 records marking statistics, work2
// records timing (it reads os::elapsedTime()) and MMU information.
void
G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                      size_t max_live_bytes) {
  record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
  record_concurrent_mark_cleanup_end_work2();
}
ysr@777 988
ysr@777 989 void
ysr@777 990 G1CollectorPolicy::
ysr@777 991 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 992 size_t max_live_bytes) {
ysr@777 993 if (_n_marks < 2) _n_marks++;
ysr@777 994 if (G1PolicyVerbose > 0)
ysr@777 995 gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
ysr@777 996 " (of " SIZE_FORMAT " MB heap).",
ysr@777 997 max_live_bytes/M, _g1->capacity()/M);
ysr@777 998 }
ysr@777 999
ysr@777 1000 // The important thing about this is that it includes "os::elapsedTime".
ysr@777 1001 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
ysr@777 1002 double end_time_sec = os::elapsedTime();
ysr@777 1003 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
ysr@777 1004 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
ysr@777 1005 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 1006 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 1007
ysr@777 1008 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
ysr@777 1009
ysr@777 1010 _num_markings++;
ysr@777 1011
ysr@777 1012 // We did a marking, so reset the "since_last_mark" variables.
ysr@777 1013 double considerConcMarkCost = 1.0;
ysr@777 1014 // If there are available processors, concurrent activity is free...
ysr@777 1015 if (Threads::number_of_non_daemon_threads() * 2 <
ysr@777 1016 os::active_processor_count()) {
ysr@777 1017 considerConcMarkCost = 0.0;
ysr@777 1018 }
ysr@777 1019 _n_pauses_at_mark_end = _n_pauses;
ysr@777 1020 _n_marks_since_last_pause++;
ysr@777 1021 }
ysr@777 1022
ysr@777 1023 void
ysr@777 1024 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
ysr@777 1025 if (in_young_gc_mode()) {
ysr@777 1026 _should_revert_to_full_young_gcs = false;
ysr@777 1027 _last_full_young_gc = true;
ysr@777 1028 _in_marking_window = false;
ysr@777 1029 if (adaptive_young_list_length())
johnc@1829 1030 calculate_young_list_target_length();
ysr@777 1031 }
ysr@777 1032 }
ysr@777 1033
ysr@777 1034 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 1035 if (_stop_world_start > 0.0) {
ysr@777 1036 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 1037 _all_yield_times_ms->add(yield_ms);
ysr@777 1038 }
ysr@777 1039 }
ysr@777 1040
void G1CollectorPolicy::record_concurrent_pause_end() {
  // Intentionally empty: no bookkeeping is currently needed when a
  // concurrent-phase yield ends (cf. record_concurrent_pause()).
}
ysr@777 1043
ysr@777 1044 void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
ysr@777 1045 _cur_CH_strong_roots_end_sec = os::elapsedTime();
ysr@777 1046 _cur_CH_strong_roots_dur_ms =
ysr@777 1047 (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
ysr@777 1048 }
ysr@777 1049
ysr@777 1050 void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
ysr@777 1051 _cur_G1_strong_roots_end_sec = os::elapsedTime();
ysr@777 1052 _cur_G1_strong_roots_dur_ms =
ysr@777 1053 (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
ysr@777 1054 }
ysr@777 1055
// Sums n entries of the circular buffer sum_arr (capacity N), starting
// at index start and wrapping modulo N.
template<class T>
T sum_of(T* sum_arr, int start, int n, int N) {
  T total = (T)0;
  int idx = start;
  for (int k = 0; k < n; k++) {
    total += sum_arr[idx % N];
    idx++;
  }
  return total;
}
ysr@777 1065
tonyp@1966 1066 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1067 const char* str,
brutisso@2712 1068 double* data) {
ysr@777 1069 double min = data[0], max = data[0];
ysr@777 1070 double total = 0.0;
brutisso@2645 1071 LineBuffer buf(level);
brutisso@2645 1072 buf.append("[%s (ms):", str);
ysr@777 1073 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1074 double val = data[i];
ysr@777 1075 if (val < min)
ysr@777 1076 min = val;
ysr@777 1077 if (val > max)
ysr@777 1078 max = val;
ysr@777 1079 total += val;
brutisso@2645 1080 buf.append(" %3.1lf", val);
ysr@777 1081 }
brutisso@2712 1082 buf.append_and_print_cr("");
brutisso@2712 1083 double avg = total / (double) ParallelGCThreads;
brutisso@2712 1084 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]",
brutisso@2712 1085 avg, min, max, max - min);
ysr@777 1086 }
ysr@777 1087
tonyp@1966 1088 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1089 const char* str,
brutisso@2712 1090 double* data) {
ysr@777 1091 double min = data[0], max = data[0];
ysr@777 1092 double total = 0.0;
brutisso@2645 1093 LineBuffer buf(level);
brutisso@2645 1094 buf.append("[%s :", str);
ysr@777 1095 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1096 double val = data[i];
ysr@777 1097 if (val < min)
ysr@777 1098 min = val;
ysr@777 1099 if (val > max)
ysr@777 1100 max = val;
ysr@777 1101 total += val;
brutisso@2645 1102 buf.append(" %d", (int) val);
ysr@777 1103 }
brutisso@2712 1104 buf.append_and_print_cr("");
brutisso@2712 1105 double avg = total / (double) ParallelGCThreads;
brutisso@2712 1106 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]",
brutisso@2712 1107 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min);
ysr@777 1108 }
ysr@777 1109
ysr@777 1110 void G1CollectorPolicy::print_stats (int level,
ysr@777 1111 const char* str,
ysr@777 1112 double value) {
brutisso@2645 1113 LineBuffer(level).append_and_print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1114 }
ysr@777 1115
ysr@777 1116 void G1CollectorPolicy::print_stats (int level,
ysr@777 1117 const char* str,
ysr@777 1118 int value) {
brutisso@2645 1119 LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
ysr@777 1120 }
ysr@777 1121
ysr@777 1122 double G1CollectorPolicy::avg_value (double* data) {
jmasa@2188 1123 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1124 double ret = 0.0;
ysr@777 1125 for (uint i = 0; i < ParallelGCThreads; ++i)
ysr@777 1126 ret += data[i];
ysr@777 1127 return ret / (double) ParallelGCThreads;
ysr@777 1128 } else {
ysr@777 1129 return data[0];
ysr@777 1130 }
ysr@777 1131 }
ysr@777 1132
ysr@777 1133 double G1CollectorPolicy::max_value (double* data) {
jmasa@2188 1134 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1135 double ret = data[0];
ysr@777 1136 for (uint i = 1; i < ParallelGCThreads; ++i)
ysr@777 1137 if (data[i] > ret)
ysr@777 1138 ret = data[i];
ysr@777 1139 return ret;
ysr@777 1140 } else {
ysr@777 1141 return data[0];
ysr@777 1142 }
ysr@777 1143 }
ysr@777 1144
ysr@777 1145 double G1CollectorPolicy::sum_of_values (double* data) {
jmasa@2188 1146 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1147 double sum = 0.0;
ysr@777 1148 for (uint i = 0; i < ParallelGCThreads; i++)
ysr@777 1149 sum += data[i];
ysr@777 1150 return sum;
ysr@777 1151 } else {
ysr@777 1152 return data[0];
ysr@777 1153 }
ysr@777 1154 }
ysr@777 1155
ysr@777 1156 double G1CollectorPolicy::max_sum (double* data1,
ysr@777 1157 double* data2) {
ysr@777 1158 double ret = data1[0] + data2[0];
ysr@777 1159
jmasa@2188 1160 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1161 for (uint i = 1; i < ParallelGCThreads; ++i) {
ysr@777 1162 double data = data1[i] + data2[i];
ysr@777 1163 if (data > ret)
ysr@777 1164 ret = data;
ysr@777 1165 }
ysr@777 1166 }
ysr@777 1167 return ret;
ysr@777 1168 }
ysr@777 1169
ysr@777 1170 // Anything below that is considered to be zero
ysr@777 1171 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1172
tonyp@2062 1173 void G1CollectorPolicy::record_collection_pause_end() {
ysr@777 1174 double end_time_sec = os::elapsedTime();
ysr@777 1175 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1176 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 1177 double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
ysr@777 1178 size_t rs_size =
ysr@777 1179 _cur_collection_pause_used_regions_at_start - collection_set_size();
ysr@777 1180 size_t cur_used_bytes = _g1->used();
ysr@777 1181 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1182 bool last_pause_included_initial_mark = false;
tonyp@2062 1183 bool update_stats = !_g1->evacuation_failed();
ysr@777 1184
ysr@777 1185 #ifndef PRODUCT
ysr@777 1186 if (G1YoungSurvRateVerbose) {
ysr@777 1187 gclog_or_tty->print_cr("");
ysr@777 1188 _short_lived_surv_rate_group->print();
ysr@777 1189 // do that for any other surv rate groups too
ysr@777 1190 }
ysr@777 1191 #endif // PRODUCT
ysr@777 1192
ysr@777 1193 if (in_young_gc_mode()) {
tonyp@1794 1194 last_pause_included_initial_mark = during_initial_mark_pause();
ysr@777 1195 if (last_pause_included_initial_mark)
ysr@777 1196 record_concurrent_mark_init_end_pre(0.0);
ysr@777 1197
ysr@777 1198 size_t min_used_targ =
tonyp@1718 1199 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
ysr@777 1200
tonyp@1794 1201
tonyp@1794 1202 if (!_g1->mark_in_progress() && !_last_full_young_gc) {
tonyp@1794 1203 assert(!last_pause_included_initial_mark, "invariant");
tonyp@1794 1204 if (cur_used_bytes > min_used_targ &&
tonyp@1794 1205 cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
tonyp@1794 1206 assert(!during_initial_mark_pause(), "we should not see this here");
tonyp@1794 1207
tonyp@1794 1208 // Note: this might have already been set, if during the last
tonyp@1794 1209 // pause we decided to start a cycle but at the beginning of
tonyp@1794 1210 // this pause we decided to postpone it. That's OK.
tonyp@1794 1211 set_initiate_conc_mark_if_possible();
ysr@777 1212 }
ysr@777 1213 }
ysr@777 1214
ysr@777 1215 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
ysr@777 1216 }
ysr@777 1217
ysr@777 1218 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1219 end_time_sec, false);
ysr@777 1220
ysr@777 1221 guarantee(_cur_collection_pause_used_regions_at_start >=
ysr@777 1222 collection_set_size(),
ysr@777 1223 "Negative RS size?");
ysr@777 1224
ysr@777 1225 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1226 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1227 // can lead to more memory being used during collection than was used
ysr@777 1228 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1229 // Pauses in which evacuation failed can also lead to negative
ysr@777 1230 // collections, since no space is reclaimed from a region containing an
ysr@777 1231 // object whose evacuation failed.
ysr@777 1232 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1233 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1234 // (DLD, 10/05.)
ysr@777 1235 assert((true || parallel) // Always using GC LABs now.
ysr@777 1236 || _g1->evacuation_failed()
ysr@777 1237 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1238 "Negative collection");
ysr@777 1239
ysr@777 1240 size_t freed_bytes =
ysr@777 1241 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1242 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1243
ysr@777 1244 double survival_fraction =
ysr@777 1245 (double)surviving_bytes/
ysr@777 1246 (double)_collection_set_bytes_used_before;
ysr@777 1247
ysr@777 1248 _n_pauses++;
ysr@777 1249
tonyp@1030 1250 if (update_stats) {
ysr@777 1251 _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms);
ysr@777 1252 _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms);
ysr@777 1253 _recent_evac_times_ms->add(evac_ms);
ysr@777 1254 _recent_pause_times_ms->add(elapsed_ms);
ysr@777 1255
ysr@777 1256 _recent_rs_sizes->add(rs_size);
ysr@777 1257
ysr@777 1258 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1259 // fragmentation can produce negative collections. Same with evac
ysr@777 1260 // failure.
ysr@777 1261 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1262 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1263 // (DLD, 10/05.
ysr@777 1264 assert((true || parallel)
ysr@777 1265 || _g1->evacuation_failed()
ysr@777 1266 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1267 "Or else negative collection!");
ysr@777 1268 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
ysr@777 1269 _recent_CS_bytes_surviving->add(surviving_bytes);
ysr@777 1270
ysr@777 1271 // this is where we update the allocation rate of the application
ysr@777 1272 double app_time_ms =
ysr@777 1273 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1274 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1275 // This usually happens due to the timer not having the required
ysr@777 1276 // granularity. Some Linuxes are the usual culprits.
ysr@777 1277 // We'll just set it to something (arbitrarily) small.
ysr@777 1278 app_time_ms = 1.0;
ysr@777 1279 }
ysr@777 1280 size_t regions_allocated =
ysr@777 1281 (_region_num_young - _prev_region_num_young) +
ysr@777 1282 (_region_num_tenured - _prev_region_num_tenured);
ysr@777 1283 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1284 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1285 _prev_region_num_young = _region_num_young;
ysr@777 1286 _prev_region_num_tenured = _region_num_tenured;
ysr@777 1287
ysr@777 1288 double interval_ms =
ysr@777 1289 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1290 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1291 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1292 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1293 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1294 #ifndef PRODUCT
ysr@1521 1295 // Dump info to allow post-facto debugging
ysr@1521 1296 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1297 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1298 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1299 _recent_gc_times_ms->dump();
ysr@1521 1300 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1301 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1302 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1303 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1304 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1305 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1306 #endif // !PRODUCT
ysr@1522 1307 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1308 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1309 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1310 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1311 } else {
ysr@1521 1312 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1313 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1314 }
ysr@1521 1315 }
ysr@777 1316 }
ysr@777 1317
ysr@777 1318 if (G1PolicyVerbose > 1) {
ysr@777 1319 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
ysr@777 1320 }
ysr@777 1321
tonyp@2062 1322 PauseSummary* summary = _summary;
ysr@777 1323
ysr@777 1324 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
ysr@777 1325 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
ysr@777 1326 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
ysr@777 1327 double update_rs_processed_buffers =
ysr@777 1328 sum_of_values(_par_last_update_rs_processed_buffers);
ysr@777 1329 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
ysr@777 1330 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
ysr@777 1331 double termination_time = avg_value(_par_last_termination_times_ms);
ysr@777 1332
tonyp@1083 1333 double parallel_other_time = _cur_collection_par_time_ms -
tonyp@1083 1334 (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
johnc@1829 1335 scan_rs_time + obj_copy_time + termination_time);
tonyp@1030 1336 if (update_stats) {
ysr@777 1337 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 1338 guarantee(body_summary != NULL, "should not be null!");
ysr@777 1339
ysr@777 1340 if (_satb_drain_time_set)
ysr@777 1341 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
ysr@777 1342 else
ysr@777 1343 body_summary->record_satb_drain_time_ms(0.0);
ysr@777 1344 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
ysr@777 1345 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
ysr@777 1346 body_summary->record_update_rs_time_ms(update_rs_time);
ysr@777 1347 body_summary->record_scan_rs_time_ms(scan_rs_time);
ysr@777 1348 body_summary->record_obj_copy_time_ms(obj_copy_time);
ysr@777 1349 if (parallel) {
ysr@777 1350 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
ysr@777 1351 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
ysr@777 1352 body_summary->record_termination_time_ms(termination_time);
ysr@777 1353 body_summary->record_parallel_other_time_ms(parallel_other_time);
ysr@777 1354 }
ysr@777 1355 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
ysr@777 1356 }
ysr@777 1357
ysr@777 1358 if (G1PolicyVerbose > 1) {
ysr@777 1359 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1360 " CH Strong: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1361 " G1 Strong: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1362 " Evac: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1363 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1364 " |RS|: " SIZE_FORMAT,
ysr@777 1365 elapsed_ms, recent_avg_time_for_pauses_ms(),
ysr@777 1366 _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(),
ysr@777 1367 _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(),
ysr@777 1368 evac_ms, recent_avg_time_for_evac_ms(),
ysr@777 1369 scan_rs_time,
ysr@777 1370 recent_avg_time_for_pauses_ms() -
ysr@777 1371 recent_avg_time_for_G1_strong_ms(),
ysr@777 1372 rs_size);
ysr@777 1373
ysr@777 1374 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
ysr@777 1375 " At end " SIZE_FORMAT "K\n"
ysr@777 1376 " garbage : " SIZE_FORMAT "K"
ysr@777 1377 " of " SIZE_FORMAT "K\n"
ysr@777 1378 " survival : %6.2f%% (%6.2f%% avg)",
ysr@777 1379 _cur_collection_pause_used_at_start_bytes/K,
ysr@777 1380 _g1->used()/K, freed_bytes/K,
ysr@777 1381 _collection_set_bytes_used_before/K,
ysr@777 1382 survival_fraction*100.0,
ysr@777 1383 recent_avg_survival_fraction()*100.0);
ysr@777 1384 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
ysr@777 1385 recent_avg_pause_time_ratio() * 100.0);
ysr@777 1386 }
ysr@777 1387
ysr@777 1388 double other_time_ms = elapsed_ms;
ysr@777 1389
tonyp@2062 1390 if (_satb_drain_time_set) {
tonyp@2062 1391 other_time_ms -= _cur_satb_drain_time_ms;
ysr@777 1392 }
ysr@777 1393
tonyp@2062 1394 if (parallel) {
tonyp@2062 1395 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
tonyp@2062 1396 } else {
tonyp@2062 1397 other_time_ms -=
tonyp@2062 1398 update_rs_time +
tonyp@2062 1399 ext_root_scan_time + mark_stack_scan_time +
tonyp@2062 1400 scan_rs_time + obj_copy_time;
tonyp@2062 1401 }
tonyp@2062 1402
ysr@777 1403 if (PrintGCDetails) {
tonyp@2062 1404 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1405 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1406 elapsed_ms / 1000.0);
ysr@777 1407
tonyp@2062 1408 if (_satb_drain_time_set) {
tonyp@2062 1409 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
tonyp@2062 1410 }
tonyp@2062 1411 if (_last_satb_drain_processed_buffers >= 0) {
tonyp@2062 1412 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
tonyp@2062 1413 }
tonyp@2062 1414 if (parallel) {
tonyp@2062 1415 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
brutisso@2712 1416 print_par_stats(2, "GC Worker Start Time", _par_last_gc_worker_start_times_ms);
tonyp@2062 1417 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
brutisso@2712 1418 print_par_sizes(3, "Processed Buffers", _par_last_update_rs_processed_buffers);
brutisso@2712 1419 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
brutisso@2712 1420 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
tonyp@2062 1421 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1422 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1423 print_par_stats(2, "Termination", _par_last_termination_times_ms);
brutisso@2712 1424 print_par_sizes(3, "Termination Attempts", _par_last_termination_attempts);
brutisso@2712 1425 print_par_stats(2, "GC Worker End Time", _par_last_gc_worker_end_times_ms);
brutisso@2712 1426
brutisso@2712 1427 for (int i = 0; i < _parallel_gc_threads; i++) {
brutisso@2712 1428 _par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] - _par_last_gc_worker_start_times_ms[i];
brutisso@2712 1429 }
brutisso@2712 1430 print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
brutisso@2712 1431
tonyp@2062 1432 print_stats(2, "Other", parallel_other_time);
tonyp@2062 1433 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
tonyp@2062 1434 } else {
tonyp@2062 1435 print_stats(1, "Update RS", update_rs_time);
tonyp@2062 1436 print_stats(2, "Processed Buffers",
tonyp@2062 1437 (int)update_rs_processed_buffers);
tonyp@2062 1438 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
tonyp@2062 1439 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
tonyp@2062 1440 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1441 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1442 }
johnc@1325 1443 #ifndef PRODUCT
johnc@1325 1444 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1445 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1446 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1447 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1448 if (_num_cc_clears > 0) {
johnc@1325 1449 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1450 }
johnc@1325 1451 #endif
ysr@777 1452 print_stats(1, "Other", other_time_ms);
johnc@1829 1453 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
johnc@1829 1454
ysr@777 1455 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1456 if (_cur_aux_times_set[i]) {
ysr@777 1457 char buffer[96];
ysr@777 1458 sprintf(buffer, "Aux%d", i);
ysr@777 1459 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1460 }
ysr@777 1461 }
ysr@777 1462 }
ysr@777 1463 if (PrintGCDetails)
ysr@777 1464 gclog_or_tty->print(" [");
ysr@777 1465 if (PrintGC || PrintGCDetails)
ysr@777 1466 _g1->print_size_transition(gclog_or_tty,
ysr@777 1467 _cur_collection_pause_used_at_start_bytes,
ysr@777 1468 _g1->used(), _g1->capacity());
ysr@777 1469 if (PrintGCDetails)
ysr@777 1470 gclog_or_tty->print_cr("]");
ysr@777 1471
ysr@777 1472 _all_pause_times_ms->add(elapsed_ms);
tonyp@1083 1473 if (update_stats) {
tonyp@1083 1474 summary->record_total_time_ms(elapsed_ms);
tonyp@1083 1475 summary->record_other_time_ms(other_time_ms);
tonyp@1083 1476 }
ysr@777 1477 for (int i = 0; i < _aux_num; ++i)
ysr@777 1478 if (_cur_aux_times_set[i])
ysr@777 1479 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
ysr@777 1480
ysr@777 1481 // Reset marks-between-pauses counter.
ysr@777 1482 _n_marks_since_last_pause = 0;
ysr@777 1483
ysr@777 1484 // Update the efficiency-since-mark vars.
ysr@777 1485 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1486 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1487 // This usually happens due to the timer not having the required
ysr@777 1488 // granularity. Some Linuxes are the usual culprits.
ysr@777 1489 // We'll just set it to something (arbitrarily) small.
ysr@777 1490 proc_ms = 1.0;
ysr@777 1491 }
ysr@777 1492 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1493
ysr@777 1494 bool new_in_marking_window = _in_marking_window;
ysr@777 1495 bool new_in_marking_window_im = false;
tonyp@1794 1496 if (during_initial_mark_pause()) {
ysr@777 1497 new_in_marking_window = true;
ysr@777 1498 new_in_marking_window_im = true;
ysr@777 1499 }
ysr@777 1500
ysr@777 1501 if (in_young_gc_mode()) {
ysr@777 1502 if (_last_full_young_gc) {
ysr@777 1503 set_full_young_gcs(false);
ysr@777 1504 _last_full_young_gc = false;
ysr@777 1505 }
ysr@777 1506
ysr@777 1507 if ( !_last_young_gc_full ) {
ysr@777 1508 if ( _should_revert_to_full_young_gcs ||
ysr@777 1509 _known_garbage_ratio < 0.05 ||
ysr@777 1510 (adaptive_young_list_length() &&
ysr@777 1511 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
ysr@777 1512 set_full_young_gcs(true);
ysr@777 1513 }
ysr@777 1514 }
ysr@777 1515 _should_revert_to_full_young_gcs = false;
ysr@777 1516
ysr@777 1517 if (_last_young_gc_full && !_during_marking)
ysr@777 1518 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1519 }
ysr@777 1520
ysr@777 1521 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1522 // do that for any other surv rate groupsx
ysr@777 1523
ysr@777 1524 // <NEW PREDICTION>
ysr@777 1525
apetrusenko@1112 1526 if (update_stats) {
ysr@777 1527 double pause_time_ms = elapsed_ms;
ysr@777 1528
ysr@777 1529 size_t diff = 0;
ysr@777 1530 if (_max_pending_cards >= _pending_cards)
ysr@777 1531 diff = _max_pending_cards - _pending_cards;
ysr@777 1532 _pending_card_diff_seq->add((double) diff);
ysr@777 1533
ysr@777 1534 double cost_per_card_ms = 0.0;
ysr@777 1535 if (_pending_cards > 0) {
ysr@777 1536 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1537 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1538 }
ysr@777 1539
ysr@777 1540 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1541
ysr@777 1542 double cost_per_entry_ms = 0.0;
ysr@777 1543 if (cards_scanned > 10) {
ysr@777 1544 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
ysr@777 1545 if (_last_young_gc_full)
ysr@777 1546 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1547 else
ysr@777 1548 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1549 }
ysr@777 1550
ysr@777 1551 if (_max_rs_lengths > 0) {
ysr@777 1552 double cards_per_entry_ratio =
ysr@777 1553 (double) cards_scanned / (double) _max_rs_lengths;
ysr@777 1554 if (_last_young_gc_full)
ysr@777 1555 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1556 else
ysr@777 1557 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1558 }
ysr@777 1559
ysr@777 1560 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
ysr@777 1561 if (rs_length_diff >= 0)
ysr@777 1562 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1563
ysr@777 1564 size_t copied_bytes = surviving_bytes;
ysr@777 1565 double cost_per_byte_ms = 0.0;
ysr@777 1566 if (copied_bytes > 0) {
ysr@777 1567 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
ysr@777 1568 if (_in_marking_window)
ysr@777 1569 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
ysr@777 1570 else
ysr@777 1571 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
ysr@777 1572 }
ysr@777 1573
ysr@777 1574 double all_other_time_ms = pause_time_ms -
johnc@1829 1575 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1576 _mark_closure_time_ms + termination_time);
ysr@777 1577
ysr@777 1578 double young_other_time_ms = 0.0;
ysr@777 1579 if (_recorded_young_regions > 0) {
ysr@777 1580 young_other_time_ms =
ysr@777 1581 _recorded_young_cset_choice_time_ms +
ysr@777 1582 _recorded_young_free_cset_time_ms;
ysr@777 1583 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
ysr@777 1584 (double) _recorded_young_regions);
ysr@777 1585 }
ysr@777 1586 double non_young_other_time_ms = 0.0;
ysr@777 1587 if (_recorded_non_young_regions > 0) {
ysr@777 1588 non_young_other_time_ms =
ysr@777 1589 _recorded_non_young_cset_choice_time_ms +
ysr@777 1590 _recorded_non_young_free_cset_time_ms;
ysr@777 1591
ysr@777 1592 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
ysr@777 1593 (double) _recorded_non_young_regions);
ysr@777 1594 }
ysr@777 1595
ysr@777 1596 double constant_other_time_ms = all_other_time_ms -
ysr@777 1597 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1598 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1599
ysr@777 1600 double survival_ratio = 0.0;
ysr@777 1601 if (_bytes_in_collection_set_before_gc > 0) {
ysr@777 1602 survival_ratio = (double) bytes_in_to_space_during_gc() /
ysr@777 1603 (double) _bytes_in_collection_set_before_gc;
ysr@777 1604 }
ysr@777 1605
ysr@777 1606 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1607 _scanned_cards_seq->add((double) cards_scanned);
ysr@777 1608 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1609
ysr@777 1610 double expensive_region_limit_ms =
johnc@1186 1611 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1612 if (expensive_region_limit_ms < 0.0) {
ysr@777 1613 // this means that the other time was predicted to be longer than
ysr@777 1614 // than the max pause time
johnc@1186 1615 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1616 }
ysr@777 1617 _expensive_region_limit_ms = expensive_region_limit_ms;
ysr@777 1618
ysr@777 1619 if (PREDICTIONS_VERBOSE) {
ysr@777 1620 gclog_or_tty->print_cr("");
ysr@777 1621 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
johnc@1829 1622 "REGIONS %d %d %d "
ysr@777 1623 "PENDING_CARDS %d %d "
ysr@777 1624 "CARDS_SCANNED %d %d "
ysr@777 1625 "RS_LENGTHS %d %d "
ysr@777 1626 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
ysr@777 1627 "SURVIVAL_RATIO %1.6lf %1.6lf "
ysr@777 1628 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
ysr@777 1629 "OTHER_YOUNG %1.6lf %1.6lf "
ysr@777 1630 "OTHER_NON_YOUNG %1.6lf %1.6lf "
ysr@777 1631 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
ysr@777 1632 "ELAPSED %1.6lf %1.6lf ",
ysr@777 1633 _cur_collection_start_sec,
ysr@777 1634 (!_last_young_gc_full) ? 2 :
ysr@777 1635 (last_pause_included_initial_mark) ? 1 : 0,
ysr@777 1636 _recorded_region_num,
ysr@777 1637 _recorded_young_regions,
ysr@777 1638 _recorded_non_young_regions,
ysr@777 1639 _predicted_pending_cards, _pending_cards,
ysr@777 1640 _predicted_cards_scanned, cards_scanned,
ysr@777 1641 _predicted_rs_lengths, _max_rs_lengths,
ysr@777 1642 _predicted_rs_update_time_ms, update_rs_time,
ysr@777 1643 _predicted_rs_scan_time_ms, scan_rs_time,
ysr@777 1644 _predicted_survival_ratio, survival_ratio,
ysr@777 1645 _predicted_object_copy_time_ms, obj_copy_time,
ysr@777 1646 _predicted_constant_other_time_ms, constant_other_time_ms,
ysr@777 1647 _predicted_young_other_time_ms, young_other_time_ms,
ysr@777 1648 _predicted_non_young_other_time_ms,
ysr@777 1649 non_young_other_time_ms,
ysr@777 1650 _vtime_diff_ms, termination_time,
ysr@777 1651 _predicted_pause_time_ms, elapsed_ms);
ysr@777 1652 }
ysr@777 1653
ysr@777 1654 if (G1PolicyVerbose > 0) {
ysr@777 1655 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
ysr@777 1656 _predicted_pause_time_ms,
ysr@777 1657 (_within_target) ? "within" : "outside",
ysr@777 1658 elapsed_ms);
ysr@777 1659 }
ysr@777 1660
ysr@777 1661 }
ysr@777 1662
ysr@777 1663 _in_marking_window = new_in_marking_window;
ysr@777 1664 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1665 _free_regions_at_end_of_collection = _g1->free_regions();
ysr@777 1666 calculate_young_list_min_length();
johnc@1829 1667 calculate_young_list_target_length();
ysr@777 1668
iveresov@1546 1669 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1670 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1671 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
ysr@777 1672 // </NEW PREDICTION>
ysr@777 1673 }
ysr@777 1674
ysr@777 1675 // <NEW PREDICTION>
ysr@777 1676
// Adjust the concurrent refinement thread parameters and the dirty
// card queue thresholds after a pause, based on how long the last
// remembered-set update took (update_rs_time) relative to the goal
// (goal_ms) and how many buffers were processed.
void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    // Zone scale factors: yellow zone = 3x green, red zone = 6x green.
    const int k_gy = 3, k_gr = 6;
    // Multiplicative step sizes for growing / shrinking the green zone.
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      // Over the goal: shrink the green zone by dec_k.
      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        // Under the goal and more buffers processed than the green
        // zone: grow the green zone by inc_k (but by at least 1).
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    // Processing threshold: green zone plus a sigma-scaled delta (at
    // least 1), capped at the yellow zone.
    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  // If the completed buffer queue has reached the yellow zone, pad it
  // by its current size; otherwise clear any padding.
  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}
iveresov@1546 1717
ysr@777 1718 double
ysr@777 1719 G1CollectorPolicy::
ysr@777 1720 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1721 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1722
ysr@777 1723 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1724 size_t young_num = g1h->young_list()->length();
ysr@777 1725 if (young_num == 0)
ysr@777 1726 return 0.0;
ysr@777 1727
ysr@777 1728 young_num += adjustment;
ysr@777 1729 size_t pending_cards = predict_pending_cards();
johnc@1829 1730 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1731 predict_rs_length_diff();
ysr@777 1732 size_t card_num;
ysr@777 1733 if (full_young_gcs())
ysr@777 1734 card_num = predict_young_card_num(rs_lengths);
ysr@777 1735 else
ysr@777 1736 card_num = predict_non_young_card_num(rs_lengths);
ysr@777 1737 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1738 double accum_yg_surv_rate =
ysr@777 1739 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1740
ysr@777 1741 size_t bytes_to_copy =
ysr@777 1742 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1743
ysr@777 1744 return
ysr@777 1745 predict_rs_update_time_ms(pending_cards) +
ysr@777 1746 predict_rs_scan_time_ms(card_num) +
ysr@777 1747 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1748 predict_young_other_time_ms(young_num) +
ysr@777 1749 predict_constant_other_time_ms();
ysr@777 1750 }
ysr@777 1751
ysr@777 1752 double
ysr@777 1753 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1754 size_t rs_length = predict_rs_length_diff();
ysr@777 1755 size_t card_num;
ysr@777 1756 if (full_young_gcs())
ysr@777 1757 card_num = predict_young_card_num(rs_length);
ysr@777 1758 else
ysr@777 1759 card_num = predict_non_young_card_num(rs_length);
ysr@777 1760 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1761 }
ysr@777 1762
ysr@777 1763 double
ysr@777 1764 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1765 size_t scanned_cards) {
ysr@777 1766 return
ysr@777 1767 predict_rs_update_time_ms(pending_cards) +
ysr@777 1768 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1769 predict_constant_other_time_ms();
ysr@777 1770 }
ysr@777 1771
ysr@777 1772 double
ysr@777 1773 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1774 bool young) {
ysr@777 1775 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1776 size_t card_num;
ysr@777 1777 if (full_young_gcs())
ysr@777 1778 card_num = predict_young_card_num(rs_length);
ysr@777 1779 else
ysr@777 1780 card_num = predict_non_young_card_num(rs_length);
ysr@777 1781 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1782
ysr@777 1783 double region_elapsed_time_ms =
ysr@777 1784 predict_rs_scan_time_ms(card_num) +
ysr@777 1785 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1786
ysr@777 1787 if (young)
ysr@777 1788 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1789 else
ysr@777 1790 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1791
ysr@777 1792 return region_elapsed_time_ms;
ysr@777 1793 }
ysr@777 1794
ysr@777 1795 size_t
ysr@777 1796 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1797 size_t bytes_to_copy;
ysr@777 1798 if (hr->is_marked())
ysr@777 1799 bytes_to_copy = hr->max_live_bytes();
ysr@777 1800 else {
ysr@777 1801 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1802 "invariant" );
ysr@777 1803 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1804 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1805 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1806 }
ysr@777 1807
ysr@777 1808 return bytes_to_copy;
ysr@777 1809 }
ysr@777 1810
// Reset the per-pause collection set statistics before regions start
// being recorded for the next collection set.
void
G1CollectorPolicy::start_recording_regions() {
  _recorded_rs_lengths = 0;
  _recorded_young_regions = 0;
  _recorded_non_young_regions = 0;

#if PREDICTIONS_VERBOSE
  // Extra accounting only maintained for verbose prediction output.
  _recorded_marked_bytes = 0;
  _recorded_young_bytes = 0;
  _predicted_bytes_to_copy = 0;
  _predicted_rs_lengths = 0;
  _predicted_cards_scanned = 0;
#endif // PREDICTIONS_VERBOSE
}
ysr@777 1825
// Accumulate statistics for a region being added to the collection
// set: its remembered set length always, and (under
// PREDICTIONS_VERBOSE) its predicted copy cost plus, for non-young
// regions, its marking-based live-byte estimate.
void
G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
#if PREDICTIONS_VERBOSE
  if (!young) {
    _recorded_marked_bytes += hr->max_live_bytes();
  }
  _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
#endif // PREDICTIONS_VERBOSE

  size_t rs_length = hr->rem_set()->occupied();
  _recorded_rs_lengths += rs_length;
}
ysr@777 1838
ysr@777 1839 void
johnc@1829 1840 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
johnc@1829 1841 assert(!hr->is_young(), "should not call this");
johnc@1829 1842 ++_recorded_non_young_regions;
johnc@1829 1843 record_cset_region_info(hr, false);
johnc@1829 1844 }
johnc@1829 1845
// Record the number of young regions chosen for the collection set.
void
G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
  _recorded_young_regions = n_regions;
}
johnc@1829 1850
// Record the number of bytes in the young portion of the collection
// set (only tracked when PREDICTIONS_VERBOSE is enabled).
void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
#if PREDICTIONS_VERBOSE
  _recorded_young_bytes = bytes;
#endif // PREDICTIONS_VERBOSE
}
johnc@1829 1856
// Record the total remembered set length of the collection set.
void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}
johnc@1829 1860
// Record the predicted number of bytes the pause will have to copy.
void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
  _predicted_bytes_to_copy = bytes;
}
ysr@777 1864
// Finalize the prediction statistics for the recorded collection set
// and (under PREDICTIONS_VERBOSE) compute the component predictions
// and the overall predicted pause time.
void
G1CollectorPolicy::end_recording_regions() {
  // The _predicted_pause_time_ms field is referenced in code
  // not under PREDICTIONS_VERBOSE. Let's initialize it.
  _predicted_pause_time_ms = -1.0;

#if PREDICTIONS_VERBOSE
  _predicted_pending_cards = predict_pending_cards();
  _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
  if (full_young_gcs())
    _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
  else
    _predicted_cards_scanned +=
      predict_non_young_card_num(_predicted_rs_lengths);
  _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;

  // Per-component time predictions for the upcoming pause.
  _predicted_rs_update_time_ms =
    predict_rs_update_time_ms(_g1->pending_card_num());
  _predicted_rs_scan_time_ms =
    predict_rs_scan_time_ms(_predicted_cards_scanned);
  _predicted_object_copy_time_ms =
    predict_object_copy_time_ms(_predicted_bytes_to_copy);
  _predicted_constant_other_time_ms =
    predict_constant_other_time_ms();
  _predicted_young_other_time_ms =
    predict_young_other_time_ms(_recorded_young_regions);
  _predicted_non_young_other_time_ms =
    predict_non_young_other_time_ms(_recorded_non_young_regions);

  // The overall prediction is the sum of the components above.
  _predicted_pause_time_ms =
    _predicted_rs_update_time_ms +
    _predicted_rs_scan_time_ms +
    _predicted_object_copy_time_ms +
    _predicted_constant_other_time_ms +
    _predicted_young_other_time_ms +
    _predicted_non_young_other_time_ms;
#endif // PREDICTIONS_VERBOSE
}
ysr@777 1903
// If the predicted cost of collecting a region exceeds the computed
// per-region limit, arrange to revert to fully-young collections
// (the non-young-GC-mode branch is intentionally dead; see below).
void G1CollectorPolicy::check_if_region_is_too_expensive(double
                                                         predicted_time_ms) {
  // I don't think we need to do this when in young GC mode since
  // marking will be initiated next time we hit the soft limit anyway...
  if (predicted_time_ms > _expensive_region_limit_ms) {
    if (!in_young_gc_mode()) {
      set_full_young_gcs(true);
      // We might want to do something different here. However,
      // right now we don't support the non-generational G1 mode
      // (and in fact we are planning to remove the associated code,
      // see CR 6814390). So, let's leave it as is and this will be
      // removed some time in the future
      ShouldNotReachHere();
      set_during_initial_mark_pause();
    } else
      // no point in doing another partial one
      _should_revert_to_full_young_gcs = true;
  }
}
ysr@777 1923
ysr@777 1924 // </NEW PREDICTION>
ysr@777 1925
ysr@777 1926
ysr@777 1927 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1928 double elapsed_ms) {
ysr@777 1929 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1930 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1931 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1932 }
ysr@777 1933
ysr@777 1934 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
johnc@1186 1935 if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis;
ysr@777 1936 else return _recent_pause_times_ms->avg();
ysr@777 1937 }
ysr@777 1938
ysr@777 1939 double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
ysr@777 1940 if (_recent_CH_strong_roots_times_ms->num() == 0)
johnc@1186 1941 return (double)MaxGCPauseMillis/3.0;
ysr@777 1942 else return _recent_CH_strong_roots_times_ms->avg();
ysr@777 1943 }
ysr@777 1944
ysr@777 1945 double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
ysr@777 1946 if (_recent_G1_strong_roots_times_ms->num() == 0)
johnc@1186 1947 return (double)MaxGCPauseMillis/3.0;
ysr@777 1948 else return _recent_G1_strong_roots_times_ms->avg();
ysr@777 1949 }
ysr@777 1950
ysr@777 1951 double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
johnc@1186 1952 if (_recent_evac_times_ms->num() == 0) return (double)MaxGCPauseMillis/3.0;
ysr@777 1953 else return _recent_evac_times_ms->avg();
ysr@777 1954 }
ysr@777 1955
// All the "recent" sequences are updated in lock step, so any of
// them can serve as the count of recent GCs; the asserts verify that
// they really are in sync before one is used.
int G1CollectorPolicy::number_of_recent_gcs() {
  assert(_recent_CH_strong_roots_times_ms->num() ==
         _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync");
  assert(_recent_G1_strong_roots_times_ms->num() ==
         _recent_evac_times_ms->num(), "Sequence out of sync");
  assert(_recent_evac_times_ms->num() ==
         _recent_pause_times_ms->num(), "Sequence out of sync");
  assert(_recent_pause_times_ms->num() ==
         _recent_CS_bytes_used_before->num(), "Sequence out of sync");
  assert(_recent_CS_bytes_used_before->num() ==
         _recent_CS_bytes_surviving->num(), "Sequence out of sync");
  return _recent_pause_times_ms->num();
}
ysr@777 1969
// Average survival fraction over the recent collection sets
// (surviving bytes over bytes used before collection).
double G1CollectorPolicy::recent_avg_survival_fraction() {
  return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
                                           _recent_CS_bytes_used_before);
}
ysr@777 1974
// Survival fraction of the most recent collection set only.
double G1CollectorPolicy::last_survival_fraction() {
  return last_survival_fraction_work(_recent_CS_bytes_surviving,
                                     _recent_CS_bytes_used_before);
}
ysr@777 1979
ysr@777 1980 double
ysr@777 1981 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 1982 TruncatedSeq* before) {
ysr@777 1983 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 1984 if (before->sum() > 0.0) {
ysr@777 1985 double recent_survival_rate = surviving->sum() / before->sum();
ysr@777 1986 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1987 // fragmentation can produce negative collections.
ysr@777 1988 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1989 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1990 // (DLD, 10/05.)
jmasa@2188 1991 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
ysr@777 1992 _g1->evacuation_failed() ||
ysr@777 1993 recent_survival_rate <= 1.0, "Or bad frac");
ysr@777 1994 return recent_survival_rate;
ysr@777 1995 } else {
ysr@777 1996 return 1.0; // Be conservative.
ysr@777 1997 }
ysr@777 1998 }
ysr@777 1999
ysr@777 2000 double
ysr@777 2001 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 2002 TruncatedSeq* before) {
ysr@777 2003 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 2004 if (surviving->num() > 0 && before->last() > 0.0) {
ysr@777 2005 double last_survival_rate = surviving->last() / before->last();
ysr@777 2006 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 2007 // fragmentation can produce negative collections.
ysr@777 2008 // Further, we're now always doing parallel collection. But I'm still
ysr@777 2009 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 2010 // (DLD, 10/05.)
jmasa@2188 2011 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
ysr@777 2012 last_survival_rate <= 1.0, "Or bad frac");
ysr@777 2013 return last_survival_rate;
ysr@777 2014 } else {
ysr@777 2015 return 1.0;
ysr@777 2016 }
ysr@777 2017 }
ysr@777 2018
// Floors used by conservative_avg_survival_fraction_work(): with
// fewer than survival_min_obs observations, the fraction is bounded
// below by survival_min_obs_limits[num_observations]; it is always
// bounded below by min_survival_rate.
static const int survival_min_obs = 5;
static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
static const double min_survival_rate = 0.1;
ysr@777 2022
ysr@777 2023 double
ysr@777 2024 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
ysr@777 2025 double latest) {
ysr@777 2026 double res = avg;
ysr@777 2027 if (number_of_recent_gcs() < survival_min_obs) {
ysr@777 2028 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
ysr@777 2029 }
ysr@777 2030 res = MAX2(res, latest);
ysr@777 2031 res = MAX2(res, min_survival_rate);
ysr@777 2032 // In the parallel case, LAB fragmentation can produce "negative
ysr@777 2033 // collections"; so can evac failure. Cap at 1.0
ysr@777 2034 res = MIN2(res, 1.0);
ysr@777 2035 return res;
ysr@777 2036 }
ysr@777 2037
ysr@777 2038 size_t G1CollectorPolicy::expansion_amount() {
tonyp@1791 2039 if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
johnc@1186 2040 // We will double the existing space, or take
johnc@1186 2041 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 2042 // space, whichever is smaller, bounded below by a minimum
johnc@1186 2043 // expansion (unless that's all that's left.)
ysr@777 2044 const size_t min_expand_bytes = 1*M;
johnc@2504 2045 size_t reserved_bytes = _g1->max_capacity();
ysr@777 2046 size_t committed_bytes = _g1->capacity();
ysr@777 2047 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 2048 size_t expand_bytes;
ysr@777 2049 size_t expand_bytes_via_pct =
johnc@1186 2050 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 2051 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 2052 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 2053 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
ysr@777 2054 if (G1PolicyVerbose > 1) {
ysr@777 2055 gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
ysr@777 2056 "committed = %d%s, uncommited = %d%s, via pct = %d%s.\n"
ysr@777 2057 " Answer = %d.\n",
ysr@777 2058 recent_avg_pause_time_ratio(),
ysr@777 2059 byte_size_in_proper_unit(committed_bytes),
ysr@777 2060 proper_unit_for_byte_size(committed_bytes),
ysr@777 2061 byte_size_in_proper_unit(uncommitted_bytes),
ysr@777 2062 proper_unit_for_byte_size(uncommitted_bytes),
ysr@777 2063 byte_size_in_proper_unit(expand_bytes_via_pct),
ysr@777 2064 proper_unit_for_byte_size(expand_bytes_via_pct),
ysr@777 2065 byte_size_in_proper_unit(expand_bytes),
ysr@777 2066 proper_unit_for_byte_size(expand_bytes));
ysr@777 2067 }
ysr@777 2068 return expand_bytes;
ysr@777 2069 } else {
ysr@777 2070 return 0;
ysr@777 2071 }
ysr@777 2072 }
ysr@777 2073
ysr@777 2074 void G1CollectorPolicy::note_start_of_mark_thread() {
ysr@777 2075 _mark_thread_startup_sec = os::elapsedTime();
ysr@777 2076 }
ysr@777 2077
ysr@777 2078 class CountCSClosure: public HeapRegionClosure {
ysr@777 2079 G1CollectorPolicy* _g1_policy;
ysr@777 2080 public:
ysr@777 2081 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 2082 _g1_policy(g1_policy) {}
ysr@777 2083 bool doHeapRegion(HeapRegion* r) {
ysr@777 2084 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 2085 return false;
ysr@777 2086 }
ysr@777 2087 };
ysr@777 2088
ysr@777 2089 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 2090 CountCSClosure cs_closure(this);
ysr@777 2091 _g1->collection_set_iterate(&cs_closure);
ysr@777 2092 }
ysr@777 2093
ysr@777 2094 void G1CollectorPolicy::print_summary (int level,
ysr@777 2095 const char* str,
ysr@777 2096 NumberSeq* seq) const {
ysr@777 2097 double sum = seq->sum();
brutisso@2645 2098 LineBuffer(level + 1).append_and_print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 2099 str, sum / 1000.0, seq->avg());
ysr@777 2100 }
ysr@777 2101
ysr@777 2102 void G1CollectorPolicy::print_summary_sd (int level,
ysr@777 2103 const char* str,
ysr@777 2104 NumberSeq* seq) const {
ysr@777 2105 print_summary(level, str, seq);
brutisso@2645 2106 LineBuffer(level + 6).append_and_print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 2107 seq->num(), seq->sd(), seq->maximum());
ysr@777 2108 }
ysr@777 2109
ysr@777 2110 void G1CollectorPolicy::check_other_times(int level,
ysr@777 2111 NumberSeq* other_times_ms,
ysr@777 2112 NumberSeq* calc_other_times_ms) const {
ysr@777 2113 bool should_print = false;
brutisso@2645 2114 LineBuffer buf(level + 2);
ysr@777 2115
ysr@777 2116 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 2117 fabs(calc_other_times_ms->sum()));
ysr@777 2118 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 2119 fabs(calc_other_times_ms->sum()));
ysr@777 2120 double sum_ratio = max_sum / min_sum;
ysr@777 2121 if (sum_ratio > 1.1) {
ysr@777 2122 should_print = true;
brutisso@2645 2123 buf.append_and_print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 2124 }
ysr@777 2125
ysr@777 2126 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 2127 fabs(calc_other_times_ms->avg()));
ysr@777 2128 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 2129 fabs(calc_other_times_ms->avg()));
ysr@777 2130 double avg_ratio = max_avg / min_avg;
ysr@777 2131 if (avg_ratio > 1.1) {
ysr@777 2132 should_print = true;
brutisso@2645 2133 buf.append_and_print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 2134 }
ysr@777 2135
ysr@777 2136 if (other_times_ms->sum() < -0.01) {
brutisso@2645 2137 buf.append_and_print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 2138 }
ysr@777 2139
ysr@777 2140 if (other_times_ms->avg() < -0.01) {
brutisso@2645 2141 buf.append_and_print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 2142 }
ysr@777 2143
ysr@777 2144 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 2145 should_print = true;
brutisso@2645 2146 buf.append_and_print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 2147 }
ysr@777 2148
ysr@777 2149 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 2150 should_print = true;
brutisso@2645 2151 buf.append_and_print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 2152 }
ysr@777 2153
ysr@777 2154 if (should_print)
ysr@777 2155 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 2156 }
ysr@777 2157
ysr@777 2158 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
jmasa@2188 2159 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 2160 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 2161 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 2162 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 2163 if (body_summary != NULL) {
ysr@777 2164 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
ysr@777 2165 if (parallel) {
ysr@777 2166 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
ysr@777 2167 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2168 print_summary(2, "Ext Root Scanning",
ysr@777 2169 body_summary->get_ext_root_scan_seq());
ysr@777 2170 print_summary(2, "Mark Stack Scanning",
ysr@777 2171 body_summary->get_mark_stack_scan_seq());
ysr@777 2172 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2173 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2174 print_summary(2, "Termination", body_summary->get_termination_seq());
ysr@777 2175 print_summary(2, "Other", body_summary->get_parallel_other_seq());
ysr@777 2176 {
ysr@777 2177 NumberSeq* other_parts[] = {
ysr@777 2178 body_summary->get_update_rs_seq(),
ysr@777 2179 body_summary->get_ext_root_scan_seq(),
ysr@777 2180 body_summary->get_mark_stack_scan_seq(),
ysr@777 2181 body_summary->get_scan_rs_seq(),
ysr@777 2182 body_summary->get_obj_copy_seq(),
ysr@777 2183 body_summary->get_termination_seq()
ysr@777 2184 };
ysr@777 2185 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
johnc@2134 2186 6, other_parts);
ysr@777 2187 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 2188 &calc_other_times_ms);
ysr@777 2189 }
ysr@777 2190 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
ysr@777 2191 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 2192 } else {
ysr@777 2193 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2194 print_summary(1, "Ext Root Scanning",
ysr@777 2195 body_summary->get_ext_root_scan_seq());
ysr@777 2196 print_summary(1, "Mark Stack Scanning",
ysr@777 2197 body_summary->get_mark_stack_scan_seq());
ysr@777 2198 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2199 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2200 }
ysr@777 2201 }
ysr@777 2202 print_summary(1, "Other", summary->get_other_seq());
ysr@777 2203 {
johnc@2134 2204 if (body_summary != NULL) {
johnc@2134 2205 NumberSeq calc_other_times_ms;
johnc@2134 2206 if (parallel) {
johnc@2134 2207 // parallel
johnc@2134 2208 NumberSeq* other_parts[] = {
johnc@2134 2209 body_summary->get_satb_drain_seq(),
johnc@2134 2210 body_summary->get_parallel_seq(),
johnc@2134 2211 body_summary->get_clear_ct_seq()
johnc@2134 2212 };
johnc@2134 2213 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
johnc@2134 2214 3, other_parts);
johnc@2134 2215 } else {
johnc@2134 2216 // serial
johnc@2134 2217 NumberSeq* other_parts[] = {
johnc@2134 2218 body_summary->get_satb_drain_seq(),
johnc@2134 2219 body_summary->get_update_rs_seq(),
johnc@2134 2220 body_summary->get_ext_root_scan_seq(),
johnc@2134 2221 body_summary->get_mark_stack_scan_seq(),
johnc@2134 2222 body_summary->get_scan_rs_seq(),
johnc@2134 2223 body_summary->get_obj_copy_seq()
johnc@2134 2224 };
johnc@2134 2225 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
johnc@2134 2226 6, other_parts);
johnc@2134 2227 }
johnc@2134 2228 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 2229 }
ysr@777 2230 }
ysr@777 2231 } else {
brutisso@2645 2232 LineBuffer(1).append_and_print_cr("none");
ysr@777 2233 }
brutisso@2645 2234 LineBuffer(0).append_and_print_cr("");
ysr@777 2235 }
ysr@777 2236
ysr@777 2237 void G1CollectorPolicy::print_tracing_info() const {
ysr@777 2238 if (TraceGen0Time) {
ysr@777 2239 gclog_or_tty->print_cr("ALL PAUSES");
ysr@777 2240 print_summary_sd(0, "Total", _all_pause_times_ms);
ysr@777 2241 gclog_or_tty->print_cr("");
ysr@777 2242 gclog_or_tty->print_cr("");
ysr@777 2243 gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
ysr@777 2244 gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
ysr@777 2245 gclog_or_tty->print_cr("");
ysr@777 2246
apetrusenko@1112 2247 gclog_or_tty->print_cr("EVACUATION PAUSES");
apetrusenko@1112 2248 print_summary(_summary);
ysr@777 2249
ysr@777 2250 gclog_or_tty->print_cr("MISC");
ysr@777 2251 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
ysr@777 2252 print_summary_sd(0, "Yields", _all_yield_times_ms);
ysr@777 2253 for (int i = 0; i < _aux_num; ++i) {
ysr@777 2254 if (_all_aux_times_ms[i].num() > 0) {
ysr@777 2255 char buffer[96];
ysr@777 2256 sprintf(buffer, "Aux%d", i);
ysr@777 2257 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
ysr@777 2258 }
ysr@777 2259 }
ysr@777 2260
ysr@777 2261 size_t all_region_num = _region_num_young + _region_num_tenured;
ysr@777 2262 gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
ysr@777 2263 "Tenured %8d (%6.2lf%%)",
ysr@777 2264 all_region_num,
ysr@777 2265 _region_num_young,
ysr@777 2266 (double) _region_num_young / (double) all_region_num * 100.0,
ysr@777 2267 _region_num_tenured,
ysr@777 2268 (double) _region_num_tenured / (double) all_region_num * 100.0);
ysr@777 2269 }
ysr@777 2270 if (TraceGen1Time) {
ysr@777 2271 if (_all_full_gc_times_ms->num() > 0) {
ysr@777 2272 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
ysr@777 2273 _all_full_gc_times_ms->num(),
ysr@777 2274 _all_full_gc_times_ms->sum() / 1000.0);
ysr@777 2275 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
ysr@777 2276 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 2277 _all_full_gc_times_ms->sd(),
ysr@777 2278 _all_full_gc_times_ms->maximum());
ysr@777 2279 }
ysr@777 2280 }
ysr@777 2281 }
ysr@777 2282
ysr@777 2283 void G1CollectorPolicy::print_yg_surv_rate_info() const {
ysr@777 2284 #ifndef PRODUCT
ysr@777 2285 _short_lived_surv_rate_group->print_surv_rate_summary();
ysr@777 2286 // add this call for any other surv rate groups
ysr@777 2287 #endif // PRODUCT
ysr@777 2288 }
ysr@777 2289
tonyp@2315 2290 void
tonyp@2315 2291 G1CollectorPolicy::update_region_num(bool young) {
tonyp@2315 2292 if (young) {
ysr@777 2293 ++_region_num_young;
ysr@777 2294 } else {
ysr@777 2295 ++_region_num_tenured;
ysr@777 2296 }
ysr@777 2297 }
ysr@777 2298
ysr@777 2299 #ifndef PRODUCT
ysr@777 2300 // for debugging, bit of a hack...
ysr@777 2301 static char*
ysr@777 2302 region_num_to_mbs(int length) {
ysr@777 2303 static char buffer[64];
ysr@777 2304 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2305 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2306 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2307 return buffer;
ysr@777 2308 }
ysr@777 2309 #endif // PRODUCT
ysr@777 2310
apetrusenko@980 2311 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2312 switch (purpose) {
ysr@777 2313 case GCAllocForSurvived:
apetrusenko@980 2314 return _max_survivor_regions;
ysr@777 2315 case GCAllocForTenured:
apetrusenko@980 2316 return REGIONS_UNLIMITED;
ysr@777 2317 default:
apetrusenko@980 2318 ShouldNotReachHere();
apetrusenko@980 2319 return REGIONS_UNLIMITED;
ysr@777 2320 };
ysr@777 2321 }
ysr@777 2322
tonyp@2333 2323 void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
tonyp@2333 2324 size_t expansion_region_num = 0;
tonyp@2333 2325 if (GCLockerEdenExpansionPercent > 0) {
tonyp@2333 2326 double perc = (double) GCLockerEdenExpansionPercent / 100.0;
tonyp@2333 2327 double expansion_region_num_d = perc * (double) _young_list_target_length;
tonyp@2333 2328 // We use ceiling so that if expansion_region_num_d is > 0.0 (but
tonyp@2333 2329 // less than 1.0) we'll get 1.
tonyp@2333 2330 expansion_region_num = (size_t) ceil(expansion_region_num_d);
tonyp@2333 2331 } else {
tonyp@2333 2332 assert(expansion_region_num == 0, "sanity");
tonyp@2333 2333 }
tonyp@2333 2334 _young_list_max_length = _young_list_target_length + expansion_region_num;
tonyp@2333 2335 assert(_young_list_target_length <= _young_list_max_length, "post-condition");
tonyp@2333 2336 }
tonyp@2333 2337
apetrusenko@980 2338 // Calculates survivor space parameters.
apetrusenko@980 2339 void G1CollectorPolicy::calculate_survivors_policy()
apetrusenko@980 2340 {
apetrusenko@980 2341 if (G1FixedSurvivorSpaceSize == 0) {
apetrusenko@980 2342 _max_survivor_regions = _young_list_target_length / SurvivorRatio;
apetrusenko@980 2343 } else {
apetrusenko@982 2344 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
apetrusenko@980 2345 }
apetrusenko@980 2346
apetrusenko@980 2347 if (G1FixedTenuringThreshold) {
apetrusenko@980 2348 _tenuring_threshold = MaxTenuringThreshold;
apetrusenko@980 2349 } else {
apetrusenko@980 2350 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2351 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2352 }
apetrusenko@980 2353 }
apetrusenko@980 2354
ysr@777 2355 #ifndef PRODUCT
ysr@777 2356 class HRSortIndexIsOKClosure: public HeapRegionClosure {
ysr@777 2357 CollectionSetChooser* _chooser;
ysr@777 2358 public:
ysr@777 2359 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
ysr@777 2360 _chooser(chooser) {}
ysr@777 2361
ysr@777 2362 bool doHeapRegion(HeapRegion* r) {
ysr@777 2363 if (!r->continuesHumongous()) {
ysr@777 2364 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
ysr@777 2365 }
ysr@777 2366 return false;
ysr@777 2367 }
ysr@777 2368 };
ysr@777 2369
ysr@777 2370 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
ysr@777 2371 HRSortIndexIsOKClosure cl(_collectionSetChooser);
ysr@777 2372 _g1->heap_region_iterate(&cl);
ysr@777 2373 return true;
ysr@777 2374 }
ysr@777 2375 #endif
ysr@777 2376
tonyp@2011 2377 bool
tonyp@2011 2378 G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
tonyp@2011 2379 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2380 if (!during_cycle) {
tonyp@2011 2381 set_initiate_conc_mark_if_possible();
tonyp@2011 2382 return true;
tonyp@2011 2383 } else {
tonyp@2011 2384 return false;
tonyp@2011 2385 }
tonyp@2011 2386 }
tonyp@2011 2387
ysr@777 2388 void
tonyp@1794 2389 G1CollectorPolicy::decide_on_conc_mark_initiation() {
tonyp@1794 2390 // We are about to decide on whether this pause will be an
tonyp@1794 2391 // initial-mark pause.
tonyp@1794 2392
tonyp@1794 2393 // First, during_initial_mark_pause() should not be already set. We
tonyp@1794 2394 // will set it here if we have to. However, it should be cleared by
tonyp@1794 2395 // the end of the pause (it's only set for the duration of an
tonyp@1794 2396 // initial-mark pause).
tonyp@1794 2397 assert(!during_initial_mark_pause(), "pre-condition");
tonyp@1794 2398
tonyp@1794 2399 if (initiate_conc_mark_if_possible()) {
tonyp@1794 2400 // We had noticed on a previous pause that the heap occupancy has
tonyp@1794 2401 // gone over the initiating threshold and we should start a
tonyp@1794 2402 // concurrent marking cycle. So we might initiate one.
tonyp@1794 2403
tonyp@1794 2404 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@1794 2405 if (!during_cycle) {
tonyp@1794 2406 // The concurrent marking thread is not "during a cycle", i.e.,
tonyp@1794 2407 // it has completed the last one. So we can go ahead and
tonyp@1794 2408 // initiate a new cycle.
tonyp@1794 2409
tonyp@1794 2410 set_during_initial_mark_pause();
tonyp@1794 2411
tonyp@1794 2412 // And we can now clear initiate_conc_mark_if_possible() as
tonyp@1794 2413 // we've already acted on it.
tonyp@1794 2414 clear_initiate_conc_mark_if_possible();
tonyp@1794 2415 } else {
tonyp@1794 2416 // The concurrent marking thread is still finishing up the
tonyp@1794 2417 // previous cycle. If we start one right now the two cycles
tonyp@1794 2418 // overlap. In particular, the concurrent marking thread might
tonyp@1794 2419 // be in the process of clearing the next marking bitmap (which
tonyp@1794 2420 // we will use for the next cycle if we start one). Starting a
tonyp@1794 2421 // cycle now will be bad given that parts of the marking
tonyp@1794 2422 // information might get cleared by the marking thread. And we
tonyp@1794 2423 // cannot wait for the marking thread to finish the cycle as it
tonyp@1794 2424 // periodically yields while clearing the next marking bitmap
tonyp@1794 2425 // and, if it's in a yield point, it's waiting for us to
tonyp@1794 2426 // finish. So, at this point we will not start a cycle and we'll
tonyp@1794 2427 // let the concurrent marking thread complete the last one.
tonyp@1794 2428 }
tonyp@1794 2429 }
tonyp@1794 2430 }
tonyp@1794 2431
tonyp@1794 2432 void
ysr@777 2433 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2434 record_collection_pause_start(double start_time_sec, size_t start_used) {
ysr@777 2435 G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
ysr@777 2436 }
ysr@777 2437
ysr@777 2438 class NextNonCSElemFinder: public HeapRegionClosure {
ysr@777 2439 HeapRegion* _res;
ysr@777 2440 public:
ysr@777 2441 NextNonCSElemFinder(): _res(NULL) {}
ysr@777 2442 bool doHeapRegion(HeapRegion* r) {
ysr@777 2443 if (!r->in_collection_set()) {
ysr@777 2444 _res = r;
ysr@777 2445 return true;
ysr@777 2446 } else {
ysr@777 2447 return false;
ysr@777 2448 }
ysr@777 2449 }
ysr@777 2450 HeapRegion* res() { return _res; }
ysr@777 2451 };
ysr@777 2452
ysr@777 2453 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2454 CollectionSetChooser* _hrSorted;
ysr@777 2455
ysr@777 2456 public:
ysr@777 2457 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2458 _hrSorted(hrSorted)
ysr@777 2459 {}
ysr@777 2460
ysr@777 2461 bool doHeapRegion(HeapRegion* r) {
ysr@777 2462 // We only include humongous regions in collection
ysr@777 2463 // sets when concurrent mark shows that their contained object is
ysr@777 2464 // unreachable.
ysr@777 2465
ysr@777 2466 // Do we have any marking information for this region?
ysr@777 2467 if (r->is_marked()) {
ysr@777 2468 // We don't include humongous regions in collection
ysr@777 2469 // sets because we collect them immediately at the end of a marking
ysr@777 2470 // cycle. We also don't include young regions because we *must*
ysr@777 2471 // include them in the next collection pause.
ysr@777 2472 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2473 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2474 }
ysr@777 2475 }
ysr@777 2476 return false;
ysr@777 2477 }
ysr@777 2478 };
ysr@777 2479
ysr@777 2480 class ParKnownGarbageHRClosure: public HeapRegionClosure {
ysr@777 2481 CollectionSetChooser* _hrSorted;
ysr@777 2482 jint _marked_regions_added;
ysr@777 2483 jint _chunk_size;
ysr@777 2484 jint _cur_chunk_idx;
ysr@777 2485 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
ysr@777 2486 int _worker;
ysr@777 2487 int _invokes;
ysr@777 2488
ysr@777 2489 void get_new_chunk() {
ysr@777 2490 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
ysr@777 2491 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
ysr@777 2492 }
ysr@777 2493 void add_region(HeapRegion* r) {
ysr@777 2494 if (_cur_chunk_idx == _cur_chunk_end) {
ysr@777 2495 get_new_chunk();
ysr@777 2496 }
ysr@777 2497 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
ysr@777 2498 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
ysr@777 2499 _marked_regions_added++;
ysr@777 2500 _cur_chunk_idx++;
ysr@777 2501 }
ysr@777 2502
ysr@777 2503 public:
ysr@777 2504 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
ysr@777 2505 jint chunk_size,
ysr@777 2506 int worker) :
ysr@777 2507 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
ysr@777 2508 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
ysr@777 2509 _invokes(0)
ysr@777 2510 {}
ysr@777 2511
ysr@777 2512 bool doHeapRegion(HeapRegion* r) {
ysr@777 2513 // We only include humongous regions in collection
ysr@777 2514 // sets when concurrent mark shows that their contained object is
ysr@777 2515 // unreachable.
ysr@777 2516 _invokes++;
ysr@777 2517
ysr@777 2518 // Do we have any marking information for this region?
ysr@777 2519 if (r->is_marked()) {
ysr@777 2520 // We don't include humongous regions in collection
ysr@777 2521 // sets because we collect them immediately at the end of a marking
ysr@777 2522 // cycle.
ysr@777 2523 // We also do not include young regions in collection sets
ysr@777 2524 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2525 add_region(r);
ysr@777 2526 }
ysr@777 2527 }
ysr@777 2528 return false;
ysr@777 2529 }
ysr@777 2530 jint marked_regions_added() { return _marked_regions_added; }
ysr@777 2531 int invokes() { return _invokes; }
ysr@777 2532 };
ysr@777 2533
ysr@777 2534 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 2535 CollectionSetChooser* _hrSorted;
ysr@777 2536 jint _chunk_size;
ysr@777 2537 G1CollectedHeap* _g1;
ysr@777 2538 public:
ysr@777 2539 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ysr@777 2540 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 2541 _hrSorted(hrSorted), _chunk_size(chunk_size),
ysr@777 2542 _g1(G1CollectedHeap::heap())
ysr@777 2543 {}
ysr@777 2544
ysr@777 2545 void work(int i) {
ysr@777 2546 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
ysr@777 2547 // Back to zero for the claim value.
tonyp@790 2548 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
tonyp@790 2549 HeapRegion::InitialClaimValue);
ysr@777 2550 jint regions_added = parKnownGarbageCl.marked_regions_added();
ysr@777 2551 _hrSorted->incNumMarkedHeapRegions(regions_added);
ysr@777 2552 if (G1PrintParCleanupStats) {
brutisso@2645 2553 gclog_or_tty->print_cr(" Thread %d called %d times, added %d regions to list.",
ysr@777 2554 i, parKnownGarbageCl.invokes(), regions_added);
ysr@777 2555 }
ysr@777 2556 }
ysr@777 2557 };
ysr@777 2558
ysr@777 2559 void
ysr@777 2560 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2561 record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 2562 size_t max_live_bytes) {
ysr@777 2563 double start;
ysr@777 2564 if (G1PrintParCleanupStats) start = os::elapsedTime();
ysr@777 2565 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 2566
ysr@777 2567 _collectionSetChooser->clearMarkedHeapRegions();
ysr@777 2568 double clear_marked_end;
ysr@777 2569 if (G1PrintParCleanupStats) {
ysr@777 2570 clear_marked_end = os::elapsedTime();
ysr@777 2571 gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
ysr@777 2572 (clear_marked_end - start)*1000.0);
ysr@777 2573 }
jmasa@2188 2574 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 2575 const size_t OverpartitionFactor = 4;
kvn@1926 2576 const size_t MinWorkUnit = 8;
kvn@1926 2577 const size_t WorkUnit =
ysr@777 2578 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
kvn@1926 2579 MinWorkUnit);
ysr@777 2580 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
kvn@1926 2581 WorkUnit);
ysr@777 2582 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
kvn@1926 2583 (int) WorkUnit);
ysr@777 2584 _g1->workers()->run_task(&parKnownGarbageTask);
tonyp@790 2585
tonyp@790 2586 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 2587 "sanity check");
ysr@777 2588 } else {
ysr@777 2589 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
ysr@777 2590 _g1->heap_region_iterate(&knownGarbagecl);
ysr@777 2591 }
ysr@777 2592 double known_garbage_end;
ysr@777 2593 if (G1PrintParCleanupStats) {
ysr@777 2594 known_garbage_end = os::elapsedTime();
ysr@777 2595 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
ysr@777 2596 (known_garbage_end - clear_marked_end)*1000.0);
ysr@777 2597 }
ysr@777 2598 _collectionSetChooser->sortMarkedHeapRegions();
ysr@777 2599 double sort_end;
ysr@777 2600 if (G1PrintParCleanupStats) {
ysr@777 2601 sort_end = os::elapsedTime();
ysr@777 2602 gclog_or_tty->print_cr(" sorting: %8.3f ms.",
ysr@777 2603 (sort_end - known_garbage_end)*1000.0);
ysr@777 2604 }
ysr@777 2605
ysr@777 2606 record_concurrent_mark_cleanup_end_work2();
ysr@777 2607 double work2_end;
ysr@777 2608 if (G1PrintParCleanupStats) {
ysr@777 2609 work2_end = os::elapsedTime();
ysr@777 2610 gclog_or_tty->print_cr(" work2: %8.3f ms.",
ysr@777 2611 (work2_end - sort_end)*1000.0);
ysr@777 2612 }
ysr@777 2613 }
ysr@777 2614
johnc@1829 2615 // Add the heap region at the head of the non-incremental collection set
ysr@777 2616 void G1CollectorPolicy::
ysr@777 2617 add_to_collection_set(HeapRegion* hr) {
johnc@1829 2618 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2619 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2620
tonyp@1717 2621 if (G1PrintHeapRegions) {
tonyp@1823 2622 gclog_or_tty->print_cr("added region to cset "
tonyp@1823 2623 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
tonyp@1823 2624 "top "PTR_FORMAT", %s",
tonyp@1823 2625 hr->hrs_index(), hr->bottom(), hr->end(),
tonyp@1823 2626 hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
ysr@777 2627 }
ysr@777 2628
ysr@777 2629 if (_g1->mark_in_progress())
ysr@777 2630 _g1->concurrent_mark()->registerCSetRegion(hr);
ysr@777 2631
johnc@1829 2632 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2633 hr->set_in_collection_set(true);
ysr@777 2634 hr->set_next_in_collection_set(_collection_set);
ysr@777 2635 _collection_set = hr;
ysr@777 2636 _collection_set_size++;
ysr@777 2637 _collection_set_bytes_used_before += hr->used();
tonyp@961 2638 _g1->register_region_with_in_cset_fast_test(hr);
ysr@777 2639 }
ysr@777 2640
johnc@1829 2641 // Initialize the per-collection-set information
johnc@1829 2642 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2643 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2644
johnc@1829 2645 _inc_cset_head = NULL;
johnc@1829 2646 _inc_cset_tail = NULL;
johnc@1829 2647 _inc_cset_size = 0;
johnc@1829 2648 _inc_cset_bytes_used_before = 0;
johnc@1829 2649
johnc@1829 2650 if (in_young_gc_mode()) {
johnc@1829 2651 _inc_cset_young_index = 0;
johnc@1829 2652 }
johnc@1829 2653
johnc@1829 2654 _inc_cset_max_finger = 0;
johnc@1829 2655 _inc_cset_recorded_young_bytes = 0;
johnc@1829 2656 _inc_cset_recorded_rs_lengths = 0;
johnc@1829 2657 _inc_cset_predicted_elapsed_time_ms = 0;
johnc@1829 2658 _inc_cset_predicted_bytes_to_copy = 0;
johnc@1829 2659 _inc_cset_build_state = Active;
johnc@1829 2660 }
johnc@1829 2661
johnc@1829 2662 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2663 // This routine is used when:
johnc@1829 2664 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2665 // evacuation pause,
johnc@1829 2666 // * adding the current allocation region to the incremental cset
johnc@1829 2667 // when it is retired, and
johnc@1829 2668 // * updating existing policy information for a region in the
johnc@1829 2669 // incremental cset via young list RSet sampling.
johnc@1829 2670 // Therefore this routine may be called at a safepoint by the
johnc@1829 2671 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2672 // retiring the current allocation region) or a concurrent
johnc@1829 2673 // refine thread (RSet sampling).
johnc@1829 2674
johnc@1829 2675 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2676 size_t used_bytes = hr->used();
johnc@1829 2677
johnc@1829 2678 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2679 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2680
johnc@1829 2681 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2682
johnc@1829 2683 // Cache the values we have added to the aggregated informtion
johnc@1829 2684 // in the heap region in case we have to remove this region from
johnc@1829 2685 // the incremental collection set, or it is updated by the
johnc@1829 2686 // rset sampling code
johnc@1829 2687 hr->set_recorded_rs_length(rs_length);
johnc@1829 2688 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2689
johnc@1829 2690 #if PREDICTIONS_VERBOSE
johnc@1829 2691 size_t bytes_to_copy = predict_bytes_to_copy(hr);
johnc@1829 2692 _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
johnc@1829 2693
johnc@1829 2694 // Record the number of bytes used in this region
johnc@1829 2695 _inc_cset_recorded_young_bytes += used_bytes;
johnc@1829 2696
johnc@1829 2697 // Cache the values we have added to the aggregated informtion
johnc@1829 2698 // in the heap region in case we have to remove this region from
johnc@1829 2699 // the incremental collection set, or it is updated by the
johnc@1829 2700 // rset sampling code
johnc@1829 2701 hr->set_predicted_bytes_to_copy(bytes_to_copy);
johnc@1829 2702 #endif // PREDICTIONS_VERBOSE
johnc@1829 2703 }
johnc@1829 2704
johnc@1829 2705 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
johnc@1829 2706 // This routine is currently only called as part of the updating of
johnc@1829 2707 // existing policy information for regions in the incremental cset that
johnc@1829 2708 // is performed by the concurrent refine thread(s) as part of young list
johnc@1829 2709 // RSet sampling. Therefore we should not be at a safepoint.
johnc@1829 2710
johnc@1829 2711 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
johnc@1829 2712 assert(hr->is_young(), "it should be");
johnc@1829 2713
johnc@1829 2714 size_t used_bytes = hr->used();
johnc@1829 2715 size_t old_rs_length = hr->recorded_rs_length();
johnc@1829 2716 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
johnc@1829 2717
johnc@1829 2718 // Subtract the old recorded/predicted policy information for
johnc@1829 2719 // the given heap region from the collection set info.
johnc@1829 2720 _inc_cset_recorded_rs_lengths -= old_rs_length;
johnc@1829 2721 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
johnc@1829 2722
johnc@1829 2723 _inc_cset_bytes_used_before -= used_bytes;
johnc@1829 2724
johnc@1829 2725 // Clear the values cached in the heap region
johnc@1829 2726 hr->set_recorded_rs_length(0);
johnc@1829 2727 hr->set_predicted_elapsed_time_ms(0);
johnc@1829 2728
johnc@1829 2729 #if PREDICTIONS_VERBOSE
johnc@1829 2730 size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
johnc@1829 2731 _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
johnc@1829 2732
johnc@1829 2733 // Subtract the number of bytes used in this region
johnc@1829 2734 _inc_cset_recorded_young_bytes -= used_bytes;
johnc@1829 2735
johnc@1829 2736 // Clear the values cached in the heap region
johnc@1829 2737 hr->set_predicted_bytes_to_copy(0);
johnc@1829 2738 #endif // PREDICTIONS_VERBOSE
johnc@1829 2739 }
johnc@1829 2740
johnc@1829 2741 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
johnc@1829 2742 // Update the collection set information that is dependent on the new RS length
johnc@1829 2743 assert(hr->is_young(), "Precondition");
johnc@1829 2744
johnc@1829 2745 remove_from_incremental_cset_info(hr);
johnc@1829 2746 add_to_incremental_cset_info(hr, new_rs_length);
johnc@1829 2747 }
johnc@1829 2748
johnc@1829 2749 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
johnc@1829 2750 assert( hr->is_young(), "invariant");
johnc@1829 2751 assert( hr->young_index_in_cset() == -1, "invariant" );
johnc@1829 2752 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2753
johnc@1829 2754 // We need to clear and set the cached recorded/cached collection set
johnc@1829 2755 // information in the heap region here (before the region gets added
johnc@1829 2756 // to the collection set). An individual heap region's cached values
johnc@1829 2757 // are calculated, aggregated with the policy collection set info,
johnc@1829 2758 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2759 // by the Young List sampling code.
johnc@1829 2760
johnc@1829 2761 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2762 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2763
johnc@1829 2764 HeapWord* hr_end = hr->end();
johnc@1829 2765 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2766
johnc@1829 2767 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2768 hr->set_in_collection_set(true);
johnc@1829 2769 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2770
johnc@1829 2771 _inc_cset_size++;
johnc@1829 2772 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2773
johnc@1829 2774 hr->set_young_index_in_cset((int) _inc_cset_young_index);
johnc@1829 2775 ++_inc_cset_young_index;
johnc@1829 2776 }
johnc@1829 2777
johnc@1829 2778 // Add the region at the RHS of the incremental cset
johnc@1829 2779 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2780 // We should only ever be appending survivors at the end of a pause
johnc@1829 2781 assert( hr->is_survivor(), "Logic");
johnc@1829 2782
johnc@1829 2783 // Do the 'common' stuff
johnc@1829 2784 add_region_to_incremental_cset_common(hr);
johnc@1829 2785
johnc@1829 2786 // Now add the region at the right hand side
johnc@1829 2787 if (_inc_cset_tail == NULL) {
johnc@1829 2788 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2789 _inc_cset_head = hr;
johnc@1829 2790 } else {
johnc@1829 2791 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2792 }
johnc@1829 2793 _inc_cset_tail = hr;
johnc@1829 2794
johnc@1829 2795 if (G1PrintHeapRegions) {
johnc@1829 2796 gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
johnc@1829 2797 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
johnc@1829 2798 "top "PTR_FORMAT", young %s",
johnc@1829 2799 hr->hrs_index(), hr->bottom(), hr->end(),
johnc@1829 2800 hr->top(), (hr->is_young()) ? "YES" : "NO");
johnc@1829 2801 }
johnc@1829 2802 }
johnc@1829 2803
johnc@1829 2804 // Add the region to the LHS of the incremental cset
johnc@1829 2805 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2806 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2807 assert(!hr->is_survivor(), "Logic");
johnc@1829 2808
johnc@1829 2809 // Do the 'common' stuff
johnc@1829 2810 add_region_to_incremental_cset_common(hr);
johnc@1829 2811
johnc@1829 2812 // Add the region at the left hand side
johnc@1829 2813 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2814 if (_inc_cset_head == NULL) {
johnc@1829 2815 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2816 _inc_cset_tail = hr;
johnc@1829 2817 }
johnc@1829 2818 _inc_cset_head = hr;
johnc@1829 2819
johnc@1829 2820 if (G1PrintHeapRegions) {
johnc@1829 2821 gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
johnc@1829 2822 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
johnc@1829 2823 "top "PTR_FORMAT", young %s",
johnc@1829 2824 hr->hrs_index(), hr->bottom(), hr->end(),
johnc@1829 2825 hr->top(), (hr->is_young()) ? "YES" : "NO");
johnc@1829 2826 }
johnc@1829 2827 }
johnc@1829 2828
johnc@1829 2829 #ifndef PRODUCT
johnc@1829 2830 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2831 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2832
johnc@1829 2833 st->print_cr("\nCollection_set:");
johnc@1829 2834 HeapRegion* csr = list_head;
johnc@1829 2835 while (csr != NULL) {
johnc@1829 2836 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2837 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2838 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2839 "age: %4d, y: %d, surv: %d",
johnc@1829 2840 csr->bottom(), csr->end(),
johnc@1829 2841 csr->top(),
johnc@1829 2842 csr->prev_top_at_mark_start(),
johnc@1829 2843 csr->next_top_at_mark_start(),
johnc@1829 2844 csr->top_at_conc_mark_count(),
johnc@1829 2845 csr->age_in_surv_rate_group_cond(),
johnc@1829 2846 csr->is_young(),
johnc@1829 2847 csr->is_survivor());
johnc@1829 2848 csr = next;
johnc@1829 2849 }
johnc@1829 2850 }
johnc@1829 2851 #endif // !PRODUCT
johnc@1829 2852
tonyp@2062 2853 void
tonyp@2011 2854 G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
tonyp@2011 2855 double target_pause_time_ms) {
johnc@1829 2856 // Set this here - in case we're not doing young collections.
johnc@1829 2857 double non_young_start_time_sec = os::elapsedTime();
johnc@1829 2858
ysr@777 2859 start_recording_regions();
ysr@777 2860
tonyp@2011 2861 guarantee(target_pause_time_ms > 0.0,
tonyp@2011 2862 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2011 2863 target_pause_time_ms));
tonyp@2011 2864 guarantee(_collection_set == NULL, "Precondition");
ysr@777 2865
ysr@777 2866 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
ysr@777 2867 double predicted_pause_time_ms = base_time_ms;
ysr@777 2868
tonyp@2011 2869 double time_remaining_ms = target_pause_time_ms - base_time_ms;
ysr@777 2870
ysr@777 2871 // the 10% and 50% values are arbitrary...
tonyp@2011 2872 if (time_remaining_ms < 0.10 * target_pause_time_ms) {
tonyp@2011 2873 time_remaining_ms = 0.50 * target_pause_time_ms;
ysr@777 2874 _within_target = false;
ysr@777 2875 } else {
ysr@777 2876 _within_target = true;
ysr@777 2877 }
ysr@777 2878
ysr@777 2879 // We figure out the number of bytes available for future to-space.
ysr@777 2880 // For new regions without marking information, we must assume the
ysr@777 2881 // worst-case of complete survival. If we have marking information for a
ysr@777 2882 // region, we can bound the amount of live data. We can add a number of
ysr@777 2883 // such regions, as long as the sum of the live data bounds does not
ysr@777 2884 // exceed the available evacuation space.
ysr@777 2885 size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
ysr@777 2886
ysr@777 2887 size_t expansion_bytes =
ysr@777 2888 _g1->expansion_regions() * HeapRegion::GrainBytes;
ysr@777 2889
apetrusenko@1112 2890 _collection_set_bytes_used_before = 0;
apetrusenko@1112 2891 _collection_set_size = 0;
ysr@777 2892
ysr@777 2893 // Adjust for expansion and slop.
ysr@777 2894 max_live_bytes = max_live_bytes + expansion_bytes;
ysr@777 2895
ysr@777 2896 HeapRegion* hr;
ysr@777 2897 if (in_young_gc_mode()) {
ysr@777 2898 double young_start_time_sec = os::elapsedTime();
ysr@777 2899
ysr@777 2900 if (G1PolicyVerbose > 0) {
ysr@777 2901 gclog_or_tty->print_cr("Adding %d young regions to the CSet",
johnc@1829 2902 _g1->young_list()->length());
ysr@777 2903 }
johnc@1829 2904
ysr@777 2905 _young_cset_length = 0;
ysr@777 2906 _last_young_gc_full = full_young_gcs() ? true : false;
johnc@1829 2907
ysr@777 2908 if (_last_young_gc_full)
ysr@777 2909 ++_full_young_pause_num;
ysr@777 2910 else
ysr@777 2911 ++_partial_young_pause_num;
johnc@1829 2912
johnc@1829 2913 // The young list is laid with the survivor regions from the previous
johnc@1829 2914 // pause are appended to the RHS of the young list, i.e.
johnc@1829 2915 // [Newly Young Regions ++ Survivors from last pause].
johnc@1829 2916
johnc@1829 2917 hr = _g1->young_list()->first_survivor_region();
ysr@777 2918 while (hr != NULL) {
johnc@1829 2919 assert(hr->is_survivor(), "badly formed young list");
johnc@1829 2920 hr->set_young();
johnc@1829 2921 hr = hr->get_next_young_region();
ysr@777 2922 }
ysr@777 2923
johnc@1829 2924 // Clear the fields that point to the survivor list - they are
johnc@1829 2925 // all young now.
johnc@1829 2926 _g1->young_list()->clear_survivors();
johnc@1829 2927
johnc@1829 2928 if (_g1->mark_in_progress())
johnc@1829 2929 _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
johnc@1829 2930
johnc@1829 2931 _young_cset_length = _inc_cset_young_index;
johnc@1829 2932 _collection_set = _inc_cset_head;
johnc@1829 2933 _collection_set_size = _inc_cset_size;
johnc@1829 2934 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
johnc@1829 2935
johnc@1829 2936 // For young regions in the collection set, we assume the worst
johnc@1829 2937 // case of complete survival
johnc@1829 2938 max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
johnc@1829 2939
johnc@1829 2940 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
johnc@1829 2941 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
johnc@1829 2942
johnc@1829 2943 // The number of recorded young regions is the incremental
johnc@1829 2944 // collection set's current size
johnc@1829 2945 set_recorded_young_regions(_inc_cset_size);
johnc@1829 2946 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
johnc@1829 2947 set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
johnc@1829 2948 #if PREDICTIONS_VERBOSE
johnc@1829 2949 set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
johnc@1829 2950 #endif // PREDICTIONS_VERBOSE
johnc@1829 2951
johnc@1829 2952 if (G1PolicyVerbose > 0) {
johnc@1829 2953 gclog_or_tty->print_cr(" Added " PTR_FORMAT " Young Regions to CS.",
johnc@1829 2954 _inc_cset_size);
johnc@1829 2955 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
johnc@1829 2956 max_live_bytes/K);
johnc@1829 2957 }
johnc@1829 2958
johnc@1829 2959 assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
ysr@777 2960
ysr@777 2961 double young_end_time_sec = os::elapsedTime();
ysr@777 2962 _recorded_young_cset_choice_time_ms =
ysr@777 2963 (young_end_time_sec - young_start_time_sec) * 1000.0;
ysr@777 2964
johnc@1829 2965 // We are doing young collections so reset this.
johnc@1829 2966 non_young_start_time_sec = young_end_time_sec;
johnc@1829 2967
johnc@1829 2968 // Note we can use either _collection_set_size or
johnc@1829 2969 // _young_cset_length here
johnc@1829 2970 if (_collection_set_size > 0 && _last_young_gc_full) {
ysr@777 2971 // don't bother adding more regions...
ysr@777 2972 goto choose_collection_set_end;
ysr@777 2973 }
ysr@777 2974 }
ysr@777 2975
ysr@777 2976 if (!in_young_gc_mode() || !full_young_gcs()) {
ysr@777 2977 bool should_continue = true;
ysr@777 2978 NumberSeq seq;
ysr@777 2979 double avg_prediction = 100000000000000000.0; // something very large
johnc@1829 2980
ysr@777 2981 do {
ysr@777 2982 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
ysr@777 2983 avg_prediction);
apetrusenko@1112 2984 if (hr != NULL) {
ysr@777 2985 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
ysr@777 2986 time_remaining_ms -= predicted_time_ms;
ysr@777 2987 predicted_pause_time_ms += predicted_time_ms;
ysr@777 2988 add_to_collection_set(hr);
johnc@1829 2989 record_non_young_cset_region(hr);
ysr@777 2990 max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
ysr@777 2991 if (G1PolicyVerbose > 0) {
ysr@777 2992 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
ysr@777 2993 max_live_bytes/K);
ysr@777 2994 }
ysr@777 2995 seq.add(predicted_time_ms);
ysr@777 2996 avg_prediction = seq.avg() + seq.sd();
ysr@777 2997 }
ysr@777 2998 should_continue =
ysr@777 2999 ( hr != NULL) &&
ysr@777 3000 ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
ysr@777 3001 : _collection_set_size < _young_list_fixed_length );
ysr@777 3002 } while (should_continue);
ysr@777 3003
ysr@777 3004 if (!adaptive_young_list_length() &&
ysr@777 3005 _collection_set_size < _young_list_fixed_length)
ysr@777 3006 _should_revert_to_full_young_gcs = true;
ysr@777 3007 }
ysr@777 3008
ysr@777 3009 choose_collection_set_end:
johnc@1829 3010 stop_incremental_cset_building();
johnc@1829 3011
ysr@777 3012 count_CS_bytes_used();
ysr@777 3013
ysr@777 3014 end_recording_regions();
ysr@777 3015
ysr@777 3016 double non_young_end_time_sec = os::elapsedTime();
ysr@777 3017 _recorded_non_young_cset_choice_time_ms =
ysr@777 3018 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
ysr@777 3019 }
ysr@777 3020
ysr@777 3021 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
ysr@777 3022 G1CollectorPolicy::record_full_collection_end();
ysr@777 3023 _collectionSetChooser->updateAfterFullCollection();
ysr@777 3024 }
ysr@777 3025
ysr@777 3026 void G1CollectorPolicy_BestRegionsFirst::
ysr@777 3027 expand_if_possible(size_t numRegions) {
ysr@777 3028 size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
ysr@777 3029 _g1->expand(expansion_bytes);
ysr@777 3030 }
ysr@777 3031
ysr@777 3032 void G1CollectorPolicy_BestRegionsFirst::
tonyp@2062 3033 record_collection_pause_end() {
tonyp@2062 3034 G1CollectorPolicy::record_collection_pause_end();
ysr@777 3035 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 3036 }

mercurial