src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author       tonyp
date         Tue, 24 Aug 2010 17:24:33 -0400
changeset    2315 631f79e71e90
parent       2314 f95d63e2154a
child        2333 016a3628c885
permissions  -rw-r--r--

6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following:
a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions).
b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that just got allocated.
c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab().
d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC.
e) If a thread is unable to allocate a humongous object, it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards.
f) The G1 policy is more careful to set the young list target length to be the survivor number + 1.
g) Lots of code tidy-up, removal, and refactoring to make future changes easier.
Reviewed-by: johnc, ysr
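Item (b) above is the main mechanical change on the allocation path: the thread that wins the race to allocate a block out of a young region dirties only the cards spanning that block, instead of the region allocator dirtying every card in the new region up front. A minimal sketch of the idea, with a hypothetical helper name, signature, and card encoding (not the actual patch):

  // Sketch only: after successfully allocating block_word_size words starting at
  // block_start_word within a young region, dirty just the cards covering that block.
  static void dirty_cards_for_block(unsigned char* cards,      // hypothetical card array
                                    size_t card_size_in_words, // heap words covered per card
                                    size_t block_start_word,
                                    size_t block_word_size) {
    size_t first = block_start_word / card_size_in_words;
    size_t last  = (block_start_word + block_word_size - 1) / card_size_in_words;
    for (size_t i = first; i <= last; i++) {
      cards[i] = 1; // "1" stands in for the dirty value in this sketch
    }
  }

Because each allocating thread touches only the cards for its own block, the card-dirtying cost is spread across the allocating threads rather than being paid entirely by whichever thread claimed the region.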

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@2314 31 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 32 #include "gc_implementation/shared/gcPolicyCounters.hpp"
stefank@2314 33 #include "runtime/arguments.hpp"
stefank@2314 34 #include "runtime/java.hpp"
stefank@2314 35 #include "runtime/mutexLocker.hpp"
stefank@2314 36 #include "utilities/debug.hpp"
ysr@777 37
ysr@777 38 #define PREDICTIONS_VERBOSE 0
ysr@777 39
ysr@777 40 // <NEW PREDICTION>
ysr@777 41
ysr@777 42 // Different defaults for different numbers of GC threads
ysr@777 43 // They were chosen by running GCOld and SPECjbb on debris with different
ysr@777 44 // numbers of GC threads and choosing them based on the results
ysr@777 45
ysr@777 46 // all the same
ysr@777 47 static double rs_length_diff_defaults[] = {
ysr@777 48 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
ysr@777 49 };
ysr@777 50
ysr@777 51 static double cost_per_card_ms_defaults[] = {
ysr@777 52 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
ysr@777 53 };
ysr@777 54
ysr@777 55 // all the same
ysr@777 56 static double fully_young_cards_per_entry_ratio_defaults[] = {
ysr@777 57 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
ysr@777 58 };
ysr@777 59
ysr@777 60 static double cost_per_entry_ms_defaults[] = {
ysr@777 61 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
ysr@777 62 };
ysr@777 63
ysr@777 64 static double cost_per_byte_ms_defaults[] = {
ysr@777 65 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
ysr@777 66 };
ysr@777 67
ysr@777 68 // these should be pretty consistent
ysr@777 69 static double constant_other_time_ms_defaults[] = {
ysr@777 70 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
ysr@777 71 };
ysr@777 72
ysr@777 73
ysr@777 74 static double young_other_cost_per_region_ms_defaults[] = {
ysr@777 75 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
ysr@777 76 };
ysr@777 77
ysr@777 78 static double non_young_other_cost_per_region_ms_defaults[] = {
ysr@777 79 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
ysr@777 80 };
ysr@777 81
ysr@777 82 // </NEW PREDICTION>
ysr@777 83
ysr@777 84 G1CollectorPolicy::G1CollectorPolicy() :
jmasa@2188 85 _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
jmasa@2188 86 ? ParallelGCThreads : 1),
jmasa@2188 87
jmasa@2188 88
ysr@777 89 _n_pauses(0),
ysr@777 90 _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 91 _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 92 _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 93 _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 94 _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 95 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 96 _all_pause_times_ms(new NumberSeq()),
ysr@777 97 _stop_world_start(0.0),
ysr@777 98 _all_stop_world_times_ms(new NumberSeq()),
ysr@777 99 _all_yield_times_ms(new NumberSeq()),
ysr@777 100
ysr@777 101 _all_mod_union_times_ms(new NumberSeq()),
ysr@777 102
apetrusenko@1112 103 _summary(new Summary()),
ysr@777 104
johnc@1325 105 #ifndef PRODUCT
ysr@777 106 _cur_clear_ct_time_ms(0.0),
johnc@1325 107 _min_clear_cc_time_ms(-1.0),
johnc@1325 108 _max_clear_cc_time_ms(-1.0),
johnc@1325 109 _cur_clear_cc_time_ms(0.0),
johnc@1325 110 _cum_clear_cc_time_ms(0.0),
johnc@1325 111 _num_cc_clears(0L),
johnc@1325 112 #endif
ysr@777 113
ysr@777 114 _region_num_young(0),
ysr@777 115 _region_num_tenured(0),
ysr@777 116 _prev_region_num_young(0),
ysr@777 117 _prev_region_num_tenured(0),
ysr@777 118
ysr@777 119 _aux_num(10),
ysr@777 120 _all_aux_times_ms(new NumberSeq[_aux_num]),
ysr@777 121 _cur_aux_start_times_ms(new double[_aux_num]),
ysr@777 122 _cur_aux_times_ms(new double[_aux_num]),
ysr@777 123 _cur_aux_times_set(new bool[_aux_num]),
ysr@777 124
ysr@777 125 _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 126 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 127 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 128
ysr@777 129 // <NEW PREDICTION>
ysr@777 130
ysr@777 131 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 132 _prev_collection_pause_end_ms(0.0),
ysr@777 133 _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 134 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 135 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 136 _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 137 _partially_young_cards_per_entry_ratio_seq(
ysr@777 138 new TruncatedSeq(TruncatedSeqLength)),
ysr@777 139 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 140 _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 141 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 142 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 143 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 144 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 145 _non_young_other_cost_per_region_ms_seq(
ysr@777 146 new TruncatedSeq(TruncatedSeqLength)),
ysr@777 147
ysr@777 148 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 149 _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 150 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 151
johnc@1186 152 _pause_time_target_ms((double) MaxGCPauseMillis),
ysr@777 153
ysr@777 154 // </NEW PREDICTION>
ysr@777 155
ysr@777 156 _in_young_gc_mode(false),
ysr@777 157 _full_young_gcs(true),
ysr@777 158 _full_young_pause_num(0),
ysr@777 159 _partial_young_pause_num(0),
ysr@777 160
ysr@777 161 _during_marking(false),
ysr@777 162 _in_marking_window(false),
ysr@777 163 _in_marking_window_im(false),
ysr@777 164
ysr@777 165 _known_garbage_ratio(0.0),
ysr@777 166 _known_garbage_bytes(0),
ysr@777 167
ysr@777 168 _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 169
ysr@777 170 _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 171
ysr@777 172 _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 173 _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 174
ysr@777 175 _recent_avg_pause_time_ratio(0.0),
ysr@777 176 _num_markings(0),
ysr@777 177 _n_marks(0),
ysr@777 178 _n_pauses_at_mark_end(0),
ysr@777 179
ysr@777 180 _all_full_gc_times_ms(new NumberSeq()),
ysr@777 181
ysr@777 182 // G1PausesBtwnConcMark defaults to -1
ysr@777 183 // so the hack is to do the cast QQQ FIXME
ysr@777 184 _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
ysr@777 185 _n_marks_since_last_pause(0),
tonyp@1794 186 _initiate_conc_mark_if_possible(false),
tonyp@1794 187 _during_initial_mark_pause(false),
ysr@777 188 _should_revert_to_full_young_gcs(false),
ysr@777 189 _last_full_young_gc(false),
ysr@777 190
ysr@777 191 _prev_collection_pause_used_at_end_bytes(0),
ysr@777 192
ysr@777 193 _collection_set(NULL),
johnc@1829 194 _collection_set_size(0),
johnc@1829 195 _collection_set_bytes_used_before(0),
johnc@1829 196
johnc@1829 197 // Incremental CSet attributes
johnc@1829 198 _inc_cset_build_state(Inactive),
johnc@1829 199 _inc_cset_head(NULL),
johnc@1829 200 _inc_cset_tail(NULL),
johnc@1829 201 _inc_cset_size(0),
johnc@1829 202 _inc_cset_young_index(0),
johnc@1829 203 _inc_cset_bytes_used_before(0),
johnc@1829 204 _inc_cset_max_finger(NULL),
johnc@1829 205 _inc_cset_recorded_young_bytes(0),
johnc@1829 206 _inc_cset_recorded_rs_lengths(0),
johnc@1829 207 _inc_cset_predicted_elapsed_time_ms(0.0),
johnc@1829 208 _inc_cset_predicted_bytes_to_copy(0),
johnc@1829 209
ysr@777 210 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 211 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 212 #endif // _MSC_VER
ysr@777 213
ysr@777 214 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
ysr@777 215 G1YoungSurvRateNumRegionsSummary)),
ysr@777 216 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
apetrusenko@980 217 G1YoungSurvRateNumRegionsSummary)),
ysr@777 218 // add here any more surv rate groups
apetrusenko@980 219 _recorded_survivor_regions(0),
apetrusenko@980 220 _recorded_survivor_head(NULL),
apetrusenko@980 221 _recorded_survivor_tail(NULL),
tonyp@1791 222 _survivors_age_table(true),
tonyp@1791 223
tonyp@1791 224 _gc_overhead_perc(0.0)
apetrusenko@980 225
ysr@777 226 {
tonyp@1377 227 // Set up the region size and associated fields. Given that the
tonyp@1377 228 // policy is created before the heap, we have to set this up here,
tonyp@1377 229 // so it's done as soon as possible.
tonyp@1377 230 HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
iveresov@1696 231 HeapRegionRemSet::setup_remset_size();
tonyp@1377 232
apetrusenko@1826 233 // Verify PLAB sizes
apetrusenko@1826 234 const uint region_size = HeapRegion::GrainWords;
apetrusenko@1826 235 if (YoungPLABSize > region_size || OldPLABSize > region_size) {
apetrusenko@1826 236 char buffer[128];
apetrusenko@1826 237 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
apetrusenko@1826 238 OldPLABSize > region_size ? "Old" : "Young", region_size);
apetrusenko@1826 239 vm_exit_during_initialization(buffer);
apetrusenko@1826 240 }
apetrusenko@1826 241
ysr@777 242 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
ysr@777 243 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
ysr@777 244
tonyp@1966 245 _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
ysr@777 246 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
ysr@777 247 _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
ysr@777 248
ysr@777 249 _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
ysr@777 250 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
ysr@777 251
ysr@777 252 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
ysr@777 253
ysr@777 254 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
ysr@777 255
ysr@777 256 _par_last_termination_times_ms = new double[_parallel_gc_threads];
tonyp@1966 257 _par_last_termination_attempts = new double[_parallel_gc_threads];
tonyp@1966 258 _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
ysr@777 259
ysr@777 260 // start conservatively
johnc@1186 261 _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
ysr@777 262
ysr@777 263 // <NEW PREDICTION>
ysr@777 264
ysr@777 265 int index;
ysr@777 266 if (ParallelGCThreads == 0)
ysr@777 267 index = 0;
ysr@777 268 else if (ParallelGCThreads > 8)
ysr@777 269 index = 7;
ysr@777 270 else
ysr@777 271 index = ParallelGCThreads - 1;
ysr@777 272
ysr@777 273 _pending_card_diff_seq->add(0.0);
ysr@777 274 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
ysr@777 275 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
ysr@777 276 _fully_young_cards_per_entry_ratio_seq->add(
ysr@777 277 fully_young_cards_per_entry_ratio_defaults[index]);
ysr@777 278 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
ysr@777 279 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
ysr@777 280 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
ysr@777 281 _young_other_cost_per_region_ms_seq->add(
ysr@777 282 young_other_cost_per_region_ms_defaults[index]);
ysr@777 283 _non_young_other_cost_per_region_ms_seq->add(
ysr@777 284 non_young_other_cost_per_region_ms_defaults[index]);
ysr@777 285
ysr@777 286 // </NEW PREDICTION>
ysr@777 287
tonyp@1965 288 // Below, we might need to calculate the pause time target based on
tonyp@1965 289 // the pause interval. When we do so we are going to give G1 maximum
tonyp@1965 290 // flexibility and allow it to do pauses when it needs to. So, we'll
tonyp@1965 291 // arrange for the pause interval to be the pause time target + 1 to
tonyp@1965 292 // ensure that a) the pause time target is maximized with respect to
tonyp@1965 293 // the pause interval and b) we maintain the invariant that pause
tonyp@1965 294 // time target < pause interval. If the user does not want this
tonyp@1965 295 // maximum flexibility, they will have to set the pause interval
tonyp@1965 296 // explicitly.
tonyp@1965 297
tonyp@1965 298 // First make sure that, if either parameter is set, its value is
tonyp@1965 299 // reasonable.
tonyp@1965 300 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
tonyp@1965 301 if (MaxGCPauseMillis < 1) {
tonyp@1965 302 vm_exit_during_initialization("MaxGCPauseMillis should be "
tonyp@1965 303 "greater than 0");
tonyp@1965 304 }
tonyp@1965 305 }
tonyp@1965 306 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 307 if (GCPauseIntervalMillis < 1) {
tonyp@1965 308 vm_exit_during_initialization("GCPauseIntervalMillis should be "
tonyp@1965 309 "greater than 0");
tonyp@1965 310 }
tonyp@1965 311 }
tonyp@1965 312
tonyp@1965 313 // Then, if the pause time target parameter was not set, set it to
tonyp@1965 314 // the default value.
tonyp@1965 315 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
tonyp@1965 316 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 317 // The default pause time target in G1 is 200ms
tonyp@1965 318 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
tonyp@1965 319 } else {
tonyp@1965 320 // We do not allow the pause interval to be set without the
tonyp@1965 321 // pause time target
tonyp@1965 322 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
tonyp@1965 323 "without setting MaxGCPauseMillis");
tonyp@1965 324 }
tonyp@1965 325 }
tonyp@1965 326
tonyp@1965 327 // Then, if the interval parameter was not set, set it according to
tonyp@1965 328 // the pause time target (this will also deal with the case when the
tonyp@1965 329 // pause time target is the default value).
tonyp@1965 330 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 331 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
tonyp@1965 332 }
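// For example, if MaxGCPauseMillis is left at its 200 ms default, GCPauseIntervalMillis
// ends up at 201 ms, which keeps the invariant above while giving G1 the freedom to
// schedule pauses essentially back-to-back when it needs to.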
tonyp@1965 333
tonyp@1965 334 // Finally, make sure that the two parameters are consistent.
tonyp@1965 335 if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
tonyp@1965 336 char buffer[256];
tonyp@1965 337 jio_snprintf(buffer, 256,
tonyp@1965 338 "MaxGCPauseMillis (%u) should be less than "
tonyp@1965 339 "GCPauseIntervalMillis (%u)",
tonyp@1965 340 MaxGCPauseMillis, GCPauseIntervalMillis);
tonyp@1965 341 vm_exit_during_initialization(buffer);
tonyp@1965 342 }
tonyp@1965 343
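// The MMU tracker is handed the target and interval in seconds: it records the pause
// history so that no more than max_gc_time of stop-the-world time is scheduled inside
// any sliding time_slice window.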
tonyp@1965 344 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
johnc@1186 345 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
ysr@777 346 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
johnc@1186 347 _sigma = (double) G1ConfidencePercent / 100.0;
ysr@777 348
ysr@777 349 // start conservatively (around 50ms is about right)
ysr@777 350 _concurrent_mark_init_times_ms->add(0.05);
ysr@777 351 _concurrent_mark_remark_times_ms->add(0.05);
ysr@777 352 _concurrent_mark_cleanup_times_ms->add(0.20);
ysr@777 353 _tenuring_threshold = MaxTenuringThreshold;
ysr@777 354
tonyp@1717 355 // If G1FixedSurvivorSpaceSize is 0, which means the size is not
tonyp@1717 356 // fixed, then _max_survivor_regions will be calculated in
johnc@1829 357 // calculate_young_list_target_length() during initialization.
tonyp@1717 358 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
apetrusenko@980 359
tonyp@1791 360 assert(GCTimeRatio > 0,
tonyp@1791 361 "we should have set it to a default value set_g1_gc_flags() "
tonyp@1791 362 "if a user set it to 0");
tonyp@1791 363 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
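// For instance, a GCTimeRatio of 9 works out to 100 * 1/(1 + 9) = 10%, i.e. a target of
// roughly one part GC time to nine parts application time.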
tonyp@1791 364
ysr@777 365 initialize_all();
ysr@777 366 }
ysr@777 367
ysr@777 368 // Increment "i", mod "len"
ysr@777 369 static void inc_mod(int& i, int len) {
ysr@777 370 i++; if (i == len) i = 0;
ysr@777 371 }
ysr@777 372
ysr@777 373 void G1CollectorPolicy::initialize_flags() {
ysr@777 374 set_min_alignment(HeapRegion::GrainBytes);
ysr@777 375 set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
apetrusenko@982 376 if (SurvivorRatio < 1) {
apetrusenko@982 377 vm_exit_during_initialization("Invalid survivor ratio specified");
apetrusenko@982 378 }
ysr@777 379 CollectorPolicy::initialize_flags();
ysr@777 380 }
ysr@777 381
tonyp@1720 382 // The easiest way to deal with the parsing of the NewSize /
tonyp@1720 383 // MaxNewSize / etc. parameters is to re-use the code in the
tonyp@1720 384 // TwoGenerationCollectorPolicy class. This is similar to what
tonyp@1720 385 // ParallelScavenge does with its GenerationSizer class (see
tonyp@1720 386 // ParallelScavengeHeap::initialize()). We might change this in the
tonyp@1720 387 // future, but it's a good start.
tonyp@1720 388 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
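// Converts a generation size in bytes into a whole number of regions (rounding down,
// but never below one region); for example, with 1 MB regions a 20 MB gen0 size maps
// to 20 regions.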
tonyp@1720 389 size_t size_to_region_num(size_t byte_size) {
tonyp@1720 390 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
tonyp@1720 391 }
tonyp@1720 392
tonyp@1720 393 public:
tonyp@1720 394 G1YoungGenSizer() {
tonyp@1720 395 initialize_flags();
tonyp@1720 396 initialize_size_info();
tonyp@1720 397 }
tonyp@1720 398
tonyp@1720 399 size_t min_young_region_num() {
tonyp@1720 400 return size_to_region_num(_min_gen0_size);
tonyp@1720 401 }
tonyp@1720 402 size_t initial_young_region_num() {
tonyp@1720 403 return size_to_region_num(_initial_gen0_size);
tonyp@1720 404 }
tonyp@1720 405 size_t max_young_region_num() {
tonyp@1720 406 return size_to_region_num(_max_gen0_size);
tonyp@1720 407 }
tonyp@1720 408 };
tonyp@1720 409
ysr@777 410 void G1CollectorPolicy::init() {
ysr@777 411 // Set aside an initial future to_space.
ysr@777 412 _g1 = G1CollectedHeap::heap();
ysr@777 413
ysr@777 414 assert(Heap_lock->owned_by_self(), "Locking discipline.");
ysr@777 415
apetrusenko@980 416 initialize_gc_policy_counters();
apetrusenko@980 417
ysr@777 418 if (G1Gen) {
ysr@777 419 _in_young_gc_mode = true;
ysr@777 420
tonyp@1720 421 G1YoungGenSizer sizer;
tonyp@1720 422 size_t initial_region_num = sizer.initial_young_region_num();
tonyp@1720 423
tonyp@1720 424 if (UseAdaptiveSizePolicy) {
ysr@777 425 set_adaptive_young_list_length(true);
ysr@777 426 _young_list_fixed_length = 0;
ysr@777 427 } else {
ysr@777 428 set_adaptive_young_list_length(false);
tonyp@1720 429 _young_list_fixed_length = initial_region_num;
ysr@777 430 }
johnc@1829 431 _free_regions_at_end_of_collection = _g1->free_regions();
johnc@1829 432 calculate_young_list_min_length();
johnc@1829 433 guarantee( _young_list_min_length == 0, "invariant, not enough info" );
johnc@1829 434 calculate_young_list_target_length();
johnc@1829 435 } else {
ysr@777 436 _young_list_fixed_length = 0;
ysr@777 437 _in_young_gc_mode = false;
ysr@777 438 }
johnc@1829 439
johnc@1829 440 // We may immediately start allocating regions and placing them on the
johnc@1829 441 // collection set list. Initialize the per-collection set info
johnc@1829 442 start_incremental_cset_building();
ysr@777 443 }
ysr@777 444
apetrusenko@980 445 // Create the jstat counters for the policy.
apetrusenko@980 446 void G1CollectorPolicy::initialize_gc_policy_counters()
apetrusenko@980 447 {
apetrusenko@980 448 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
apetrusenko@980 449 }
apetrusenko@980 450
ysr@777 451 void G1CollectorPolicy::calculate_young_list_min_length() {
ysr@777 452 _young_list_min_length = 0;
ysr@777 453
ysr@777 454 if (!adaptive_young_list_length())
ysr@777 455 return;
ysr@777 456
ysr@777 457 if (_alloc_rate_ms_seq->num() > 3) {
ysr@777 458 double now_sec = os::elapsedTime();
ysr@777 459 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
ysr@777 460 double alloc_rate_ms = predict_alloc_rate_ms();
tonyp@2315 461 size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
tonyp@2315 462 size_t current_region_num = _g1->young_list()->length();
ysr@777 463 _young_list_min_length = min_regions + current_region_num;
ysr@777 464 }
ysr@777 465 }
ysr@777 466
johnc@1829 467 void G1CollectorPolicy::calculate_young_list_target_length() {
ysr@777 468 if (adaptive_young_list_length()) {
ysr@777 469 size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
johnc@1829 470 calculate_young_list_target_length(rs_lengths);
ysr@777 471 } else {
ysr@777 472 if (full_young_gcs())
ysr@777 473 _young_list_target_length = _young_list_fixed_length;
ysr@777 474 else
ysr@777 475 _young_list_target_length = _young_list_fixed_length / 2;
ysr@777 476 }
tonyp@2315 477
tonyp@2315 478 // Make sure we allow the application to allocate at least one
tonyp@2315 479 // region before we need to do a collection again.
tonyp@2315 480 size_t min_length = _g1->young_list()->length() + 1;
tonyp@2315 481 _young_list_target_length = MAX2(_young_list_target_length, min_length);
apetrusenko@980 482 calculate_survivors_policy();
ysr@777 483 }
ysr@777 484
johnc@1829 485 void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
ysr@777 486 guarantee( adaptive_young_list_length(), "pre-condition" );
johnc@1829 487 guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
ysr@777 488
ysr@777 489 double start_time_sec = os::elapsedTime();
tonyp@1717 490 size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
ysr@777 491 min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
ysr@777 492 size_t reserve_regions =
ysr@777 493 (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
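// For example, with G1ReservePercent at 10 and a 1000-region heap, 100 regions are held
// back from the free-region count used in the young list sizing below.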
ysr@777 494
ysr@777 495 if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
ysr@777 496 // we are in fully-young mode and there are free regions in the heap
ysr@777 497
apetrusenko@980 498 double survivor_regions_evac_time =
apetrusenko@980 499 predict_survivor_regions_evac_time();
apetrusenko@980 500
ysr@777 501 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
ysr@777 502 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
ysr@777 503 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
johnc@1829 504 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
apetrusenko@980 505 double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
apetrusenko@980 506 + survivor_regions_evac_time;
johnc@1829 507
ysr@777 508 // the result
ysr@777 509 size_t final_young_length = 0;
johnc@1829 510
johnc@1829 511 size_t init_free_regions =
johnc@1829 512 MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
johnc@1829 513
johnc@1829 514 // if we're still under the pause target...
johnc@1829 515 if (base_time_ms <= target_pause_time_ms) {
johnc@1829 516 // We make sure that the shortest young length that makes sense
johnc@1829 517 // fits within the target pause time.
johnc@1829 518 size_t min_young_length = 1;
johnc@1829 519
johnc@1829 520 if (predict_will_fit(min_young_length, base_time_ms,
johnc@1829 521 init_free_regions, target_pause_time_ms)) {
johnc@1829 522 // The shortest young length will fit within the target pause time;
johnc@1829 523 // we'll now check whether the absolute maximum number of young
johnc@1829 524 // regions will fit in the target pause time. If not, we'll do
johnc@1829 525 // a binary search between min_young_length and max_young_length
johnc@1829 526 size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
johnc@1829 527 size_t max_young_length = abs_max_young_length;
johnc@1829 528
johnc@1829 529 if (max_young_length > min_young_length) {
johnc@1829 530 // Let's check if the initial max young length will fit within the
johnc@1829 531 // target pause. If so then there is no need to search for a maximal
johnc@1829 532 // young length - we'll return the initial maximum
johnc@1829 533
johnc@1829 534 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 535 init_free_regions, target_pause_time_ms)) {
johnc@1829 536 // The maximum young length will satisfy the target pause time.
johnc@1829 537 // We are done so set min young length to this maximum length.
johnc@1829 538 // The code after the loop will then set final_young_length using
johnc@1829 539 // the value cached in the minimum length.
johnc@1829 540 min_young_length = max_young_length;
johnc@1829 541 } else {
johnc@1829 542 // The maximum possible number of young regions will not fit within
johnc@1829 543 // the target pause time so let's search....
johnc@1829 544
johnc@1829 545 size_t diff = (max_young_length - min_young_length) / 2;
johnc@1829 546 max_young_length = min_young_length + diff;
johnc@1829 547
johnc@1829 548 while (max_young_length > min_young_length) {
johnc@1829 549 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 550 init_free_regions, target_pause_time_ms)) {
johnc@1829 551
johnc@1829 552 // The current max young length will fit within the target
johnc@1829 553 // pause time. Note we do not exit the loop here. By setting
johnc@1829 554 // min = max, and then increasing the max below means that
johnc@1829 555 // we will continue searching for an upper bound in the
johnc@1829 556 // range [max..max+diff]
johnc@1829 557 min_young_length = max_young_length;
johnc@1829 558 }
johnc@1829 559 diff = (max_young_length - min_young_length) / 2;
johnc@1829 560 max_young_length = min_young_length + diff;
johnc@1829 561 }
johnc@1829 562 // the above loop found a maximal young length that will fit
johnc@1829 563 // within the target pause time.
johnc@1829 564 }
johnc@1829 565 assert(min_young_length <= abs_max_young_length, "just checking");
johnc@1829 566 }
johnc@1829 567 final_young_length = min_young_length;
johnc@1829 568 }
ysr@777 569 }
johnc@1829 570 // and we're done!
ysr@777 571
ysr@777 572 // we should have at least one region in the target young length
apetrusenko@980 573 _young_list_target_length =
tonyp@2315 574 final_young_length + _recorded_survivor_regions;
ysr@777 575
ysr@777 576 // let's keep an eye on how long we spend on this calculation
ysr@777 577 // right now, I assume that we'll print it when we need it; we
ysr@777 578 // should really add it to the breakdown of a pause
ysr@777 579 double end_time_sec = os::elapsedTime();
ysr@777 580 double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
ysr@777 581
johnc@1829 582 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 583 // leave this in for debugging, just in case
johnc@1829 584 gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
johnc@1829 585 "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
ysr@777 586 target_pause_time_ms,
johnc@1829 587 _young_list_target_length
ysr@777 588 elapsed_time_ms,
ysr@777 589 full_young_gcs() ? "full" : "partial",
tonyp@1794 590 during_initial_mark_pause() ? " i-m" : "",
apetrusenko@980 591 _in_marking_window,
apetrusenko@980 592 _in_marking_window_im);
johnc@1829 593 #endif // TRACE_CALC_YOUNG_LENGTH
ysr@777 594
ysr@777 595 if (_young_list_target_length < _young_list_min_length) {
johnc@1829 596 // bummer; this means that, if we do a pause when the maximal
johnc@1829 597 // length dictates, we'll violate the pause spacing target (the
ysr@777 598 // min length was calculated based on the application's current
ysr@777 599 // alloc rate);
ysr@777 600
ysr@777 601 // so, we have to bite the bullet, and allocate the minimum
ysr@777 602 // number. We'll violate our target, but we just can't meet it.
ysr@777 603
johnc@1829 604 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 605 // leave this in for debugging, just in case
ysr@777 606 gclog_or_tty->print_cr("adjusted target length from "
johnc@1829 607 SIZE_FORMAT " to " SIZE_FORMAT,
johnc@1829 608 _young_list_target_length, _young_list_min_length);
johnc@1829 609 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 610
johnc@1829 611 _young_list_target_length = _young_list_min_length;
ysr@777 612 }
ysr@777 613 } else {
ysr@777 614 // we are in a partially-young mode or we've run out of regions (due
ysr@777 615 // to evacuation failure)
ysr@777 616
johnc@1829 617 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 618 // leave this in for debugging, just in case
ysr@777 619 gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
johnc@1829 620 _young_list_min_length);
johnc@1829 621 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 622 // we'll do the pause as soon as possible by choosing the minimum
tonyp@2315 623 _young_list_target_length = _young_list_min_length;
ysr@777 624 }
ysr@777 625
ysr@777 626 _rs_lengths_prediction = rs_lengths;
ysr@777 627 }
ysr@777 628
johnc@1829 629 // This is used by: calculate_young_list_target_length(rs_length). It
johnc@1829 630 // returns true iff:
johnc@1829 631 // the predicted pause time for the given young list will not overflow
johnc@1829 632 // the target pause time
johnc@1829 633 // and:
johnc@1829 634 // the predicted amount of surviving data will not overflow
johnc@1829 635 // the amount of free space available for survivor regions.
johnc@1829 636 //
ysr@777 637 bool
johnc@1829 638 G1CollectorPolicy::predict_will_fit(size_t young_length,
johnc@1829 639 double base_time_ms,
johnc@1829 640 size_t init_free_regions,
johnc@1829 641 double target_pause_time_ms) {
ysr@777 642
ysr@777 643 if (young_length >= init_free_regions)
ysr@777 644 // end condition 1: not enough space for the young regions
ysr@777 645 return false;
ysr@777 646
ysr@777 647 double accum_surv_rate_adj = 0.0;
ysr@777 648 double accum_surv_rate =
ysr@777 649 accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
johnc@1829 650
ysr@777 651 size_t bytes_to_copy =
ysr@777 652 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
johnc@1829 653
ysr@777 654 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
johnc@1829 655
ysr@777 656 double young_other_time_ms =
johnc@1829 657 predict_young_other_time_ms(young_length);
johnc@1829 658
ysr@777 659 double pause_time_ms =
johnc@1829 660 base_time_ms + copy_time_ms + young_other_time_ms;
ysr@777 661
ysr@777 662 if (pause_time_ms > target_pause_time_ms)
ysr@777 663 // end condition 2: over the target pause time
ysr@777 664 return false;
ysr@777 665
ysr@777 666 size_t free_bytes =
ysr@777 667 (init_free_regions - young_length) * HeapRegion::GrainBytes;
ysr@777 668
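// Conservatively require the predicted surviving bytes, scaled up by (2 + sigma), to fit
// in the space left over once the young regions are carved out; sigma comes from
// G1ConfidencePercent (see the constructor above).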
ysr@777 669 if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
ysr@777 670 // end condition 3: out of to-space (conservatively)
ysr@777 671 return false;
ysr@777 672
ysr@777 673 // success!
ysr@777 674 return true;
ysr@777 675 }
ysr@777 676
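// Sums the predicted evacuation time of every region on the recorded survivor list;
// this feeds into the base pause time used by the young list sizing above.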
apetrusenko@980 677 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 678 double survivor_regions_evac_time = 0.0;
apetrusenko@980 679 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 680 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 681 r = r->get_next_young_region()) {
apetrusenko@980 682 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 683 }
apetrusenko@980 684 return survivor_regions_evac_time;
apetrusenko@980 685 }
apetrusenko@980 686
ysr@777 687 void G1CollectorPolicy::check_prediction_validity() {
ysr@777 688 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 689
johnc@1829 690 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 691 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 692 // add 10% to avoid having to recalculate often
ysr@777 693 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
johnc@1829 694 calculate_young_list_target_length(rs_lengths_prediction);
ysr@777 695 }
ysr@777 696 }
ysr@777 697
ysr@777 698 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
ysr@777 699 bool is_tlab,
ysr@777 700 bool* gc_overhead_limit_was_exceeded) {
ysr@777 701 guarantee(false, "Not using this policy feature yet.");
ysr@777 702 return NULL;
ysr@777 703 }
ysr@777 704
ysr@777 705 // This method controls how a collector handles one or more
ysr@777 706 // of its generations being fully allocated.
ysr@777 707 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
ysr@777 708 bool is_tlab) {
ysr@777 709 guarantee(false, "Not using this policy feature yet.");
ysr@777 710 return NULL;
ysr@777 711 }
ysr@777 712
ysr@777 713
ysr@777 714 #ifndef PRODUCT
ysr@777 715 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 716 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 717 return
ysr@777 718 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 719 // also call verify_young_ages on any additional surv rate groups
ysr@777 720 }
ysr@777 721
ysr@777 722 bool
ysr@777 723 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 724 SurvRateGroup *surv_rate_group) {
ysr@777 725 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 726
ysr@777 727 const char* name = surv_rate_group->name();
ysr@777 728 bool ret = true;
ysr@777 729 int prev_age = -1;
ysr@777 730
ysr@777 731 for (HeapRegion* curr = head;
ysr@777 732 curr != NULL;
ysr@777 733 curr = curr->get_next_young_region()) {
ysr@777 734 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 735 if (group == NULL && !curr->is_survivor()) {
ysr@777 736 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 737 ret = false;
ysr@777 738 }
ysr@777 739
ysr@777 740 if (surv_rate_group == group) {
ysr@777 741 int age = curr->age_in_surv_rate_group();
ysr@777 742
ysr@777 743 if (age < 0) {
ysr@777 744 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 745 ret = false;
ysr@777 746 }
ysr@777 747
ysr@777 748 if (age <= prev_age) {
ysr@777 749 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 750 "(%d, %d)", name, age, prev_age);
ysr@777 751 ret = false;
ysr@777 752 }
ysr@777 753 prev_age = age;
ysr@777 754 }
ysr@777 755 }
ysr@777 756
ysr@777 757 return ret;
ysr@777 758 }
ysr@777 759 #endif // PRODUCT
ysr@777 760
ysr@777 761 void G1CollectorPolicy::record_full_collection_start() {
ysr@777 762 _cur_collection_start_sec = os::elapsedTime();
ysr@777 763 // Release the future to-space so that it is available for compaction into.
ysr@777 764 _g1->set_full_collection();
ysr@777 765 }
ysr@777 766
ysr@777 767 void G1CollectorPolicy::record_full_collection_end() {
ysr@777 768 // Consider this like a collection pause for the purposes of allocation
ysr@777 769 // since last pause.
ysr@777 770 double end_sec = os::elapsedTime();
ysr@777 771 double full_gc_time_sec = end_sec - _cur_collection_start_sec;
ysr@777 772 double full_gc_time_ms = full_gc_time_sec * 1000.0;
ysr@777 773
ysr@777 774 _all_full_gc_times_ms->add(full_gc_time_ms);
ysr@777 775
tonyp@1030 776 update_recent_gc_times(end_sec, full_gc_time_ms);
ysr@777 777
ysr@777 778 _g1->clear_full_collection();
ysr@777 779
ysr@777 780 // "Nuke" the heuristics that control the fully/partially young GC
ysr@777 781 // transitions and make sure we start with fully young GCs after the
ysr@777 782 // Full GC.
ysr@777 783 set_full_young_gcs(true);
ysr@777 784 _last_full_young_gc = false;
ysr@777 785 _should_revert_to_full_young_gcs = false;
tonyp@1794 786 clear_initiate_conc_mark_if_possible();
tonyp@1794 787 clear_during_initial_mark_pause();
ysr@777 788 _known_garbage_bytes = 0;
ysr@777 789 _known_garbage_ratio = 0.0;
ysr@777 790 _in_marking_window = false;
ysr@777 791 _in_marking_window_im = false;
ysr@777 792
ysr@777 793 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 794 // also call this on any additional surv rate groups
ysr@777 795
apetrusenko@980 796 record_survivor_regions(0, NULL, NULL);
apetrusenko@980 797
ysr@777 798 _prev_region_num_young = _region_num_young;
ysr@777 799 _prev_region_num_tenured = _region_num_tenured;
ysr@777 800
ysr@777 801 _free_regions_at_end_of_collection = _g1->free_regions();
apetrusenko@980 802 // Reset survivors SurvRateGroup.
apetrusenko@980 803 _survivor_surv_rate_group->reset();
ysr@777 804 calculate_young_list_min_length();
johnc@1829 805 calculate_young_list_target_length();
tonyp@2315 806 }
ysr@777 807
ysr@777 808 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
ysr@777 809 _bytes_in_to_space_before_gc += bytes;
ysr@777 810 }
ysr@777 811
ysr@777 812 void G1CollectorPolicy::record_after_bytes(size_t bytes) {
ysr@777 813 _bytes_in_to_space_after_gc += bytes;
ysr@777 814 }
ysr@777 815
ysr@777 816 void G1CollectorPolicy::record_stop_world_start() {
ysr@777 817 _stop_world_start = os::elapsedTime();
ysr@777 818 }
ysr@777 819
ysr@777 820 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
ysr@777 821 size_t start_used) {
ysr@777 822 if (PrintGCDetails) {
ysr@777 823 gclog_or_tty->stamp(PrintGCTimeStamps);
ysr@777 824 gclog_or_tty->print("[GC pause");
ysr@777 825 if (in_young_gc_mode())
ysr@777 826 gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
ysr@777 827 }
ysr@777 828
tonyp@2315 829 assert(_g1->used() == _g1->recalculate_used(),
tonyp@2315 830 err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
tonyp@2315 831 _g1->used(), _g1->recalculate_used()));
ysr@777 832
ysr@777 833 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
ysr@777 834 _all_stop_world_times_ms->add(s_w_t_ms);
ysr@777 835 _stop_world_start = 0.0;
ysr@777 836
ysr@777 837 _cur_collection_start_sec = start_time_sec;
ysr@777 838 _cur_collection_pause_used_at_start_bytes = start_used;
ysr@777 839 _cur_collection_pause_used_regions_at_start = _g1->used_regions();
ysr@777 840 _pending_cards = _g1->pending_card_num();
ysr@777 841 _max_pending_cards = _g1->max_pending_card_num();
ysr@777 842
ysr@777 843 _bytes_in_to_space_before_gc = 0;
ysr@777 844 _bytes_in_to_space_after_gc = 0;
ysr@777 845 _bytes_in_collection_set_before_gc = 0;
ysr@777 846
ysr@777 847 #ifdef DEBUG
ysr@777 848 // initialise these to something well known so that we can spot
ysr@777 849 // if they are not set properly
ysr@777 850
ysr@777 851 for (int i = 0; i < _parallel_gc_threads; ++i) {
tonyp@1966 852 _par_last_gc_worker_start_times_ms[i] = -1234.0;
tonyp@1966 853 _par_last_ext_root_scan_times_ms[i] = -1234.0;
tonyp@1966 854 _par_last_mark_stack_scan_times_ms[i] = -1234.0;
tonyp@1966 855 _par_last_update_rs_times_ms[i] = -1234.0;
tonyp@1966 856 _par_last_update_rs_processed_buffers[i] = -1234.0;
tonyp@1966 857 _par_last_scan_rs_times_ms[i] = -1234.0;
tonyp@1966 858 _par_last_obj_copy_times_ms[i] = -1234.0;
tonyp@1966 859 _par_last_termination_times_ms[i] = -1234.0;
tonyp@1966 860 _par_last_termination_attempts[i] = -1234.0;
tonyp@1966 861 _par_last_gc_worker_end_times_ms[i] = -1234.0;
ysr@777 862 }
ysr@777 863 #endif
ysr@777 864
ysr@777 865 for (int i = 0; i < _aux_num; ++i) {
ysr@777 866 _cur_aux_times_ms[i] = 0.0;
ysr@777 867 _cur_aux_times_set[i] = false;
ysr@777 868 }
ysr@777 869
ysr@777 870 _satb_drain_time_set = false;
ysr@777 871 _last_satb_drain_processed_buffers = -1;
ysr@777 872
ysr@777 873 if (in_young_gc_mode())
ysr@777 874 _last_young_gc_full = false;
ysr@777 875
ysr@777 876 // do that for any other surv rate groups
ysr@777 877 _short_lived_surv_rate_group->stop_adding_regions();
tonyp@1717 878 _survivors_age_table.clear();
apetrusenko@980 879
ysr@777 880 assert( verify_young_ages(), "region age verification" );
ysr@777 881 }
ysr@777 882
ysr@777 883 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
ysr@777 884 _mark_closure_time_ms = mark_closure_time_ms;
ysr@777 885 }
ysr@777 886
ysr@777 887 void G1CollectorPolicy::record_concurrent_mark_init_start() {
ysr@777 888 _mark_init_start_sec = os::elapsedTime();
ysr@777 889 guarantee(!in_young_gc_mode(), "should not be here in young GC mode");
ysr@777 890 }
ysr@777 891
ysr@777 892 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
ysr@777 893 mark_init_elapsed_time_ms) {
ysr@777 894 _during_marking = true;
tonyp@1794 895 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
tonyp@1794 896 clear_during_initial_mark_pause();
ysr@777 897 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
ysr@777 898 }
ysr@777 899
ysr@777 900 void G1CollectorPolicy::record_concurrent_mark_init_end() {
ysr@777 901 double end_time_sec = os::elapsedTime();
ysr@777 902 double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
ysr@777 903 _concurrent_mark_init_times_ms->add(elapsed_time_ms);
ysr@777 904 record_concurrent_mark_init_end_pre(elapsed_time_ms);
ysr@777 905
ysr@777 906 _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
ysr@777 907 }
ysr@777 908
ysr@777 909 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
ysr@777 910 _mark_remark_start_sec = os::elapsedTime();
ysr@777 911 _during_marking = false;
ysr@777 912 }
ysr@777 913
ysr@777 914 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 915 double end_time_sec = os::elapsedTime();
ysr@777 916 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 917 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 918 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 919 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 920
ysr@777 921 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 922 }
ysr@777 923
ysr@777 924 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
ysr@777 925 _mark_cleanup_start_sec = os::elapsedTime();
ysr@777 926 }
ysr@777 927
ysr@777 928 void
ysr@777 929 G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 930 size_t max_live_bytes) {
ysr@777 931 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 932 record_concurrent_mark_cleanup_end_work2();
ysr@777 933 }
ysr@777 934
ysr@777 935 void
ysr@777 936 G1CollectorPolicy::
ysr@777 937 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 938 size_t max_live_bytes) {
ysr@777 939 if (_n_marks < 2) _n_marks++;
ysr@777 940 if (G1PolicyVerbose > 0)
ysr@777 941 gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
ysr@777 942 " (of " SIZE_FORMAT " MB heap).",
ysr@777 943 max_live_bytes/M, _g1->capacity()/M);
ysr@777 944 }
ysr@777 945
ysr@777 946 // The important thing about this is that it includes "os::elapsedTime".
ysr@777 947 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
ysr@777 948 double end_time_sec = os::elapsedTime();
ysr@777 949 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
ysr@777 950 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
ysr@777 951 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 952 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 953
ysr@777 954 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
ysr@777 955
ysr@777 956 _num_markings++;
ysr@777 957
ysr@777 958 // We did a marking, so reset the "since_last_mark" variables.
ysr@777 959 double considerConcMarkCost = 1.0;
ysr@777 960 // If there are available processors, concurrent activity is free...
ysr@777 961 if (Threads::number_of_non_daemon_threads() * 2 <
ysr@777 962 os::active_processor_count()) {
ysr@777 963 considerConcMarkCost = 0.0;
ysr@777 964 }
ysr@777 965 _n_pauses_at_mark_end = _n_pauses;
ysr@777 966 _n_marks_since_last_pause++;
ysr@777 967 }
ysr@777 968
ysr@777 969 void
ysr@777 970 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
ysr@777 971 if (in_young_gc_mode()) {
ysr@777 972 _should_revert_to_full_young_gcs = false;
ysr@777 973 _last_full_young_gc = true;
ysr@777 974 _in_marking_window = false;
ysr@777 975 if (adaptive_young_list_length())
johnc@1829 976 calculate_young_list_target_length();
ysr@777 977 }
ysr@777 978 }
ysr@777 979
ysr@777 980 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 981 if (_stop_world_start > 0.0) {
ysr@777 982 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 983 _all_yield_times_ms->add(yield_ms);
ysr@777 984 }
ysr@777 985 }
ysr@777 986
ysr@777 987 void G1CollectorPolicy::record_concurrent_pause_end() {
ysr@777 988 }
ysr@777 989
ysr@777 990 void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
ysr@777 991 _cur_CH_strong_roots_end_sec = os::elapsedTime();
ysr@777 992 _cur_CH_strong_roots_dur_ms =
ysr@777 993 (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
ysr@777 994 }
ysr@777 995
ysr@777 996 void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
ysr@777 997 _cur_G1_strong_roots_end_sec = os::elapsedTime();
ysr@777 998 _cur_G1_strong_roots_dur_ms =
ysr@777 999 (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
ysr@777 1000 }
ysr@777 1001
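// Sums n consecutive entries of the circular buffer sum_arr (of capacity N), starting
// at index start and wrapping around.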
ysr@777 1002 template<class T>
ysr@777 1003 T sum_of(T* sum_arr, int start, int n, int N) {
ysr@777 1004 T sum = (T)0;
ysr@777 1005 for (int i = 0; i < n; i++) {
ysr@777 1006 int j = (start + i) % N;
ysr@777 1007 sum += sum_arr[j];
ysr@777 1008 }
ysr@777 1009 return sum;
ysr@777 1010 }
ysr@777 1011
tonyp@1966 1012 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 1013 const char* str,
tonyp@1966 1014 double* data,
ysr@777 1015 bool summary) {
ysr@777 1016 double min = data[0], max = data[0];
ysr@777 1017 double total = 0.0;
ysr@777 1018 int j;
ysr@777 1019 for (j = 0; j < level; ++j)
ysr@777 1020 gclog_or_tty->print(" ");
ysr@777 1021 gclog_or_tty->print("[%s (ms):", str);
ysr@777 1022 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1023 double val = data[i];
ysr@777 1024 if (val < min)
ysr@777 1025 min = val;
ysr@777 1026 if (val > max)
ysr@777 1027 max = val;
ysr@777 1028 total += val;
ysr@777 1029 gclog_or_tty->print(" %3.1lf", val);
ysr@777 1030 }
ysr@777 1031 if (summary) {
ysr@777 1032 gclog_or_tty->print_cr("");
ysr@777 1033 double avg = total / (double) ParallelGCThreads;
ysr@777 1034 gclog_or_tty->print(" ");
ysr@777 1035 for (j = 0; j < level; ++j)
ysr@777 1036 gclog_or_tty->print(" ");
ysr@777 1037 gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
ysr@777 1038 avg, min, max);
ysr@777 1039 }
ysr@777 1040 gclog_or_tty->print_cr("]");
ysr@777 1041 }
ysr@777 1042
tonyp@1966 1043 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1044 const char* str,
tonyp@1966 1045 double* data,
tonyp@1966 1046 bool summary) {
ysr@777 1047 double min = data[0], max = data[0];
ysr@777 1048 double total = 0.0;
ysr@777 1049 int j;
ysr@777 1050 for (j = 0; j < level; ++j)
ysr@777 1051 gclog_or_tty->print(" ");
ysr@777 1052 gclog_or_tty->print("[%s :", str);
ysr@777 1053 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1054 double val = data[i];
ysr@777 1055 if (val < min)
ysr@777 1056 min = val;
ysr@777 1057 if (val > max)
ysr@777 1058 max = val;
ysr@777 1059 total += val;
ysr@777 1060 gclog_or_tty->print(" %d", (int) val);
ysr@777 1061 }
ysr@777 1062 if (summary) {
ysr@777 1063 gclog_or_tty->print_cr("");
ysr@777 1064 double avg = total / (double) ParallelGCThreads;
ysr@777 1065 gclog_or_tty->print(" ");
ysr@777 1066 for (j = 0; j < level; ++j)
ysr@777 1067 gclog_or_tty->print(" ");
ysr@777 1068 gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d",
ysr@777 1069 (int)total, (int)avg, (int)min, (int)max);
ysr@777 1070 }
ysr@777 1071 gclog_or_tty->print_cr("]");
ysr@777 1072 }
ysr@777 1073
ysr@777 1074 void G1CollectorPolicy::print_stats (int level,
ysr@777 1075 const char* str,
ysr@777 1076 double value) {
ysr@777 1077 for (int j = 0; j < level; ++j)
ysr@777 1078 gclog_or_tty->print(" ");
ysr@777 1079 gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1080 }
ysr@777 1081
ysr@777 1082 void G1CollectorPolicy::print_stats (int level,
ysr@777 1083 const char* str,
ysr@777 1084 int value) {
ysr@777 1085 for (int j = 0; j < level; ++j)
ysr@777 1086 gclog_or_tty->print(" ");
ysr@777 1087 gclog_or_tty->print_cr("[%s: %d]", str, value);
ysr@777 1088 }
ysr@777 1089
ysr@777 1090 double G1CollectorPolicy::avg_value (double* data) {
jmasa@2188 1091 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1092 double ret = 0.0;
ysr@777 1093 for (uint i = 0; i < ParallelGCThreads; ++i)
ysr@777 1094 ret += data[i];
ysr@777 1095 return ret / (double) ParallelGCThreads;
ysr@777 1096 } else {
ysr@777 1097 return data[0];
ysr@777 1098 }
ysr@777 1099 }
ysr@777 1100
ysr@777 1101 double G1CollectorPolicy::max_value (double* data) {
jmasa@2188 1102 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1103 double ret = data[0];
ysr@777 1104 for (uint i = 1; i < ParallelGCThreads; ++i)
ysr@777 1105 if (data[i] > ret)
ysr@777 1106 ret = data[i];
ysr@777 1107 return ret;
ysr@777 1108 } else {
ysr@777 1109 return data[0];
ysr@777 1110 }
ysr@777 1111 }
ysr@777 1112
ysr@777 1113 double G1CollectorPolicy::sum_of_values (double* data) {
jmasa@2188 1114 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1115 double sum = 0.0;
ysr@777 1116 for (uint i = 0; i < ParallelGCThreads; i++)
ysr@777 1117 sum += data[i];
ysr@777 1118 return sum;
ysr@777 1119 } else {
ysr@777 1120 return data[0];
ysr@777 1121 }
ysr@777 1122 }
ysr@777 1123
ysr@777 1124 double G1CollectorPolicy::max_sum (double* data1,
ysr@777 1125 double* data2) {
ysr@777 1126 double ret = data1[0] + data2[0];
ysr@777 1127
jmasa@2188 1128 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 1129 for (uint i = 1; i < ParallelGCThreads; ++i) {
ysr@777 1130 double data = data1[i] + data2[i];
ysr@777 1131 if (data > ret)
ysr@777 1132 ret = data;
ysr@777 1133 }
ysr@777 1134 }
ysr@777 1135 return ret;
ysr@777 1136 }
ysr@777 1137
ysr@777 1138 // Anything below that is considered to be zero
ysr@777 1139 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1140
tonyp@2062 1141 void G1CollectorPolicy::record_collection_pause_end() {
ysr@777 1142 double end_time_sec = os::elapsedTime();
ysr@777 1143 double elapsed_ms = _last_pause_time_ms;
jmasa@2188 1144 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 1145 double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
ysr@777 1146 size_t rs_size =
ysr@777 1147 _cur_collection_pause_used_regions_at_start - collection_set_size();
ysr@777 1148 size_t cur_used_bytes = _g1->used();
ysr@777 1149 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1150 bool last_pause_included_initial_mark = false;
tonyp@2062 1151 bool update_stats = !_g1->evacuation_failed();
ysr@777 1152
ysr@777 1153 #ifndef PRODUCT
ysr@777 1154 if (G1YoungSurvRateVerbose) {
ysr@777 1155 gclog_or_tty->print_cr("");
ysr@777 1156 _short_lived_surv_rate_group->print();
ysr@777 1157 // do that for any other surv rate groups too
ysr@777 1158 }
ysr@777 1159 #endif // PRODUCT
ysr@777 1160
ysr@777 1161 if (in_young_gc_mode()) {
tonyp@1794 1162 last_pause_included_initial_mark = during_initial_mark_pause();
ysr@777 1163 if (last_pause_included_initial_mark)
ysr@777 1164 record_concurrent_mark_init_end_pre(0.0);
ysr@777 1165
ysr@777 1166 size_t min_used_targ =
tonyp@1718 1167 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
ysr@777 1168
tonyp@1794 1169
tonyp@1794 1170 if (!_g1->mark_in_progress() && !_last_full_young_gc) {
tonyp@1794 1171 assert(!last_pause_included_initial_mark, "invariant");
tonyp@1794 1172 if (cur_used_bytes > min_used_targ &&
tonyp@1794 1173 cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
tonyp@1794 1174 assert(!during_initial_mark_pause(), "we should not see this here");
tonyp@1794 1175
tonyp@1794 1176 // Note: this might have already been set, if during the last
tonyp@1794 1177 // pause we decided to start a cycle but at the beginning of
tonyp@1794 1178 // this pause we decided to postpone it. That's OK.
tonyp@1794 1179 set_initiate_conc_mark_if_possible();
ysr@777 1180 }
ysr@777 1181 }
ysr@777 1182
ysr@777 1183 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
ysr@777 1184 }
ysr@777 1185
ysr@777 1186 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1187 end_time_sec, false);
ysr@777 1188
ysr@777 1189 guarantee(_cur_collection_pause_used_regions_at_start >=
ysr@777 1190 collection_set_size(),
ysr@777 1191 "Negative RS size?");
ysr@777 1192
ysr@777 1193 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1194 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1195 // can lead to more memory being used during collection than was used
ysr@777 1196 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1197 // Pauses in which evacuation failed can also lead to negative
ysr@777 1198 // collections, since no space is reclaimed from a region containing an
ysr@777 1199 // object whose evacuation failed.
ysr@777 1200 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1201 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1202 // (DLD, 10/05.)
ysr@777 1203 assert((true || parallel) // Always using GC LABs now.
ysr@777 1204 || _g1->evacuation_failed()
ysr@777 1205 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1206 "Negative collection");
ysr@777 1207
ysr@777 1208 size_t freed_bytes =
ysr@777 1209 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1210 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1211
ysr@777 1212 double survival_fraction =
ysr@777 1213 (double)surviving_bytes/
ysr@777 1214 (double)_collection_set_bytes_used_before;
ysr@777 1215
ysr@777 1216 _n_pauses++;
ysr@777 1217
tonyp@1030 1218 if (update_stats) {
ysr@777 1219 _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms);
ysr@777 1220 _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms);
ysr@777 1221 _recent_evac_times_ms->add(evac_ms);
ysr@777 1222 _recent_pause_times_ms->add(elapsed_ms);
ysr@777 1223
ysr@777 1224 _recent_rs_sizes->add(rs_size);
ysr@777 1225
ysr@777 1226 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1227 // fragmentation can produce negative collections. Same with evac
ysr@777 1228 // failure.
ysr@777 1229 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1230 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1231 // (DLD, 10/05.)
ysr@777 1232 assert((true || parallel)
ysr@777 1233 || _g1->evacuation_failed()
ysr@777 1234 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1235 "Or else negative collection!");
ysr@777 1236 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
ysr@777 1237 _recent_CS_bytes_surviving->add(surviving_bytes);
ysr@777 1238
ysr@777 1239 // this is where we update the allocation rate of the application
ysr@777 1240 double app_time_ms =
ysr@777 1241 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1242 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1243 // This usually happens due to the timer not having the required
ysr@777 1244 // granularity. Some Linuxes are the usual culprits.
ysr@777 1245 // We'll just set it to something (arbitrarily) small.
ysr@777 1246 app_time_ms = 1.0;
ysr@777 1247 }
ysr@777 1248 size_t regions_allocated =
ysr@777 1249 (_region_num_young - _prev_region_num_young) +
ysr@777 1250 (_region_num_tenured - _prev_region_num_tenured);
ysr@777 1251 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1252 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1253 _prev_region_num_young = _region_num_young;
ysr@777 1254 _prev_region_num_tenured = _region_num_tenured;
ysr@777 1255
ysr@777 1256 double interval_ms =
ysr@777 1257 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1258 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1259 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1260 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1261 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1262 #ifndef PRODUCT
ysr@1521 1263 // Dump info to allow post-facto debugging
ysr@1521 1264 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1265 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1266 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1267 _recent_gc_times_ms->dump();
ysr@1521 1268 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1269 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1270 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1271 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1272 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1273 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1274 #endif // !PRODUCT
ysr@1522 1275 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1276 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1277 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1278 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1279 } else {
ysr@1521 1280 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1281 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1282 }
ysr@1521 1283 }
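// Illustrative worked example for the ratio above (values assumed, not from
// a real run): if the truncated sequence holds 100.0 ms of recent GC time
// and the interval back to the oldest recorded pause end is 2000.0 ms, the
// ratio is 100.0 / 2000.0 = 0.05, i.e. roughly 5% of recent wall-clock time
// spent in GC pauses. Values outside [0.0, 1.0] can only come from timer
// anomalies and are clipped by the block above.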
ysr@777 1284 }
ysr@777 1285
ysr@777 1286 if (G1PolicyVerbose > 1) {
ysr@777 1287 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
ysr@777 1288 }
ysr@777 1289
tonyp@2062 1290 PauseSummary* summary = _summary;
ysr@777 1291
ysr@777 1292 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
ysr@777 1293 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
ysr@777 1294 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
ysr@777 1295 double update_rs_processed_buffers =
ysr@777 1296 sum_of_values(_par_last_update_rs_processed_buffers);
ysr@777 1297 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
ysr@777 1298 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
ysr@777 1299 double termination_time = avg_value(_par_last_termination_times_ms);
ysr@777 1300
tonyp@1083 1301 double parallel_other_time = _cur_collection_par_time_ms -
tonyp@1083 1302 (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
johnc@1829 1303 scan_rs_time + obj_copy_time + termination_time);
tonyp@1030 1304 if (update_stats) {
ysr@777 1305 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 1306 guarantee(body_summary != NULL, "should not be null!");
ysr@777 1307
ysr@777 1308 if (_satb_drain_time_set)
ysr@777 1309 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
ysr@777 1310 else
ysr@777 1311 body_summary->record_satb_drain_time_ms(0.0);
ysr@777 1312 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
ysr@777 1313 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
ysr@777 1314 body_summary->record_update_rs_time_ms(update_rs_time);
ysr@777 1315 body_summary->record_scan_rs_time_ms(scan_rs_time);
ysr@777 1316 body_summary->record_obj_copy_time_ms(obj_copy_time);
ysr@777 1317 if (parallel) {
ysr@777 1318 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
ysr@777 1319 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
ysr@777 1320 body_summary->record_termination_time_ms(termination_time);
ysr@777 1321 body_summary->record_parallel_other_time_ms(parallel_other_time);
ysr@777 1322 }
ysr@777 1323 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
ysr@777 1324 }
ysr@777 1325
ysr@777 1326 if (G1PolicyVerbose > 1) {
ysr@777 1327 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1328 " CH Strong: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1329 " G1 Strong: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1330 " Evac: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1331 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1332 " |RS|: " SIZE_FORMAT,
ysr@777 1333 elapsed_ms, recent_avg_time_for_pauses_ms(),
ysr@777 1334 _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(),
ysr@777 1335 _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(),
ysr@777 1336 evac_ms, recent_avg_time_for_evac_ms(),
ysr@777 1337 scan_rs_time,
ysr@777 1338 recent_avg_time_for_pauses_ms() -
ysr@777 1339 recent_avg_time_for_G1_strong_ms(),
ysr@777 1340 rs_size);
ysr@777 1341
ysr@777 1342 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
ysr@777 1343 " At end " SIZE_FORMAT "K\n"
ysr@777 1344 " garbage : " SIZE_FORMAT "K"
ysr@777 1345 " of " SIZE_FORMAT "K\n"
ysr@777 1346 " survival : %6.2f%% (%6.2f%% avg)",
ysr@777 1347 _cur_collection_pause_used_at_start_bytes/K,
ysr@777 1348 _g1->used()/K, freed_bytes/K,
ysr@777 1349 _collection_set_bytes_used_before/K,
ysr@777 1350 survival_fraction*100.0,
ysr@777 1351 recent_avg_survival_fraction()*100.0);
ysr@777 1352 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
ysr@777 1353 recent_avg_pause_time_ratio() * 100.0);
ysr@777 1354 }
ysr@777 1355
ysr@777 1356 double other_time_ms = elapsed_ms;
ysr@777 1357
tonyp@2062 1358 if (_satb_drain_time_set) {
tonyp@2062 1359 other_time_ms -= _cur_satb_drain_time_ms;
ysr@777 1360 }
ysr@777 1361
tonyp@2062 1362 if (parallel) {
tonyp@2062 1363 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
tonyp@2062 1364 } else {
tonyp@2062 1365 other_time_ms -=
tonyp@2062 1366 update_rs_time +
tonyp@2062 1367 ext_root_scan_time + mark_stack_scan_time +
tonyp@2062 1368 scan_rs_time + obj_copy_time;
tonyp@2062 1369 }
tonyp@2062 1370
ysr@777 1371 if (PrintGCDetails) {
tonyp@2062 1372 gclog_or_tty->print_cr("%s, %1.8lf secs]",
ysr@777 1373 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1374 elapsed_ms / 1000.0);
ysr@777 1375
tonyp@2062 1376 if (_satb_drain_time_set) {
tonyp@2062 1377 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
tonyp@2062 1378 }
tonyp@2062 1379 if (_last_satb_drain_processed_buffers >= 0) {
tonyp@2062 1380 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
tonyp@2062 1381 }
tonyp@2062 1382 if (parallel) {
tonyp@2062 1383 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
tonyp@2062 1384 print_par_stats(2, "GC Worker Start Time",
tonyp@2062 1385 _par_last_gc_worker_start_times_ms, false);
tonyp@2062 1386 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
tonyp@2062 1387 print_par_sizes(3, "Processed Buffers",
tonyp@2062 1388 _par_last_update_rs_processed_buffers, true);
tonyp@2062 1389 print_par_stats(2, "Ext Root Scanning",
tonyp@2062 1390 _par_last_ext_root_scan_times_ms);
tonyp@2062 1391 print_par_stats(2, "Mark Stack Scanning",
tonyp@2062 1392 _par_last_mark_stack_scan_times_ms);
tonyp@2062 1393 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
tonyp@2062 1394 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
tonyp@2062 1395 print_par_stats(2, "Termination", _par_last_termination_times_ms);
tonyp@2062 1396 print_par_sizes(3, "Termination Attempts",
tonyp@2062 1397 _par_last_termination_attempts, true);
tonyp@2062 1398 print_par_stats(2, "GC Worker End Time",
tonyp@2062 1399 _par_last_gc_worker_end_times_ms, false);
tonyp@2062 1400 print_stats(2, "Other", parallel_other_time);
tonyp@2062 1401 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
tonyp@2062 1402 } else {
tonyp@2062 1403 print_stats(1, "Update RS", update_rs_time);
tonyp@2062 1404 print_stats(2, "Processed Buffers",
tonyp@2062 1405 (int)update_rs_processed_buffers);
tonyp@2062 1406 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
tonyp@2062 1407 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
tonyp@2062 1408 print_stats(1, "Scan RS", scan_rs_time);
tonyp@2062 1409 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1410 }
johnc@1325 1411 #ifndef PRODUCT
johnc@1325 1412 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1413 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1414 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1415 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1416 if (_num_cc_clears > 0) {
johnc@1325 1417 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1418 }
johnc@1325 1419 #endif
ysr@777 1420 print_stats(1, "Other", other_time_ms);
johnc@1829 1421 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
johnc@1829 1422
ysr@777 1423 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1424 if (_cur_aux_times_set[i]) {
ysr@777 1425 char buffer[96];
ysr@777 1426 sprintf(buffer, "Aux%d", i);
ysr@777 1427 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1428 }
ysr@777 1429 }
ysr@777 1430 }
ysr@777 1431 if (PrintGCDetails)
ysr@777 1432 gclog_or_tty->print(" [");
ysr@777 1433 if (PrintGC || PrintGCDetails)
ysr@777 1434 _g1->print_size_transition(gclog_or_tty,
ysr@777 1435 _cur_collection_pause_used_at_start_bytes,
ysr@777 1436 _g1->used(), _g1->capacity());
ysr@777 1437 if (PrintGCDetails)
ysr@777 1438 gclog_or_tty->print_cr("]");
ysr@777 1439
ysr@777 1440 _all_pause_times_ms->add(elapsed_ms);
tonyp@1083 1441 if (update_stats) {
tonyp@1083 1442 summary->record_total_time_ms(elapsed_ms);
tonyp@1083 1443 summary->record_other_time_ms(other_time_ms);
tonyp@1083 1444 }
ysr@777 1445 for (int i = 0; i < _aux_num; ++i)
ysr@777 1446 if (_cur_aux_times_set[i])
ysr@777 1447 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
ysr@777 1448
ysr@777 1449 // Reset marks-between-pauses counter.
ysr@777 1450 _n_marks_since_last_pause = 0;
ysr@777 1451
ysr@777 1452 // Update the efficiency-since-mark vars.
ysr@777 1453 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1454 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1455 // This usually happens due to the timer not having the required
ysr@777 1456 // granularity. Some Linuxes are the usual culprits.
ysr@777 1457 // We'll just set it to something (arbitrarily) small.
ysr@777 1458 proc_ms = 1.0;
ysr@777 1459 }
ysr@777 1460 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1461
ysr@777 1462 bool new_in_marking_window = _in_marking_window;
ysr@777 1463 bool new_in_marking_window_im = false;
tonyp@1794 1464 if (during_initial_mark_pause()) {
ysr@777 1465 new_in_marking_window = true;
ysr@777 1466 new_in_marking_window_im = true;
ysr@777 1467 }
ysr@777 1468
ysr@777 1469 if (in_young_gc_mode()) {
ysr@777 1470 if (_last_full_young_gc) {
ysr@777 1471 set_full_young_gcs(false);
ysr@777 1472 _last_full_young_gc = false;
ysr@777 1473 }
ysr@777 1474
ysr@777 1475 if ( !_last_young_gc_full ) {
ysr@777 1476 if ( _should_revert_to_full_young_gcs ||
ysr@777 1477 _known_garbage_ratio < 0.05 ||
ysr@777 1478 (adaptive_young_list_length() &&
ysr@777 1479 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
ysr@777 1480 set_full_young_gcs(true);
ysr@777 1481 }
ysr@777 1482 }
ysr@777 1483 _should_revert_to_full_young_gcs = false;
ysr@777 1484
ysr@777 1485 if (_last_young_gc_full && !_during_marking)
ysr@777 1486 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1487 }
ysr@777 1488
ysr@777 1489 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1490 // do that for any other surv rate groups
ysr@777 1491
ysr@777 1492 // <NEW PREDICTION>
ysr@777 1493
apetrusenko@1112 1494 if (update_stats) {
ysr@777 1495 double pause_time_ms = elapsed_ms;
ysr@777 1496
ysr@777 1497 size_t diff = 0;
ysr@777 1498 if (_max_pending_cards >= _pending_cards)
ysr@777 1499 diff = _max_pending_cards - _pending_cards;
ysr@777 1500 _pending_card_diff_seq->add((double) diff);
ysr@777 1501
ysr@777 1502 double cost_per_card_ms = 0.0;
ysr@777 1503 if (_pending_cards > 0) {
ysr@777 1504 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1505 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1506 }
ysr@777 1507
ysr@777 1508 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1509
ysr@777 1510 double cost_per_entry_ms = 0.0;
ysr@777 1511 if (cards_scanned > 10) {
ysr@777 1512 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
ysr@777 1513 if (_last_young_gc_full)
ysr@777 1514 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1515 else
ysr@777 1516 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1517 }
ysr@777 1518
ysr@777 1519 if (_max_rs_lengths > 0) {
ysr@777 1520 double cards_per_entry_ratio =
ysr@777 1521 (double) cards_scanned / (double) _max_rs_lengths;
ysr@777 1522 if (_last_young_gc_full)
ysr@777 1523 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1524 else
ysr@777 1525 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1526 }
ysr@777 1527
ysr@777 1528 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
ysr@777 1529 if (rs_length_diff >= 0)
ysr@777 1530 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1531
ysr@777 1532 size_t copied_bytes = surviving_bytes;
ysr@777 1533 double cost_per_byte_ms = 0.0;
ysr@777 1534 if (copied_bytes > 0) {
ysr@777 1535 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
ysr@777 1536 if (_in_marking_window)
ysr@777 1537 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
ysr@777 1538 else
ysr@777 1539 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
ysr@777 1540 }
ysr@777 1541
ysr@777 1542 double all_other_time_ms = pause_time_ms -
johnc@1829 1543 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1544 _mark_closure_time_ms + termination_time);
ysr@777 1545
ysr@777 1546 double young_other_time_ms = 0.0;
ysr@777 1547 if (_recorded_young_regions > 0) {
ysr@777 1548 young_other_time_ms =
ysr@777 1549 _recorded_young_cset_choice_time_ms +
ysr@777 1550 _recorded_young_free_cset_time_ms;
ysr@777 1551 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
ysr@777 1552 (double) _recorded_young_regions);
ysr@777 1553 }
ysr@777 1554 double non_young_other_time_ms = 0.0;
ysr@777 1555 if (_recorded_non_young_regions > 0) {
ysr@777 1556 non_young_other_time_ms =
ysr@777 1557 _recorded_non_young_cset_choice_time_ms +
ysr@777 1558 _recorded_non_young_free_cset_time_ms;
ysr@777 1559
ysr@777 1560 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
ysr@777 1561 (double) _recorded_non_young_regions);
ysr@777 1562 }
ysr@777 1563
ysr@777 1564 double constant_other_time_ms = all_other_time_ms -
ysr@777 1565 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1566 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1567
ysr@777 1568 double survival_ratio = 0.0;
ysr@777 1569 if (_bytes_in_collection_set_before_gc > 0) {
ysr@777 1570 survival_ratio = (double) bytes_in_to_space_during_gc() /
ysr@777 1571 (double) _bytes_in_collection_set_before_gc;
ysr@777 1572 }
ysr@777 1573
ysr@777 1574 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1575 _scanned_cards_seq->add((double) cards_scanned);
ysr@777 1576 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1577
ysr@777 1578 double expensive_region_limit_ms =
johnc@1186 1579 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1580 if (expensive_region_limit_ms < 0.0) {
ysr@777 1581 // this means that the other time was predicted to be longer
ysr@777 1582 // than the max pause time
johnc@1186 1583 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1584 }
ysr@777 1585 _expensive_region_limit_ms = expensive_region_limit_ms;
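// Illustrative arithmetic (flag value assumed): with MaxGCPauseMillis = 200
// and a predicted constant other time of 5 ms, the limit above becomes
// 200 - 5 = 195 ms; a single region predicted to take longer than that is
// treated as too expensive (see check_if_region_is_too_expensive() below).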
ysr@777 1586
ysr@777 1587 if (PREDICTIONS_VERBOSE) {
ysr@777 1588 gclog_or_tty->print_cr("");
ysr@777 1589 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
johnc@1829 1590 "REGIONS %d %d %d "
ysr@777 1591 "PENDING_CARDS %d %d "
ysr@777 1592 "CARDS_SCANNED %d %d "
ysr@777 1593 "RS_LENGTHS %d %d "
ysr@777 1594 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
ysr@777 1595 "SURVIVAL_RATIO %1.6lf %1.6lf "
ysr@777 1596 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
ysr@777 1597 "OTHER_YOUNG %1.6lf %1.6lf "
ysr@777 1598 "OTHER_NON_YOUNG %1.6lf %1.6lf "
ysr@777 1599 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
ysr@777 1600 "ELAPSED %1.6lf %1.6lf ",
ysr@777 1601 _cur_collection_start_sec,
ysr@777 1602 (!_last_young_gc_full) ? 2 :
ysr@777 1603 (last_pause_included_initial_mark) ? 1 : 0,
ysr@777 1604 _recorded_region_num,
ysr@777 1605 _recorded_young_regions,
ysr@777 1606 _recorded_non_young_regions,
ysr@777 1607 _predicted_pending_cards, _pending_cards,
ysr@777 1608 _predicted_cards_scanned, cards_scanned,
ysr@777 1609 _predicted_rs_lengths, _max_rs_lengths,
ysr@777 1610 _predicted_rs_update_time_ms, update_rs_time,
ysr@777 1611 _predicted_rs_scan_time_ms, scan_rs_time,
ysr@777 1612 _predicted_survival_ratio, survival_ratio,
ysr@777 1613 _predicted_object_copy_time_ms, obj_copy_time,
ysr@777 1614 _predicted_constant_other_time_ms, constant_other_time_ms,
ysr@777 1615 _predicted_young_other_time_ms, young_other_time_ms,
ysr@777 1616 _predicted_non_young_other_time_ms,
ysr@777 1617 non_young_other_time_ms,
ysr@777 1618 _vtime_diff_ms, termination_time,
ysr@777 1619 _predicted_pause_time_ms, elapsed_ms);
ysr@777 1620 }
ysr@777 1621
ysr@777 1622 if (G1PolicyVerbose > 0) {
ysr@777 1623 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
ysr@777 1624 _predicted_pause_time_ms,
ysr@777 1625 (_within_target) ? "within" : "outside",
ysr@777 1626 elapsed_ms);
ysr@777 1627 }
ysr@777 1628
ysr@777 1629 }
ysr@777 1630
ysr@777 1631 _in_marking_window = new_in_marking_window;
ysr@777 1632 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1633 _free_regions_at_end_of_collection = _g1->free_regions();
ysr@777 1634 calculate_young_list_min_length();
johnc@1829 1635 calculate_young_list_target_length();
ysr@777 1636
iveresov@1546 1637 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1638 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1639 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
ysr@777 1640 // </NEW PREDICTION>
ysr@777 1641 }
ysr@777 1642
ysr@777 1643 // <NEW PREDICTION>
ysr@777 1644
iveresov@1546 1645 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1646 double update_rs_processed_buffers,
iveresov@1546 1647 double goal_ms) {
iveresov@1546 1648 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1649 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1650
tonyp@1717 1651 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1652 const int k_gy = 3, k_gr = 6;
iveresov@1546 1653 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1654
iveresov@1546 1655 int g = cg1r->green_zone();
iveresov@1546 1656 if (update_rs_time > goal_ms) {
iveresov@1546 1657 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
iveresov@1546 1658 } else {
iveresov@1546 1659 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1660 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1661 }
iveresov@1546 1662 }
iveresov@1546 1663 // Change the refinement threads params
iveresov@1546 1664 cg1r->set_green_zone(g);
iveresov@1546 1665 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1666 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1667 cg1r->reinitialize_threads();
iveresov@1546 1668
iveresov@1546 1669 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1670 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1671 cg1r->yellow_zone());
iveresov@1546 1672 // Change the barrier params
iveresov@1546 1673 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1674 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1675 }
iveresov@1546 1676
iveresov@1546 1677 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1678 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1679 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1680 } else {
iveresov@1546 1681 dcqs.set_completed_queue_padding(0);
iveresov@1546 1682 }
iveresov@1546 1683 dcqs.notify_if_necessary();
iveresov@1546 1684 }
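// Illustrative worked example for the adaptive sizing above (numbers
// assumed, not measured): with green_zone = 20 and an update RS time over
// the goal, the green zone shrinks to (int)(20 * 0.9) = 18, which sets
// yellow = 18 * 3 = 54 and red = 18 * 6 = 108. If instead the update RS
// time is under the goal and more than 20 buffers were processed, the green
// zone grows to (int)MAX2(20 * 1.1, 21.0) = 22.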
iveresov@1546 1685
ysr@777 1686 double
ysr@777 1687 G1CollectorPolicy::
ysr@777 1688 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1689 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1690
ysr@777 1691 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1692 size_t young_num = g1h->young_list()->length();
ysr@777 1693 if (young_num == 0)
ysr@777 1694 return 0.0;
ysr@777 1695
ysr@777 1696 young_num += adjustment;
ysr@777 1697 size_t pending_cards = predict_pending_cards();
johnc@1829 1698 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1699 predict_rs_length_diff();
ysr@777 1700 size_t card_num;
ysr@777 1701 if (full_young_gcs())
ysr@777 1702 card_num = predict_young_card_num(rs_lengths);
ysr@777 1703 else
ysr@777 1704 card_num = predict_non_young_card_num(rs_lengths);
ysr@777 1705 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1706 double accum_yg_surv_rate =
ysr@777 1707 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1708
ysr@777 1709 size_t bytes_to_copy =
ysr@777 1710 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1711
ysr@777 1712 return
ysr@777 1713 predict_rs_update_time_ms(pending_cards) +
ysr@777 1714 predict_rs_scan_time_ms(card_num) +
ysr@777 1715 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1716 predict_young_other_time_ms(young_num) +
ysr@777 1717 predict_constant_other_time_ms();
ysr@777 1718 }
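// Illustrative arithmetic (per-unit costs assumed, not measured): with
// 1000 pending cards at 0.01 ms/card, 5000 scanned cards at 0.005 ms/card,
// 2 MB to copy at 0.00002 ms/byte, 10 young regions at 0.1 ms of "young
// other" overhead each and 2 ms of constant other time, the prediction is
// roughly 10 + 25 + 42 + 1 + 2 = 80 ms.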
ysr@777 1719
ysr@777 1720 double
ysr@777 1721 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1722 size_t rs_length = predict_rs_length_diff();
ysr@777 1723 size_t card_num;
ysr@777 1724 if (full_young_gcs())
ysr@777 1725 card_num = predict_young_card_num(rs_length);
ysr@777 1726 else
ysr@777 1727 card_num = predict_non_young_card_num(rs_length);
ysr@777 1728 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1729 }
ysr@777 1730
ysr@777 1731 double
ysr@777 1732 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1733 size_t scanned_cards) {
ysr@777 1734 return
ysr@777 1735 predict_rs_update_time_ms(pending_cards) +
ysr@777 1736 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1737 predict_constant_other_time_ms();
ysr@777 1738 }
ysr@777 1739
ysr@777 1740 double
ysr@777 1741 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1742 bool young) {
ysr@777 1743 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1744 size_t card_num;
ysr@777 1745 if (full_young_gcs())
ysr@777 1746 card_num = predict_young_card_num(rs_length);
ysr@777 1747 else
ysr@777 1748 card_num = predict_non_young_card_num(rs_length);
ysr@777 1749 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1750
ysr@777 1751 double region_elapsed_time_ms =
ysr@777 1752 predict_rs_scan_time_ms(card_num) +
ysr@777 1753 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1754
ysr@777 1755 if (young)
ysr@777 1756 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1757 else
ysr@777 1758 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1759
ysr@777 1760 return region_elapsed_time_ms;
ysr@777 1761 }
ysr@777 1762
ysr@777 1763 size_t
ysr@777 1764 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1765 size_t bytes_to_copy;
ysr@777 1766 if (hr->is_marked())
ysr@777 1767 bytes_to_copy = hr->max_live_bytes();
ysr@777 1768 else {
ysr@777 1769 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1770 "invariant" );
ysr@777 1771 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1772 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1773 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1774 }
ysr@777 1775
ysr@777 1776 return bytes_to_copy;
ysr@777 1777 }
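// Illustrative example (values assumed): for a marked (old) region the
// max_live_bytes() figure from marking is used directly; for an unmarked
// young region with 1 MB used and a predicted survival rate of 0.30, the
// estimate is 0.30 * 1 MB = ~300 KB to copy.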
ysr@777 1778
ysr@777 1779 void
ysr@777 1780 G1CollectorPolicy::start_recording_regions() {
ysr@777 1781 _recorded_rs_lengths = 0;
ysr@777 1782 _recorded_young_regions = 0;
ysr@777 1783 _recorded_non_young_regions = 0;
ysr@777 1784
ysr@777 1785 #if PREDICTIONS_VERBOSE
ysr@777 1786 _recorded_marked_bytes = 0;
ysr@777 1787 _recorded_young_bytes = 0;
ysr@777 1788 _predicted_bytes_to_copy = 0;
johnc@1829 1789 _predicted_rs_lengths = 0;
johnc@1829 1790 _predicted_cards_scanned = 0;
ysr@777 1791 #endif // PREDICTIONS_VERBOSE
ysr@777 1792 }
ysr@777 1793
ysr@777 1794 void
johnc@1829 1795 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
ysr@777 1796 #if PREDICTIONS_VERBOSE
johnc@1829 1797 if (!young) {
ysr@777 1798 _recorded_marked_bytes += hr->max_live_bytes();
ysr@777 1799 }
ysr@777 1800 _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
ysr@777 1801 #endif // PREDICTIONS_VERBOSE
ysr@777 1802
ysr@777 1803 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1804 _recorded_rs_lengths += rs_length;
ysr@777 1805 }
ysr@777 1806
ysr@777 1807 void
johnc@1829 1808 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
johnc@1829 1809 assert(!hr->is_young(), "should not call this");
johnc@1829 1810 ++_recorded_non_young_regions;
johnc@1829 1811 record_cset_region_info(hr, false);
johnc@1829 1812 }
johnc@1829 1813
johnc@1829 1814 void
johnc@1829 1815 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
johnc@1829 1816 _recorded_young_regions = n_regions;
johnc@1829 1817 }
johnc@1829 1818
johnc@1829 1819 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
johnc@1829 1820 #if PREDICTIONS_VERBOSE
johnc@1829 1821 _recorded_young_bytes = bytes;
johnc@1829 1822 #endif // PREDICTIONS_VERBOSE
johnc@1829 1823 }
johnc@1829 1824
johnc@1829 1825 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
johnc@1829 1826 _recorded_rs_lengths = rs_lengths;
johnc@1829 1827 }
johnc@1829 1828
johnc@1829 1829 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 1830 _predicted_bytes_to_copy = bytes;
ysr@777 1831 }
ysr@777 1832
ysr@777 1833 void
ysr@777 1834 G1CollectorPolicy::end_recording_regions() {
johnc@1829 1835 // The _predicted_pause_time_ms field is referenced in code
johnc@1829 1836 // not under PREDICTIONS_VERBOSE. Let's initialize it.
johnc@1829 1837 _predicted_pause_time_ms = -1.0;
johnc@1829 1838
ysr@777 1839 #if PREDICTIONS_VERBOSE
ysr@777 1840 _predicted_pending_cards = predict_pending_cards();
ysr@777 1841 _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
ysr@777 1842 if (full_young_gcs())
ysr@777 1843 _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
ysr@777 1844 else
ysr@777 1845 _predicted_cards_scanned +=
ysr@777 1846 predict_non_young_card_num(_predicted_rs_lengths);
ysr@777 1847 _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
ysr@777 1848
ysr@777 1849 _predicted_rs_update_time_ms =
ysr@777 1850 predict_rs_update_time_ms(_g1->pending_card_num());
ysr@777 1851 _predicted_rs_scan_time_ms =
ysr@777 1852 predict_rs_scan_time_ms(_predicted_cards_scanned);
ysr@777 1853 _predicted_object_copy_time_ms =
ysr@777 1854 predict_object_copy_time_ms(_predicted_bytes_to_copy);
ysr@777 1855 _predicted_constant_other_time_ms =
ysr@777 1856 predict_constant_other_time_ms();
ysr@777 1857 _predicted_young_other_time_ms =
ysr@777 1858 predict_young_other_time_ms(_recorded_young_regions);
ysr@777 1859 _predicted_non_young_other_time_ms =
ysr@777 1860 predict_non_young_other_time_ms(_recorded_non_young_regions);
ysr@777 1861
ysr@777 1862 _predicted_pause_time_ms =
ysr@777 1863 _predicted_rs_update_time_ms +
ysr@777 1864 _predicted_rs_scan_time_ms +
ysr@777 1865 _predicted_object_copy_time_ms +
ysr@777 1866 _predicted_constant_other_time_ms +
ysr@777 1867 _predicted_young_other_time_ms +
ysr@777 1868 _predicted_non_young_other_time_ms;
ysr@777 1869 #endif // PREDICTIONS_VERBOSE
ysr@777 1870 }
ysr@777 1871
ysr@777 1872 void G1CollectorPolicy::check_if_region_is_too_expensive(double
ysr@777 1873 predicted_time_ms) {
ysr@777 1874 // I don't think we need to do this when in young GC mode since
ysr@777 1875 // marking will be initiated next time we hit the soft limit anyway...
ysr@777 1876 if (predicted_time_ms > _expensive_region_limit_ms) {
ysr@777 1877 if (!in_young_gc_mode()) {
ysr@777 1878 set_full_young_gcs(true);
tonyp@1794 1879 // We might want to do something different here. However,
tonyp@1794 1880 // right now we don't support the non-generational G1 mode
tonyp@1794 1881 // (and in fact we are planning to remove the associated code,
tonyp@1794 1882 // see CR 6814390). So, let's leave it as is and this will be
tonyp@1794 1883 // removed some time in the future.
tonyp@1794 1884 ShouldNotReachHere();
tonyp@1794 1885 set_during_initial_mark_pause();
ysr@777 1886 } else
ysr@777 1887 // no point in doing another partial one
ysr@777 1888 _should_revert_to_full_young_gcs = true;
ysr@777 1889 }
ysr@777 1890 }
ysr@777 1891
ysr@777 1892 // </NEW PREDICTION>
ysr@777 1893
ysr@777 1894
ysr@777 1895 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1896 double elapsed_ms) {
ysr@777 1897 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1898 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1899 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1900 }
ysr@777 1901
ysr@777 1902 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
johnc@1186 1903 if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis;
ysr@777 1904 else return _recent_pause_times_ms->avg();
ysr@777 1905 }
ysr@777 1906
ysr@777 1907 double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
ysr@777 1908 if (_recent_CH_strong_roots_times_ms->num() == 0)
johnc@1186 1909 return (double)MaxGCPauseMillis/3.0;
ysr@777 1910 else return _recent_CH_strong_roots_times_ms->avg();
ysr@777 1911 }
ysr@777 1912
ysr@777 1913 double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
ysr@777 1914 if (_recent_G1_strong_roots_times_ms->num() == 0)
johnc@1186 1915 return (double)MaxGCPauseMillis/3.0;
ysr@777 1916 else return _recent_G1_strong_roots_times_ms->avg();
ysr@777 1917 }
ysr@777 1918
ysr@777 1919 double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
johnc@1186 1920 if (_recent_evac_times_ms->num() == 0) return (double)MaxGCPauseMillis/3.0;
ysr@777 1921 else return _recent_evac_times_ms->avg();
ysr@777 1922 }
ysr@777 1923
ysr@777 1924 int G1CollectorPolicy::number_of_recent_gcs() {
ysr@777 1925 assert(_recent_CH_strong_roots_times_ms->num() ==
ysr@777 1926 _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync");
ysr@777 1927 assert(_recent_G1_strong_roots_times_ms->num() ==
ysr@777 1928 _recent_evac_times_ms->num(), "Sequence out of sync");
ysr@777 1929 assert(_recent_evac_times_ms->num() ==
ysr@777 1930 _recent_pause_times_ms->num(), "Sequence out of sync");
ysr@777 1931 assert(_recent_pause_times_ms->num() ==
ysr@777 1932 _recent_CS_bytes_used_before->num(), "Sequence out of sync");
ysr@777 1933 assert(_recent_CS_bytes_used_before->num() ==
ysr@777 1934 _recent_CS_bytes_surviving->num(), "Sequence out of sync");
ysr@777 1935 return _recent_pause_times_ms->num();
ysr@777 1936 }
ysr@777 1937
ysr@777 1938 double G1CollectorPolicy::recent_avg_survival_fraction() {
ysr@777 1939 return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
ysr@777 1940 _recent_CS_bytes_used_before);
ysr@777 1941 }
ysr@777 1942
ysr@777 1943 double G1CollectorPolicy::last_survival_fraction() {
ysr@777 1944 return last_survival_fraction_work(_recent_CS_bytes_surviving,
ysr@777 1945 _recent_CS_bytes_used_before);
ysr@777 1946 }
ysr@777 1947
ysr@777 1948 double
ysr@777 1949 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 1950 TruncatedSeq* before) {
ysr@777 1951 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 1952 if (before->sum() > 0.0) {
ysr@777 1953 double recent_survival_rate = surviving->sum() / before->sum();
ysr@777 1954 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1955 // fragmentation can produce negative collections.
ysr@777 1956 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1957 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1958 // (DLD, 10/05.)
jmasa@2188 1959 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
ysr@777 1960 _g1->evacuation_failed() ||
ysr@777 1961 recent_survival_rate <= 1.0, "Or bad frac");
ysr@777 1962 return recent_survival_rate;
ysr@777 1963 } else {
ysr@777 1964 return 1.0; // Be conservative.
ysr@777 1965 }
ysr@777 1966 }
ysr@777 1967
ysr@777 1968 double
ysr@777 1969 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 1970 TruncatedSeq* before) {
ysr@777 1971 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 1972 if (surviving->num() > 0 && before->last() > 0.0) {
ysr@777 1973 double last_survival_rate = surviving->last() / before->last();
ysr@777 1974 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1975 // fragmentation can produce negative collections.
ysr@777 1976 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1977 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1978 // (DLD, 10/05.)
jmasa@2188 1979 assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
ysr@777 1980 last_survival_rate <= 1.0, "Or bad frac");
ysr@777 1981 return last_survival_rate;
ysr@777 1982 } else {
ysr@777 1983 return 1.0;
ysr@777 1984 }
ysr@777 1985 }
ysr@777 1986
ysr@777 1987 static const int survival_min_obs = 5;
ysr@777 1988 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
ysr@777 1989 static const double min_survival_rate = 0.1;
ysr@777 1990
ysr@777 1991 double
ysr@777 1992 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
ysr@777 1993 double latest) {
ysr@777 1994 double res = avg;
ysr@777 1995 if (number_of_recent_gcs() < survival_min_obs) {
ysr@777 1996 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
ysr@777 1997 }
ysr@777 1998 res = MAX2(res, latest);
ysr@777 1999 res = MAX2(res, min_survival_rate);
ysr@777 2000 // In the parallel case, LAB fragmentation can produce "negative
ysr@777 2001 // collections"; so can evac failure. Cap at 1.0
ysr@777 2002 res = MIN2(res, 1.0);
ysr@777 2003 return res;
ysr@777 2004 }
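// Illustrative example (values assumed): with only 2 recent GCs recorded
// (fewer than survival_min_obs = 5), the average is first raised to at
// least survival_min_obs_limits[2] = 0.5, then to at least the latest
// observed fraction and min_survival_rate = 0.1, and finally capped at 1.0.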
ysr@777 2005
ysr@777 2006 size_t G1CollectorPolicy::expansion_amount() {
tonyp@1791 2007 if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
johnc@1186 2008 // We will double the existing space, or take
johnc@1186 2009 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 2010 // space, whichever is smaller, bounded below by a minimum
johnc@1186 2011 // expansion (unless that's all that's left.)
ysr@777 2012 const size_t min_expand_bytes = 1*M;
ysr@777 2013 size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
ysr@777 2014 size_t committed_bytes = _g1->capacity();
ysr@777 2015 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 2016 size_t expand_bytes;
ysr@777 2017 size_t expand_bytes_via_pct =
johnc@1186 2018 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 2019 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 2020 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 2021 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
ysr@777 2022 if (G1PolicyVerbose > 1) {
ysr@777 2023 gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
ysr@777 2024 "committed = %d%s, uncommitted = %d%s, via pct = %d%s.\n"
ysr@777 2025 " Answer = %d.\n",
ysr@777 2026 recent_avg_pause_time_ratio(),
ysr@777 2027 byte_size_in_proper_unit(committed_bytes),
ysr@777 2028 proper_unit_for_byte_size(committed_bytes),
ysr@777 2029 byte_size_in_proper_unit(uncommitted_bytes),
ysr@777 2030 proper_unit_for_byte_size(uncommitted_bytes),
ysr@777 2031 byte_size_in_proper_unit(expand_bytes_via_pct),
ysr@777 2032 proper_unit_for_byte_size(expand_bytes_via_pct),
ysr@777 2033 byte_size_in_proper_unit(expand_bytes),
ysr@777 2034 proper_unit_for_byte_size(expand_bytes));
ysr@777 2035 }
ysr@777 2036 return expand_bytes;
ysr@777 2037 } else {
ysr@777 2038 return 0;
ysr@777 2039 }
ysr@777 2040 }
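// Illustrative sizing (values assumed): with 1024 MB reserved, 256 MB
// committed (so 768 MB uncommitted) and G1ExpandByPercentOfAvailable = 20,
// the percentage-based candidate is 768 MB * 20% = ~153 MB; it is capped at
// the committed size (i.e. no more than doubling), raised to at least
// min_expand_bytes (1 MB) and finally capped at the uncommitted space,
// so ~153 MB would be returned.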
ysr@777 2041
ysr@777 2042 void G1CollectorPolicy::note_start_of_mark_thread() {
ysr@777 2043 _mark_thread_startup_sec = os::elapsedTime();
ysr@777 2044 }
ysr@777 2045
ysr@777 2046 class CountCSClosure: public HeapRegionClosure {
ysr@777 2047 G1CollectorPolicy* _g1_policy;
ysr@777 2048 public:
ysr@777 2049 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 2050 _g1_policy(g1_policy) {}
ysr@777 2051 bool doHeapRegion(HeapRegion* r) {
ysr@777 2052 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 2053 return false;
ysr@777 2054 }
ysr@777 2055 };
ysr@777 2056
ysr@777 2057 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 2058 CountCSClosure cs_closure(this);
ysr@777 2059 _g1->collection_set_iterate(&cs_closure);
ysr@777 2060 }
ysr@777 2061
ysr@777 2062 static void print_indent(int level) {
ysr@777 2063 for (int j = 0; j < level+1; ++j)
ysr@777 2064 gclog_or_tty->print(" ");
ysr@777 2065 }
ysr@777 2066
ysr@777 2067 void G1CollectorPolicy::print_summary (int level,
ysr@777 2068 const char* str,
ysr@777 2069 NumberSeq* seq) const {
ysr@777 2070 double sum = seq->sum();
ysr@777 2071 print_indent(level);
ysr@777 2072 gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 2073 str, sum / 1000.0, seq->avg());
ysr@777 2074 }
ysr@777 2075
ysr@777 2076 void G1CollectorPolicy::print_summary_sd (int level,
ysr@777 2077 const char* str,
ysr@777 2078 NumberSeq* seq) const {
ysr@777 2079 print_summary(level, str, seq);
ysr@777 2080 print_indent(level + 5);
ysr@777 2081 gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 2082 seq->num(), seq->sd(), seq->maximum());
ysr@777 2083 }
ysr@777 2084
ysr@777 2085 void G1CollectorPolicy::check_other_times(int level,
ysr@777 2086 NumberSeq* other_times_ms,
ysr@777 2087 NumberSeq* calc_other_times_ms) const {
ysr@777 2088 bool should_print = false;
ysr@777 2089
ysr@777 2090 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 2091 fabs(calc_other_times_ms->sum()));
ysr@777 2092 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 2093 fabs(calc_other_times_ms->sum()));
ysr@777 2094 double sum_ratio = max_sum / min_sum;
ysr@777 2095 if (sum_ratio > 1.1) {
ysr@777 2096 should_print = true;
ysr@777 2097 print_indent(level + 1);
ysr@777 2098 gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 2099 }
ysr@777 2100
ysr@777 2101 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 2102 fabs(calc_other_times_ms->avg()));
ysr@777 2103 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 2104 fabs(calc_other_times_ms->avg()));
ysr@777 2105 double avg_ratio = max_avg / min_avg;
ysr@777 2106 if (avg_ratio > 1.1) {
ysr@777 2107 should_print = true;
ysr@777 2108 print_indent(level + 1);
ysr@777 2109 gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 2110 }
ysr@777 2111
ysr@777 2112 if (other_times_ms->sum() < -0.01) {
ysr@777 2113 print_indent(level + 1);
ysr@777 2114 gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 2115 }
ysr@777 2116
ysr@777 2117 if (other_times_ms->avg() < -0.01) {
ysr@777 2118 print_indent(level + 1);
ysr@777 2119 gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 2120 }
ysr@777 2121
ysr@777 2122 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 2123 should_print = true;
ysr@777 2124 print_indent(level + 1);
ysr@777 2125 gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 2126 }
ysr@777 2127
ysr@777 2128 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 2129 should_print = true;
ysr@777 2130 print_indent(level + 1);
ysr@777 2131 gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 2132 }
ysr@777 2133
ysr@777 2134 if (should_print)
ysr@777 2135 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 2136 }
ysr@777 2137
ysr@777 2138 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
jmasa@2188 2139 bool parallel = G1CollectedHeap::use_parallel_gc_threads();
ysr@777 2140 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 2141 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 2142 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 2143 if (body_summary != NULL) {
ysr@777 2144 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
ysr@777 2145 if (parallel) {
ysr@777 2146 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
ysr@777 2147 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2148 print_summary(2, "Ext Root Scanning",
ysr@777 2149 body_summary->get_ext_root_scan_seq());
ysr@777 2150 print_summary(2, "Mark Stack Scanning",
ysr@777 2151 body_summary->get_mark_stack_scan_seq());
ysr@777 2152 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2153 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2154 print_summary(2, "Termination", body_summary->get_termination_seq());
ysr@777 2155 print_summary(2, "Other", body_summary->get_parallel_other_seq());
ysr@777 2156 {
ysr@777 2157 NumberSeq* other_parts[] = {
ysr@777 2158 body_summary->get_update_rs_seq(),
ysr@777 2159 body_summary->get_ext_root_scan_seq(),
ysr@777 2160 body_summary->get_mark_stack_scan_seq(),
ysr@777 2161 body_summary->get_scan_rs_seq(),
ysr@777 2162 body_summary->get_obj_copy_seq(),
ysr@777 2163 body_summary->get_termination_seq()
ysr@777 2164 };
ysr@777 2165 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
johnc@2134 2166 6, other_parts);
ysr@777 2167 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 2168 &calc_other_times_ms);
ysr@777 2169 }
ysr@777 2170 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
ysr@777 2171 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 2172 } else {
ysr@777 2173 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2174 print_summary(1, "Ext Root Scanning",
ysr@777 2175 body_summary->get_ext_root_scan_seq());
ysr@777 2176 print_summary(1, "Mark Stack Scanning",
ysr@777 2177 body_summary->get_mark_stack_scan_seq());
ysr@777 2178 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2179 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2180 }
ysr@777 2181 }
ysr@777 2182 print_summary(1, "Other", summary->get_other_seq());
ysr@777 2183 {
johnc@2134 2184 if (body_summary != NULL) {
johnc@2134 2185 NumberSeq calc_other_times_ms;
johnc@2134 2186 if (parallel) {
johnc@2134 2187 // parallel
johnc@2134 2188 NumberSeq* other_parts[] = {
johnc@2134 2189 body_summary->get_satb_drain_seq(),
johnc@2134 2190 body_summary->get_parallel_seq(),
johnc@2134 2191 body_summary->get_clear_ct_seq()
johnc@2134 2192 };
johnc@2134 2193 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
johnc@2134 2194 3, other_parts);
johnc@2134 2195 } else {
johnc@2134 2196 // serial
johnc@2134 2197 NumberSeq* other_parts[] = {
johnc@2134 2198 body_summary->get_satb_drain_seq(),
johnc@2134 2199 body_summary->get_update_rs_seq(),
johnc@2134 2200 body_summary->get_ext_root_scan_seq(),
johnc@2134 2201 body_summary->get_mark_stack_scan_seq(),
johnc@2134 2202 body_summary->get_scan_rs_seq(),
johnc@2134 2203 body_summary->get_obj_copy_seq()
johnc@2134 2204 };
johnc@2134 2205 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
johnc@2134 2206 6, other_parts);
johnc@2134 2207 }
johnc@2134 2208 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 2209 }
ysr@777 2210 }
ysr@777 2211 } else {
ysr@777 2212 print_indent(0);
ysr@777 2213 gclog_or_tty->print_cr("none");
ysr@777 2214 }
ysr@777 2215 gclog_or_tty->print_cr("");
ysr@777 2216 }
ysr@777 2217
ysr@777 2218 void G1CollectorPolicy::print_tracing_info() const {
ysr@777 2219 if (TraceGen0Time) {
ysr@777 2220 gclog_or_tty->print_cr("ALL PAUSES");
ysr@777 2221 print_summary_sd(0, "Total", _all_pause_times_ms);
ysr@777 2222 gclog_or_tty->print_cr("");
ysr@777 2223 gclog_or_tty->print_cr("");
ysr@777 2224 gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
ysr@777 2225 gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
ysr@777 2226 gclog_or_tty->print_cr("");
ysr@777 2227
apetrusenko@1112 2228 gclog_or_tty->print_cr("EVACUATION PAUSES");
apetrusenko@1112 2229 print_summary(_summary);
ysr@777 2230
ysr@777 2231 gclog_or_tty->print_cr("MISC");
ysr@777 2232 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
ysr@777 2233 print_summary_sd(0, "Yields", _all_yield_times_ms);
ysr@777 2234 for (int i = 0; i < _aux_num; ++i) {
ysr@777 2235 if (_all_aux_times_ms[i].num() > 0) {
ysr@777 2236 char buffer[96];
ysr@777 2237 sprintf(buffer, "Aux%d", i);
ysr@777 2238 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
ysr@777 2239 }
ysr@777 2240 }
ysr@777 2241
ysr@777 2242 size_t all_region_num = _region_num_young + _region_num_tenured;
ysr@777 2243 gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
ysr@777 2244 "Tenured %8d (%6.2lf%%)",
ysr@777 2245 all_region_num,
ysr@777 2246 _region_num_young,
ysr@777 2247 (double) _region_num_young / (double) all_region_num * 100.0,
ysr@777 2248 _region_num_tenured,
ysr@777 2249 (double) _region_num_tenured / (double) all_region_num * 100.0);
ysr@777 2250 }
ysr@777 2251 if (TraceGen1Time) {
ysr@777 2252 if (_all_full_gc_times_ms->num() > 0) {
ysr@777 2253 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
ysr@777 2254 _all_full_gc_times_ms->num(),
ysr@777 2255 _all_full_gc_times_ms->sum() / 1000.0);
ysr@777 2256 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
ysr@777 2257 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 2258 _all_full_gc_times_ms->sd(),
ysr@777 2259 _all_full_gc_times_ms->maximum());
ysr@777 2260 }
ysr@777 2261 }
ysr@777 2262 }
ysr@777 2263
ysr@777 2264 void G1CollectorPolicy::print_yg_surv_rate_info() const {
ysr@777 2265 #ifndef PRODUCT
ysr@777 2266 _short_lived_surv_rate_group->print_surv_rate_summary();
ysr@777 2267 // add this call for any other surv rate groups
ysr@777 2268 #endif // PRODUCT
ysr@777 2269 }
ysr@777 2270
tonyp@2315 2271 void
tonyp@2315 2272 G1CollectorPolicy::update_region_num(bool young) {
tonyp@2315 2273 if (young) {
ysr@777 2274 ++_region_num_young;
ysr@777 2275 } else {
ysr@777 2276 ++_region_num_tenured;
ysr@777 2277 }
ysr@777 2278 }
ysr@777 2279
ysr@777 2280 #ifndef PRODUCT
ysr@777 2281 // for debugging, bit of a hack...
ysr@777 2282 static char*
ysr@777 2283 region_num_to_mbs(int length) {
ysr@777 2284 static char buffer[64];
ysr@777 2285 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2286 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2287 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2288 return buffer;
ysr@777 2289 }
ysr@777 2290 #endif // PRODUCT
ysr@777 2291
apetrusenko@980 2292 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2293 switch (purpose) {
ysr@777 2294 case GCAllocForSurvived:
apetrusenko@980 2295 return _max_survivor_regions;
ysr@777 2296 case GCAllocForTenured:
apetrusenko@980 2297 return REGIONS_UNLIMITED;
ysr@777 2298 default:
apetrusenko@980 2299 ShouldNotReachHere();
apetrusenko@980 2300 return REGIONS_UNLIMITED;
ysr@777 2301 };
ysr@777 2302 }
ysr@777 2303
apetrusenko@980 2304 // Calculates survivor space parameters.
apetrusenko@980 2305 void G1CollectorPolicy::calculate_survivors_policy()
apetrusenko@980 2306 {
apetrusenko@980 2307 if (G1FixedSurvivorSpaceSize == 0) {
apetrusenko@980 2308 _max_survivor_regions = _young_list_target_length / SurvivorRatio;
apetrusenko@980 2309 } else {
apetrusenko@982 2310 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
apetrusenko@980 2311 }
apetrusenko@980 2312
apetrusenko@980 2313 if (G1FixedTenuringThreshold) {
apetrusenko@980 2314 _tenuring_threshold = MaxTenuringThreshold;
apetrusenko@980 2315 } else {
apetrusenko@980 2316 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2317 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2318 }
apetrusenko@980 2319 }
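// Illustrative sizing (flag values assumed): with G1FixedSurvivorSpaceSize
// left at 0, a young list target length of 64 regions and SurvivorRatio = 8,
// _max_survivor_regions becomes 64 / 8 = 8; the tenuring threshold is then
// computed from the survivor age table against 8 * HeapRegion::GrainWords.
// With G1FixedTenuringThreshold set, MaxTenuringThreshold is used directly.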
apetrusenko@980 2320
ysr@777 2321 #ifndef PRODUCT
ysr@777 2322 class HRSortIndexIsOKClosure: public HeapRegionClosure {
ysr@777 2323 CollectionSetChooser* _chooser;
ysr@777 2324 public:
ysr@777 2325 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
ysr@777 2326 _chooser(chooser) {}
ysr@777 2327
ysr@777 2328 bool doHeapRegion(HeapRegion* r) {
ysr@777 2329 if (!r->continuesHumongous()) {
ysr@777 2330 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
ysr@777 2331 }
ysr@777 2332 return false;
ysr@777 2333 }
ysr@777 2334 };
ysr@777 2335
ysr@777 2336 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
ysr@777 2337 HRSortIndexIsOKClosure cl(_collectionSetChooser);
ysr@777 2338 _g1->heap_region_iterate(&cl);
ysr@777 2339 return true;
ysr@777 2340 }
ysr@777 2341 #endif
ysr@777 2342
tonyp@2011 2343 bool
tonyp@2011 2344 G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
tonyp@2011 2345 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2346 if (!during_cycle) {
tonyp@2011 2347 set_initiate_conc_mark_if_possible();
tonyp@2011 2348 return true;
tonyp@2011 2349 } else {
tonyp@2011 2350 return false;
tonyp@2011 2351 }
tonyp@2011 2352 }
tonyp@2011 2353
ysr@777 2354 void
tonyp@1794 2355 G1CollectorPolicy::decide_on_conc_mark_initiation() {
tonyp@1794 2356 // We are about to decide on whether this pause will be an
tonyp@1794 2357 // initial-mark pause.
tonyp@1794 2358
tonyp@1794 2359 // First, during_initial_mark_pause() should not be already set. We
tonyp@1794 2360 // will set it here if we have to. However, it should be cleared by
tonyp@1794 2361 // the end of the pause (it's only set for the duration of an
tonyp@1794 2362 // initial-mark pause).
tonyp@1794 2363 assert(!during_initial_mark_pause(), "pre-condition");
tonyp@1794 2364
tonyp@1794 2365 if (initiate_conc_mark_if_possible()) {
tonyp@1794 2366 // We had noticed on a previous pause that the heap occupancy has
tonyp@1794 2367 // gone over the initiating threshold and we should start a
tonyp@1794 2368 // concurrent marking cycle. So we might initiate one.
tonyp@1794 2369
tonyp@1794 2370 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@1794 2371 if (!during_cycle) {
tonyp@1794 2372 // The concurrent marking thread is not "during a cycle", i.e.,
tonyp@1794 2373 // it has completed the last one. So we can go ahead and
tonyp@1794 2374 // initiate a new cycle.
tonyp@1794 2375
tonyp@1794 2376 set_during_initial_mark_pause();
tonyp@1794 2377
tonyp@1794 2378 // And we can now clear initiate_conc_mark_if_possible() as
tonyp@1794 2379 // we've already acted on it.
tonyp@1794 2380 clear_initiate_conc_mark_if_possible();
tonyp@1794 2381 } else {
tonyp@1794 2382 // The concurrent marking thread is still finishing up the
tonyp@1794 2383 // previous cycle. If we start one right now the two cycles
tonyp@1794 2384 // overlap. In particular, the concurrent marking thread might
tonyp@1794 2385 // be in the process of clearing the next marking bitmap (which
tonyp@1794 2386 // we will use for the next cycle if we start one). Starting a
tonyp@1794 2387 // cycle now will be bad given that parts of the marking
tonyp@1794 2388 // information might get cleared by the marking thread. And we
tonyp@1794 2389 // cannot wait for the marking thread to finish the cycle as it
tonyp@1794 2390 // periodically yields while clearing the next marking bitmap
tonyp@1794 2391 // and, if it's in a yield point, it's waiting for us to
tonyp@1794 2392 // finish. So, at this point we will not start a cycle and we'll
tonyp@1794 2393 // let the concurrent marking thread complete the last one.
tonyp@1794 2394 }
tonyp@1794 2395 }
tonyp@1794 2396 }
tonyp@1794 2397
tonyp@1794 2398 void
ysr@777 2399 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2400 record_collection_pause_start(double start_time_sec, size_t start_used) {
ysr@777 2401 G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
ysr@777 2402 }
ysr@777 2403
ysr@777 2404 class NextNonCSElemFinder: public HeapRegionClosure {
ysr@777 2405 HeapRegion* _res;
ysr@777 2406 public:
ysr@777 2407 NextNonCSElemFinder(): _res(NULL) {}
ysr@777 2408 bool doHeapRegion(HeapRegion* r) {
ysr@777 2409 if (!r->in_collection_set()) {
ysr@777 2410 _res = r;
ysr@777 2411 return true;
ysr@777 2412 } else {
ysr@777 2413 return false;
ysr@777 2414 }
ysr@777 2415 }
ysr@777 2416 HeapRegion* res() { return _res; }
ysr@777 2417 };
ysr@777 2418
ysr@777 2419 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2420 CollectionSetChooser* _hrSorted;
ysr@777 2421
ysr@777 2422 public:
ysr@777 2423 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2424 _hrSorted(hrSorted)
ysr@777 2425 {}
ysr@777 2426
ysr@777 2427 bool doHeapRegion(HeapRegion* r) {
ysr@777 2428 // We only include humongous regions in collection
ysr@777 2429 // sets when concurrent mark shows that their contained object is
ysr@777 2430 // unreachable.
ysr@777 2431
ysr@777 2432 // Do we have any marking information for this region?
ysr@777 2433 if (r->is_marked()) {
ysr@777 2434 // We don't include humongous regions in collection
ysr@777 2435 // sets because we collect them immediately at the end of a marking
ysr@777 2436 // cycle. We also don't include young regions because we *must*
ysr@777 2437 // include them in the next collection pause.
ysr@777 2438 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2439 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2440 }
ysr@777 2441 }
ysr@777 2442 return false;
ysr@777 2443 }
ysr@777 2444 };
ysr@777 2445
ysr@777 2446 class ParKnownGarbageHRClosure: public HeapRegionClosure {
ysr@777 2447 CollectionSetChooser* _hrSorted;
ysr@777 2448 jint _marked_regions_added;
ysr@777 2449 jint _chunk_size;
ysr@777 2450 jint _cur_chunk_idx;
ysr@777 2451 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
ysr@777 2452 int _worker;
ysr@777 2453 int _invokes;
ysr@777 2454
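// Claim the next chunk of _chunk_size slots in the chooser's marked-region
// array; this worker then fills indices in [_cur_chunk_idx, _cur_chunk_end).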
ysr@777 2455 void get_new_chunk() {
ysr@777 2456 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
ysr@777 2457 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
ysr@777 2458 }
ysr@777 2459 void add_region(HeapRegion* r) {
ysr@777 2460 if (_cur_chunk_idx == _cur_chunk_end) {
ysr@777 2461 get_new_chunk();
ysr@777 2462 }
ysr@777 2463 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
ysr@777 2464 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
ysr@777 2465 _marked_regions_added++;
ysr@777 2466 _cur_chunk_idx++;
ysr@777 2467 }
ysr@777 2468
ysr@777 2469 public:
ysr@777 2470 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
ysr@777 2471 jint chunk_size,
ysr@777 2472 int worker) :
ysr@777 2473 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
ysr@777 2474 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
ysr@777 2475 _invokes(0)
ysr@777 2476 {}
ysr@777 2477
ysr@777 2478 bool doHeapRegion(HeapRegion* r) {
ysr@777 2479 // We only include humongous regions in collection
ysr@777 2480 // sets when concurrent mark shows that their contained object is
ysr@777 2481 // unreachable.
ysr@777 2482 _invokes++;
ysr@777 2483
ysr@777 2484 // Do we have any marking information for this region?
ysr@777 2485 if (r->is_marked()) {
ysr@777 2486 // We don't include humongous regions in collection
ysr@777 2487 // sets because we collect them immediately at the end of a marking
ysr@777 2488 // cycle.
ysr@777 2489 // We also do not include young regions in collection sets
ysr@777 2490 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2491 add_region(r);
ysr@777 2492 }
ysr@777 2493 }
ysr@777 2494 return false;
ysr@777 2495 }
ysr@777 2496 jint marked_regions_added() { return _marked_regions_added; }
ysr@777 2497 int invokes() { return _invokes; }
ysr@777 2498 };
ysr@777 2499
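// Gang task: the workers iterate over disjoint sets of heap regions in
// parallel and add the marked, non-humongous, non-young ones to the
// collection set chooser.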
ysr@777 2500 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 2501 CollectionSetChooser* _hrSorted;
ysr@777 2502 jint _chunk_size;
ysr@777 2503 G1CollectedHeap* _g1;
ysr@777 2504 public:
ysr@777 2505 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ysr@777 2506 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 2507 _hrSorted(hrSorted), _chunk_size(chunk_size),
ysr@777 2508 _g1(G1CollectedHeap::heap())
ysr@777 2509 {}
ysr@777 2510
ysr@777 2511 void work(int i) {
ysr@777 2512 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
ysr@777 2513 // Iterate with InitialClaimValue, putting the regions' claim values back to zero; each region is visited by exactly one worker.
tonyp@790 2514 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
tonyp@790 2515 HeapRegion::InitialClaimValue);
ysr@777 2516 jint regions_added = parKnownGarbageCl.marked_regions_added();
ysr@777 2517 _hrSorted->incNumMarkedHeapRegions(regions_added);
ysr@777 2518 if (G1PrintParCleanupStats) {
ysr@777 2519 gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n",
ysr@777 2520 i, parKnownGarbageCl.invokes(), regions_added);
ysr@777 2521 }
ysr@777 2522 }
ysr@777 2523 };
ysr@777 2524
ysr@777 2525 void
ysr@777 2526 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2527 record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 2528 size_t max_live_bytes) {
ysr@777 2529 double start;
ysr@777 2530 if (G1PrintParCleanupStats) start = os::elapsedTime();
ysr@777 2531 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 2532
ysr@777 2533 _collectionSetChooser->clearMarkedHeapRegions();
ysr@777 2534 double clear_marked_end;
ysr@777 2535 if (G1PrintParCleanupStats) {
ysr@777 2536 clear_marked_end = os::elapsedTime();
ysr@777 2537 gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
ysr@777 2538 (clear_marked_end - start)*1000.0);
ysr@777 2539 }
jmasa@2188 2540 if (G1CollectedHeap::use_parallel_gc_threads()) {
ysr@777 2541 const size_t OverpartitionFactor = 4;
kvn@1926 2542 const size_t MinWorkUnit = 8;
kvn@1926 2543 const size_t WorkUnit =
ysr@777 2544 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
kvn@1926 2545 MinWorkUnit);
ysr@777 2546 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
kvn@1926 2547 WorkUnit);
ysr@777 2548 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
kvn@1926 2549 (int) WorkUnit);
ysr@777 2550 _g1->workers()->run_task(&parKnownGarbageTask);
tonyp@790 2551
tonyp@790 2552 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 2553 "sanity check");
ysr@777 2554 } else {
ysr@777 2555 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
ysr@777 2556 _g1->heap_region_iterate(&knownGarbagecl);
ysr@777 2557 }
ysr@777 2558 double known_garbage_end;
ysr@777 2559 if (G1PrintParCleanupStats) {
ysr@777 2560 known_garbage_end = os::elapsedTime();
ysr@777 2561 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
ysr@777 2562 (known_garbage_end - clear_marked_end)*1000.0);
ysr@777 2563 }
ysr@777 2564 _collectionSetChooser->sortMarkedHeapRegions();
ysr@777 2565 double sort_end;
ysr@777 2566 if (G1PrintParCleanupStats) {
ysr@777 2567 sort_end = os::elapsedTime();
ysr@777 2568 gclog_or_tty->print_cr(" sorting: %8.3f ms.",
ysr@777 2569 (sort_end - known_garbage_end)*1000.0);
ysr@777 2570 }
ysr@777 2571
ysr@777 2572 record_concurrent_mark_cleanup_end_work2();
ysr@777 2573 double work2_end;
ysr@777 2574 if (G1PrintParCleanupStats) {
ysr@777 2575 work2_end = os::elapsedTime();
ysr@777 2576 gclog_or_tty->print_cr(" work2: %8.3f ms.",
ysr@777 2577 (work2_end - sort_end)*1000.0);
ysr@777 2578 }
ysr@777 2579 }
ysr@777 2580
johnc@1829 2581 // Add the heap region to the head of the non-incremental collection set
ysr@777 2582 void G1CollectorPolicy::
ysr@777 2583 add_to_collection_set(HeapRegion* hr) {
johnc@1829 2584 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2585 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2586
tonyp@1717 2587 if (G1PrintHeapRegions) {
tonyp@1823 2588 gclog_or_tty->print_cr("added region to cset "
tonyp@1823 2589 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
tonyp@1823 2590 "top "PTR_FORMAT", %s",
tonyp@1823 2591 hr->hrs_index(), hr->bottom(), hr->end(),
tonyp@1823 2592 hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
ysr@777 2593 }
ysr@777 2594
ysr@777 2595 if (_g1->mark_in_progress())
ysr@777 2596 _g1->concurrent_mark()->registerCSetRegion(hr);
ysr@777 2597
johnc@1829 2598 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2599 hr->set_in_collection_set(true);
ysr@777 2600 hr->set_next_in_collection_set(_collection_set);
ysr@777 2601 _collection_set = hr;
ysr@777 2602 _collection_set_size++;
ysr@777 2603 _collection_set_bytes_used_before += hr->used();
tonyp@961 2604 _g1->register_region_with_in_cset_fast_test(hr);
ysr@777 2605 }
ysr@777 2606
johnc@1829 2607 // Initialize the per-collection-set information
johnc@1829 2608 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2609 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2610
johnc@1829 2611 _inc_cset_head = NULL;
johnc@1829 2612 _inc_cset_tail = NULL;
johnc@1829 2613 _inc_cset_size = 0;
johnc@1829 2614 _inc_cset_bytes_used_before = 0;
johnc@1829 2615
johnc@1829 2616 if (in_young_gc_mode()) {
johnc@1829 2617 _inc_cset_young_index = 0;
johnc@1829 2618 }
johnc@1829 2619
johnc@1829 2620 _inc_cset_max_finger = 0;
johnc@1829 2621 _inc_cset_recorded_young_bytes = 0;
johnc@1829 2622 _inc_cset_recorded_rs_lengths = 0;
johnc@1829 2623 _inc_cset_predicted_elapsed_time_ms = 0;
johnc@1829 2624 _inc_cset_predicted_bytes_to_copy = 0;
johnc@1829 2625 _inc_cset_build_state = Active;
johnc@1829 2626 }
johnc@1829 2627
johnc@1829 2628 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2629 // This routine is used when:
johnc@1829 2630 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2631 // evacuation pause,
johnc@1829 2632 // * adding the current allocation region to the incremental cset
johnc@1829 2633 // when it is retired, and
johnc@1829 2634 // * updating existing policy information for a region in the
johnc@1829 2635 // incremental cset via young list RSet sampling.
johnc@1829 2636 // Therefore this routine may be called at a safepoint by the
johnc@1829 2637 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2638 // retiring the current allocation region) or a concurrent
johnc@1829 2639 // refine thread (RSet sampling).
johnc@1829 2640
johnc@1829 2641 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2642 size_t used_bytes = hr->used();
johnc@1829 2643
johnc@1829 2644 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2645 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2646
johnc@1829 2647 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2648
johnc@1829 2649 // Cache the values we have added to the aggregated information
johnc@1829 2650 // in the heap region in case we have to remove this region from
johnc@1829 2651 // the incremental collection set, or it is updated by the
johnc@1829 2652 // rset sampling code
johnc@1829 2653 hr->set_recorded_rs_length(rs_length);
johnc@1829 2654 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2655
johnc@1829 2656 #if PREDICTIONS_VERBOSE
johnc@1829 2657 size_t bytes_to_copy = predict_bytes_to_copy(hr);
johnc@1829 2658 _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
johnc@1829 2659
johnc@1829 2660 // Record the number of bytes used in this region
johnc@1829 2661 _inc_cset_recorded_young_bytes += used_bytes;
johnc@1829 2662
johnc@1829 2663 // Cache the values we have added to the aggregated information
johnc@1829 2664 // in the heap region in case we have to remove this region from
johnc@1829 2665 // the incremental collection set, or it is updated by the
johnc@1829 2666 // rset sampling code
johnc@1829 2667 hr->set_predicted_bytes_to_copy(bytes_to_copy);
johnc@1829 2668 #endif // PREDICTIONS_VERBOSE
johnc@1829 2669 }
johnc@1829 2670
johnc@1829 2671 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
johnc@1829 2672 // This routine is currently only called as part of the updating of
johnc@1829 2673 // existing policy information for regions in the incremental cset that
johnc@1829 2674 // is performed by the concurrent refine thread(s) as part of young list
johnc@1829 2675 // RSet sampling. Therefore we should not be at a safepoint.
johnc@1829 2676
johnc@1829 2677 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
johnc@1829 2678 assert(hr->is_young(), "it should be");
johnc@1829 2679
johnc@1829 2680 size_t used_bytes = hr->used();
johnc@1829 2681 size_t old_rs_length = hr->recorded_rs_length();
johnc@1829 2682 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
johnc@1829 2683
johnc@1829 2684 // Subtract the old recorded/predicted policy information for
johnc@1829 2685 // the given heap region from the collection set info.
johnc@1829 2686 _inc_cset_recorded_rs_lengths -= old_rs_length;
johnc@1829 2687 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
johnc@1829 2688
johnc@1829 2689 _inc_cset_bytes_used_before -= used_bytes;
johnc@1829 2690
johnc@1829 2691 // Clear the values cached in the heap region
johnc@1829 2692 hr->set_recorded_rs_length(0);
johnc@1829 2693 hr->set_predicted_elapsed_time_ms(0);
johnc@1829 2694
johnc@1829 2695 #if PREDICTIONS_VERBOSE
johnc@1829 2696 size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
johnc@1829 2697 _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
johnc@1829 2698
johnc@1829 2699 // Subtract the number of bytes used in this region
johnc@1829 2700 _inc_cset_recorded_young_bytes -= used_bytes;
johnc@1829 2701
johnc@1829 2702 // Clear the values cached in the heap region
johnc@1829 2703 hr->set_predicted_bytes_to_copy(0);
johnc@1829 2704 #endif // PREDICTIONS_VERBOSE
johnc@1829 2705 }
johnc@1829 2706
johnc@1829 2707 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
johnc@1829 2708 // Update the collection set information that is dependent on the new RS length
johnc@1829 2709 assert(hr->is_young(), "Precondition");
johnc@1829 2710
johnc@1829 2711 remove_from_incremental_cset_info(hr);
johnc@1829 2712 add_to_incremental_cset_info(hr, new_rs_length);
johnc@1829 2713 }
johnc@1829 2714
johnc@1829 2715 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
johnc@1829 2716 assert( hr->is_young(), "invariant");
johnc@1829 2717 assert( hr->young_index_in_cset() == -1, "invariant" );
johnc@1829 2718 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2719
johnc@1829 2720 // We need to clear and set the cached recorded/predicted collection set
johnc@1829 2721 // information in the heap region here (before the region gets added
johnc@1829 2722 // to the collection set). An individual heap region's cached values
johnc@1829 2723 // are calculated, aggregated with the policy collection set info,
johnc@1829 2724 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2725 // by the Young List sampling code.
johnc@1829 2726
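// Seed the region's policy information with the current size of its remembered set.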
johnc@1829 2727 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2728 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2729
johnc@1829 2730 HeapWord* hr_end = hr->end();
johnc@1829 2731 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2732
johnc@1829 2733 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2734 hr->set_in_collection_set(true);
johnc@1829 2735 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2736
johnc@1829 2737 _inc_cset_size++;
johnc@1829 2738 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2739
johnc@1829 2740 hr->set_young_index_in_cset((int) _inc_cset_young_index);
johnc@1829 2741 ++_inc_cset_young_index;
johnc@1829 2742 }
johnc@1829 2743
johnc@1829 2744 // Add the region to the RHS (tail) of the incremental cset
johnc@1829 2745 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2746 // We should only ever be appending survivors at the end of a pause
johnc@1829 2747 assert( hr->is_survivor(), "Logic");
johnc@1829 2748
johnc@1829 2749 // Do the 'common' stuff
johnc@1829 2750 add_region_to_incremental_cset_common(hr);
johnc@1829 2751
johnc@1829 2752 // Now add the region at the right hand side
johnc@1829 2753 if (_inc_cset_tail == NULL) {
johnc@1829 2754 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2755 _inc_cset_head = hr;
johnc@1829 2756 } else {
johnc@1829 2757 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2758 }
johnc@1829 2759 _inc_cset_tail = hr;
johnc@1829 2760
johnc@1829 2761 if (G1PrintHeapRegions) {
johnc@1829 2762 gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
johnc@1829 2763 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
johnc@1829 2764 "top "PTR_FORMAT", young %s",
johnc@1829 2765 hr->hrs_index(), hr->bottom(), hr->end(),
johnc@1829 2766 hr->top(), (hr->is_young()) ? "YES" : "NO");
johnc@1829 2767 }
johnc@1829 2768 }
johnc@1829 2769
johnc@1829 2770 // Add the region to the LHS of the incremental cset
johnc@1829 2771 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2772 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2773 assert(!hr->is_survivor(), "Logic");
johnc@1829 2774
johnc@1829 2775 // Do the 'common' stuff
johnc@1829 2776 add_region_to_incremental_cset_common(hr);
johnc@1829 2777
johnc@1829 2778 // Add the region at the left hand side
johnc@1829 2779 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2780 if (_inc_cset_head == NULL) {
johnc@1829 2781 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2782 _inc_cset_tail = hr;
johnc@1829 2783 }
johnc@1829 2784 _inc_cset_head = hr;
johnc@1829 2785
johnc@1829 2786 if (G1PrintHeapRegions) {
johnc@1829 2787 gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
johnc@1829 2788 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
johnc@1829 2789 "top "PTR_FORMAT", young %s",
johnc@1829 2790 hr->hrs_index(), hr->bottom(), hr->end(),
johnc@1829 2791 hr->top(), (hr->is_young()) ? "YES" : "NO");
johnc@1829 2792 }
johnc@1829 2793 }
johnc@1829 2794
johnc@1829 2795 #ifndef PRODUCT
johnc@1829 2796 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2797 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2798
johnc@1829 2799 st->print_cr("\nCollection_set:");
johnc@1829 2800 HeapRegion* csr = list_head;
johnc@1829 2801 while (csr != NULL) {
johnc@1829 2802 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2803 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2804 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2805 "age: %4d, y: %d, surv: %d",
johnc@1829 2806 csr->bottom(), csr->end(),
johnc@1829 2807 csr->top(),
johnc@1829 2808 csr->prev_top_at_mark_start(),
johnc@1829 2809 csr->next_top_at_mark_start(),
johnc@1829 2810 csr->top_at_conc_mark_count(),
johnc@1829 2811 csr->age_in_surv_rate_group_cond(),
johnc@1829 2812 csr->is_young(),
johnc@1829 2813 csr->is_survivor());
johnc@1829 2814 csr = next;
johnc@1829 2815 }
johnc@1829 2816 }
johnc@1829 2817 #endif // !PRODUCT
johnc@1829 2818
tonyp@2062 2819 void
tonyp@2011 2820 G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
tonyp@2011 2821 double target_pause_time_ms) {
johnc@1829 2822 // Set this here - in case we're not doing young collections.
johnc@1829 2823 double non_young_start_time_sec = os::elapsedTime();
johnc@1829 2824
ysr@777 2825 start_recording_regions();
ysr@777 2826
tonyp@2011 2827 guarantee(target_pause_time_ms > 0.0,
tonyp@2011 2828 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2011 2829 target_pause_time_ms));
tonyp@2011 2830 guarantee(_collection_set == NULL, "Precondition");
ysr@777 2831
ysr@777 2832 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
ysr@777 2833 double predicted_pause_time_ms = base_time_ms;
ysr@777 2834
tonyp@2011 2835 double time_remaining_ms = target_pause_time_ms - base_time_ms;
ysr@777 2836
ysr@777 2837 // the 10% and 50% values are arbitrary...
tonyp@2011 2838 if (time_remaining_ms < 0.10 * target_pause_time_ms) {
tonyp@2011 2839 time_remaining_ms = 0.50 * target_pause_time_ms;
ysr@777 2840 _within_target = false;
ysr@777 2841 } else {
ysr@777 2842 _within_target = true;
ysr@777 2843 }
ysr@777 2844
ysr@777 2845 // We figure out the number of bytes available for future to-space.
ysr@777 2846 // For new regions without marking information, we must assume the
ysr@777 2847 // worst-case of complete survival. If we have marking information for a
ysr@777 2848 // region, we can bound the amount of live data. We can add a number of
ysr@777 2849 // such regions, as long as the sum of the live data bounds does not
ysr@777 2850 // exceed the available evacuation space.
ysr@777 2851 size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
ysr@777 2852
ysr@777 2853 size_t expansion_bytes =
ysr@777 2854 _g1->expansion_regions() * HeapRegion::GrainBytes;
ysr@777 2855
apetrusenko@1112 2856 _collection_set_bytes_used_before = 0;
apetrusenko@1112 2857 _collection_set_size = 0;
ysr@777 2858
ysr@777 2859 // Adjust for expansion and slop.
ysr@777 2860 max_live_bytes = max_live_bytes + expansion_bytes;
ysr@777 2861
apetrusenko@1112 2862 assert(_g1->regions_accounted_for(), "Region leakage!");
ysr@777 2863
ysr@777 2864 HeapRegion* hr;
ysr@777 2865 if (in_young_gc_mode()) {
ysr@777 2866 double young_start_time_sec = os::elapsedTime();
ysr@777 2867
ysr@777 2868 if (G1PolicyVerbose > 0) {
ysr@777 2869 gclog_or_tty->print_cr("Adding %d young regions to the CSet",
johnc@1829 2870 _g1->young_list()->length());
ysr@777 2871 }
johnc@1829 2872
ysr@777 2873 _young_cset_length = 0;
ysr@777 2874 _last_young_gc_full = full_young_gcs();
johnc@1829 2875
ysr@777 2876 if (_last_young_gc_full)
ysr@777 2877 ++_full_young_pause_num;
ysr@777 2878 else
ysr@777 2879 ++_partial_young_pause_num;
johnc@1829 2880
johnc@1829 2881 // The young list is laid out with the survivor regions from the previous
johnc@1829 2882 // pause appended to the RHS of the young list, i.e.
johnc@1829 2883 // [Newly Young Regions ++ Survivors from last pause].
johnc@1829 2884
johnc@1829 2885 hr = _g1->young_list()->first_survivor_region();
ysr@777 2886 while (hr != NULL) {
johnc@1829 2887 assert(hr->is_survivor(), "badly formed young list");
johnc@1829 2888 hr->set_young();
johnc@1829 2889 hr = hr->get_next_young_region();
ysr@777 2890 }
ysr@777 2891
johnc@1829 2892 // Clear the fields that point to the survivor list - they are
johnc@1829 2893 // all young now.
johnc@1829 2894 _g1->young_list()->clear_survivors();
johnc@1829 2895
johnc@1829 2896 if (_g1->mark_in_progress())
johnc@1829 2897 _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
johnc@1829 2898
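// The incremental collection set built up since the last pause now becomes
// the collection set for this pause.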
johnc@1829 2899 _young_cset_length = _inc_cset_young_index;
johnc@1829 2900 _collection_set = _inc_cset_head;
johnc@1829 2901 _collection_set_size = _inc_cset_size;
johnc@1829 2902 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
johnc@1829 2903
johnc@1829 2904 // For young regions in the collection set, we assume the worst
johnc@1829 2905 // case of complete survival
johnc@1829 2906 max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
johnc@1829 2907
johnc@1829 2908 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
johnc@1829 2909 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
johnc@1829 2910
johnc@1829 2911 // The number of recorded young regions is the incremental
johnc@1829 2912 // collection set's current size
johnc@1829 2913 set_recorded_young_regions(_inc_cset_size);
johnc@1829 2914 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
johnc@1829 2915 set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
johnc@1829 2916 #if PREDICTIONS_VERBOSE
johnc@1829 2917 set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
johnc@1829 2918 #endif // PREDICTIONS_VERBOSE
johnc@1829 2919
johnc@1829 2920 if (G1PolicyVerbose > 0) {
johnc@1829 2921 gclog_or_tty->print_cr(" Added " PTR_FORMAT " Young Regions to CS.",
johnc@1829 2922 _inc_cset_size);
johnc@1829 2923 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
johnc@1829 2924 max_live_bytes/K);
johnc@1829 2925 }
johnc@1829 2926
johnc@1829 2927 assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
ysr@777 2928
ysr@777 2929 double young_end_time_sec = os::elapsedTime();
ysr@777 2930 _recorded_young_cset_choice_time_ms =
ysr@777 2931 (young_end_time_sec - young_start_time_sec) * 1000.0;
ysr@777 2932
johnc@1829 2933 // We are doing young collections so reset this.
johnc@1829 2934 non_young_start_time_sec = young_end_time_sec;
johnc@1829 2935
johnc@1829 2936 // Note we can use either _collection_set_size or
johnc@1829 2937 // _young_cset_length here
johnc@1829 2938 if (_collection_set_size > 0 && _last_young_gc_full) {
ysr@777 2939 // don't bother adding more regions...
ysr@777 2940 goto choose_collection_set_end;
ysr@777 2941 }
ysr@777 2942 }
ysr@777 2943
ysr@777 2944 if (!in_young_gc_mode() || !full_young_gcs()) {
ysr@777 2945 bool should_continue = true;
ysr@777 2946 NumberSeq seq;
ysr@777 2947 double avg_prediction = 100000000000000000.0; // something very large
johnc@1829 2948
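// Add marked non-young regions from the (sorted) chooser while candidates
// remain and either the remaining pause-time budget allows it (adaptive
// young list length) or the collection set is still smaller than the fixed
// young list length.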
ysr@777 2949 do {
ysr@777 2950 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
ysr@777 2951 avg_prediction);
apetrusenko@1112 2952 if (hr != NULL) {
ysr@777 2953 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
ysr@777 2954 time_remaining_ms -= predicted_time_ms;
ysr@777 2955 predicted_pause_time_ms += predicted_time_ms;
ysr@777 2956 add_to_collection_set(hr);
johnc@1829 2957 record_non_young_cset_region(hr);
ysr@777 2958 max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
ysr@777 2959 if (G1PolicyVerbose > 0) {
ysr@777 2960 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
ysr@777 2961 max_live_bytes/K);
ysr@777 2962 }
ysr@777 2963 seq.add(predicted_time_ms);
ysr@777 2964 avg_prediction = seq.avg() + seq.sd();
ysr@777 2965 }
ysr@777 2966 should_continue =
ysr@777 2967 ( hr != NULL) &&
ysr@777 2968 ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
ysr@777 2969 : _collection_set_size < _young_list_fixed_length );
ysr@777 2970 } while (should_continue);
ysr@777 2971
ysr@777 2972 if (!adaptive_young_list_length() &&
ysr@777 2973 _collection_set_size < _young_list_fixed_length)
ysr@777 2974 _should_revert_to_full_young_gcs = true;
ysr@777 2975 }
ysr@777 2976
ysr@777 2977 choose_collection_set_end:
johnc@1829 2978 stop_incremental_cset_building();
johnc@1829 2979
ysr@777 2980 count_CS_bytes_used();
ysr@777 2981
ysr@777 2982 end_recording_regions();
ysr@777 2983
ysr@777 2984 double non_young_end_time_sec = os::elapsedTime();
ysr@777 2985 _recorded_non_young_cset_choice_time_ms =
ysr@777 2986 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
ysr@777 2987 }
ysr@777 2988
ysr@777 2989 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
ysr@777 2990 G1CollectorPolicy::record_full_collection_end();
ysr@777 2991 _collectionSetChooser->updateAfterFullCollection();
ysr@777 2992 }
ysr@777 2993
ysr@777 2994 void G1CollectorPolicy_BestRegionsFirst::
ysr@777 2995 expand_if_possible(size_t numRegions) {
ysr@777 2996 size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
ysr@777 2997 _g1->expand(expansion_bytes);
ysr@777 2998 }
ysr@777 2999
ysr@777 3000 void G1CollectorPolicy_BestRegionsFirst::
tonyp@2062 3001 record_collection_pause_end() {
tonyp@2062 3002 G1CollectorPolicy::record_collection_pause_end();
ysr@777 3003 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 3004 }
