src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author      johnc
date        Mon, 02 Aug 2010 12:51:43 -0700
changeset   2060:2d160770d2e5
parent      2011:4e5661ba9d98
child       2062:0ce1569c90e5
permissions -rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp
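Below is an editorial sketch, not the patch itself (the actual code changes are in g1RemSet.cpp and related files, which are not part of this listing). It only illustrates the shape of the approach the summary describes: during RSet updating, references into the collection set are processed directly, and the card that contained such a reference is recorded on a dirty card queue so that the collection-set RSets can be rebuilt if evacuation fails. The type and function names below are placeholders, not HotSpot APIs.

    #include <vector>

    typedef signed char jbyte;                     // card table entries are bytes

    // Placeholder stand-in for a per-worker DirtyCardQueue; illustrative only.
    struct IntoCSetCardQueue {
      std::vector<jbyte*> _cards;
      void enqueue(jbyte* card) { _cards.push_back(card); }
    };

    // Sketch of the per-card RSet-updating step. Instead of copying the
    // interesting references into a per-worker _new_refs array for later
    // processing, the references are handled immediately and only the card
    // that held them is remembered.
    void update_rs_card(jbyte* card,
                        bool points_into_cset,     // result of scanning the card
                        IntoCSetCardQueue& into_cset_dcq) {
      // ... scan the card; any reference into the collection set is handled
      // directly here, rather than being deferred via _new_refs ...
      if (points_into_cset) {
        // Remember the card itself. If evacuation fails, these cards can be
        // re-scanned to recreate the RSets of the retained collection-set
        // regions.
        into_cset_dcq.enqueue(card);
      }
    }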

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_g1CollectorPolicy.cpp.incl"
ysr@777 27
ysr@777 28 #define PREDICTIONS_VERBOSE 0
ysr@777 29
ysr@777 30 // <NEW PREDICTION>
ysr@777 31
ysr@777 32 // Different defaults for different number of GC threads
ysr@777 33 // They were chosen by running GCOld and SPECjbb on debris with different
ysr@777 34 // numbers of GC threads and choosing them based on the results
ysr@777 35
ysr@777 36 // all the same
ysr@777 37 static double rs_length_diff_defaults[] = {
ysr@777 38 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
ysr@777 39 };
ysr@777 40
ysr@777 41 static double cost_per_card_ms_defaults[] = {
ysr@777 42 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
ysr@777 43 };
ysr@777 44
ysr@777 45 // all the same
ysr@777 46 static double fully_young_cards_per_entry_ratio_defaults[] = {
ysr@777 47 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
ysr@777 48 };
ysr@777 49
ysr@777 50 static double cost_per_entry_ms_defaults[] = {
ysr@777 51 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
ysr@777 52 };
ysr@777 53
ysr@777 54 static double cost_per_byte_ms_defaults[] = {
ysr@777 55 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
ysr@777 56 };
ysr@777 57
ysr@777 58 // these should be pretty consistent
ysr@777 59 static double constant_other_time_ms_defaults[] = {
ysr@777 60 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
ysr@777 61 };
ysr@777 62
ysr@777 63
ysr@777 64 static double young_other_cost_per_region_ms_defaults[] = {
ysr@777 65 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
ysr@777 66 };
ysr@777 67
ysr@777 68 static double non_young_other_cost_per_region_ms_defaults[] = {
ysr@777 69 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
ysr@777 70 };
ysr@777 71
ysr@777 72 // </NEW PREDICTION>
ysr@777 73
ysr@777 74 G1CollectorPolicy::G1CollectorPolicy() :
ysr@777 75 _parallel_gc_threads((ParallelGCThreads > 0) ? ParallelGCThreads : 1),
ysr@777 76 _n_pauses(0),
ysr@777 77 _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 78 _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 79 _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 80 _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 81 _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 82 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 83 _all_pause_times_ms(new NumberSeq()),
ysr@777 84 _stop_world_start(0.0),
ysr@777 85 _all_stop_world_times_ms(new NumberSeq()),
ysr@777 86 _all_yield_times_ms(new NumberSeq()),
ysr@777 87
ysr@777 88 _all_mod_union_times_ms(new NumberSeq()),
ysr@777 89
apetrusenko@1112 90 _summary(new Summary()),
apetrusenko@1112 91 _abandoned_summary(new AbandonedSummary()),
ysr@777 92
johnc@1325 93 #ifndef PRODUCT
ysr@777 94 _cur_clear_ct_time_ms(0.0),
johnc@1325 95 _min_clear_cc_time_ms(-1.0),
johnc@1325 96 _max_clear_cc_time_ms(-1.0),
johnc@1325 97 _cur_clear_cc_time_ms(0.0),
johnc@1325 98 _cum_clear_cc_time_ms(0.0),
johnc@1325 99 _num_cc_clears(0L),
johnc@1325 100 #endif
ysr@777 101
ysr@777 102 _region_num_young(0),
ysr@777 103 _region_num_tenured(0),
ysr@777 104 _prev_region_num_young(0),
ysr@777 105 _prev_region_num_tenured(0),
ysr@777 106
ysr@777 107 _aux_num(10),
ysr@777 108 _all_aux_times_ms(new NumberSeq[_aux_num]),
ysr@777 109 _cur_aux_start_times_ms(new double[_aux_num]),
ysr@777 110 _cur_aux_times_ms(new double[_aux_num]),
ysr@777 111 _cur_aux_times_set(new bool[_aux_num]),
ysr@777 112
ysr@777 113 _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 114 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 115 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 116
ysr@777 117 // <NEW PREDICTION>
ysr@777 118
ysr@777 119 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 120 _prev_collection_pause_end_ms(0.0),
ysr@777 121 _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 122 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 123 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 124 _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 125 _partially_young_cards_per_entry_ratio_seq(
ysr@777 126 new TruncatedSeq(TruncatedSeqLength)),
ysr@777 127 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 128 _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 129 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 130 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 131 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 132 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 133 _non_young_other_cost_per_region_ms_seq(
ysr@777 134 new TruncatedSeq(TruncatedSeqLength)),
ysr@777 135
ysr@777 136 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 137 _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 138 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 139
johnc@1186 140 _pause_time_target_ms((double) MaxGCPauseMillis),
ysr@777 141
ysr@777 142 // </NEW PREDICTION>
ysr@777 143
ysr@777 144 _in_young_gc_mode(false),
ysr@777 145 _full_young_gcs(true),
ysr@777 146 _full_young_pause_num(0),
ysr@777 147 _partial_young_pause_num(0),
ysr@777 148
ysr@777 149 _during_marking(false),
ysr@777 150 _in_marking_window(false),
ysr@777 151 _in_marking_window_im(false),
ysr@777 152
ysr@777 153 _known_garbage_ratio(0.0),
ysr@777 154 _known_garbage_bytes(0),
ysr@777 155
ysr@777 156 _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 157
ysr@777 158 _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 159
ysr@777 160 _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 161 _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 162
ysr@777 163 _recent_avg_pause_time_ratio(0.0),
ysr@777 164 _num_markings(0),
ysr@777 165 _n_marks(0),
ysr@777 166 _n_pauses_at_mark_end(0),
ysr@777 167
ysr@777 168 _all_full_gc_times_ms(new NumberSeq()),
ysr@777 169
ysr@777 170 // G1PausesBtwnConcMark defaults to -1
ysr@777 171 // so the hack is to do the cast QQQ FIXME
ysr@777 172 _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
ysr@777 173 _n_marks_since_last_pause(0),
tonyp@1794 174 _initiate_conc_mark_if_possible(false),
tonyp@1794 175 _during_initial_mark_pause(false),
ysr@777 176 _should_revert_to_full_young_gcs(false),
ysr@777 177 _last_full_young_gc(false),
ysr@777 178
ysr@777 179 _prev_collection_pause_used_at_end_bytes(0),
ysr@777 180
ysr@777 181 _collection_set(NULL),
johnc@1829 182 _collection_set_size(0),
johnc@1829 183 _collection_set_bytes_used_before(0),
johnc@1829 184
johnc@1829 185 // Incremental CSet attributes
johnc@1829 186 _inc_cset_build_state(Inactive),
johnc@1829 187 _inc_cset_head(NULL),
johnc@1829 188 _inc_cset_tail(NULL),
johnc@1829 189 _inc_cset_size(0),
johnc@1829 190 _inc_cset_young_index(0),
johnc@1829 191 _inc_cset_bytes_used_before(0),
johnc@1829 192 _inc_cset_max_finger(NULL),
johnc@1829 193 _inc_cset_recorded_young_bytes(0),
johnc@1829 194 _inc_cset_recorded_rs_lengths(0),
johnc@1829 195 _inc_cset_predicted_elapsed_time_ms(0.0),
johnc@1829 196 _inc_cset_predicted_bytes_to_copy(0),
johnc@1829 197
ysr@777 198 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 199 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 200 #endif // _MSC_VER
ysr@777 201
ysr@777 202 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
ysr@777 203 G1YoungSurvRateNumRegionsSummary)),
ysr@777 204 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
apetrusenko@980 205 G1YoungSurvRateNumRegionsSummary)),
ysr@777 206 // add here any more surv rate groups
apetrusenko@980 207 _recorded_survivor_regions(0),
apetrusenko@980 208 _recorded_survivor_head(NULL),
apetrusenko@980 209 _recorded_survivor_tail(NULL),
tonyp@1791 210 _survivors_age_table(true),
tonyp@1791 211
tonyp@1791 212 _gc_overhead_perc(0.0)
apetrusenko@980 213
ysr@777 214 {
tonyp@1377 215 // Set up the region size and associated fields. Given that the
tonyp@1377 216 // policy is created before the heap, we have to set this up here,
tonyp@1377 217 // so it's done as soon as possible.
tonyp@1377 218 HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
iveresov@1696 219 HeapRegionRemSet::setup_remset_size();
tonyp@1377 220
apetrusenko@1826 221 // Verify PLAB sizes
apetrusenko@1826 222 const uint region_size = HeapRegion::GrainWords;
apetrusenko@1826 223 if (YoungPLABSize > region_size || OldPLABSize > region_size) {
apetrusenko@1826 224 char buffer[128];
apetrusenko@1826 225 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
apetrusenko@1826 226 OldPLABSize > region_size ? "Old" : "Young", region_size);
apetrusenko@1826 227 vm_exit_during_initialization(buffer);
apetrusenko@1826 228 }
apetrusenko@1826 229
ysr@777 230 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
ysr@777 231 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
ysr@777 232
tonyp@1966 233 _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads];
ysr@777 234 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
ysr@777 235 _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
ysr@777 236
ysr@777 237 _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
ysr@777 238 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
ysr@777 239
ysr@777 240 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
ysr@777 241
ysr@777 242 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
ysr@777 243
ysr@777 244 _par_last_termination_times_ms = new double[_parallel_gc_threads];
tonyp@1966 245 _par_last_termination_attempts = new double[_parallel_gc_threads];
tonyp@1966 246 _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads];
ysr@777 247
ysr@777 248 // start conservatively
johnc@1186 249 _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
ysr@777 250
ysr@777 251 // <NEW PREDICTION>
ysr@777 252
ysr@777 253 int index;
ysr@777 254 if (ParallelGCThreads == 0)
ysr@777 255 index = 0;
ysr@777 256 else if (ParallelGCThreads > 8)
ysr@777 257 index = 7;
ysr@777 258 else
ysr@777 259 index = ParallelGCThreads - 1;
ysr@777 260
ysr@777 261 _pending_card_diff_seq->add(0.0);
ysr@777 262 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
ysr@777 263 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
ysr@777 264 _fully_young_cards_per_entry_ratio_seq->add(
ysr@777 265 fully_young_cards_per_entry_ratio_defaults[index]);
ysr@777 266 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
ysr@777 267 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
ysr@777 268 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
ysr@777 269 _young_other_cost_per_region_ms_seq->add(
ysr@777 270 young_other_cost_per_region_ms_defaults[index]);
ysr@777 271 _non_young_other_cost_per_region_ms_seq->add(
ysr@777 272 non_young_other_cost_per_region_ms_defaults[index]);
ysr@777 273
ysr@777 274 // </NEW PREDICTION>
ysr@777 275
tonyp@1965 276 // Below, we might need to calculate the pause time target based on
tonyp@1965 277 // the pause interval. When we do so we are going to give G1 maximum
tonyp@1965 278 // flexibility and allow it to do pauses when it needs to. So, we'll
tonyp@1965 279 // arrange for the pause interval to be pause time target + 1 to
tonyp@1965 280 // ensure that a) the pause time target is maximized with respect to
tonyp@1965 281 // the pause interval and b) we maintain the invariant that pause
tonyp@1965 282 // time target < pause interval. If the user does not want this
tonyp@1965 283 // maximum flexibility, they will have to set the pause interval
tonyp@1965 284 // explicitly.
tonyp@1965 285
tonyp@1965 286 // First make sure that, if either parameter is set, its value is
tonyp@1965 287 // reasonable.
tonyp@1965 288 if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
tonyp@1965 289 if (MaxGCPauseMillis < 1) {
tonyp@1965 290 vm_exit_during_initialization("MaxGCPauseMillis should be "
tonyp@1965 291 "greater than 0");
tonyp@1965 292 }
tonyp@1965 293 }
tonyp@1965 294 if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 295 if (GCPauseIntervalMillis < 1) {
tonyp@1965 296 vm_exit_during_initialization("GCPauseIntervalMillis should be "
tonyp@1965 297 "greater than 0");
tonyp@1965 298 }
tonyp@1965 299 }
tonyp@1965 300
tonyp@1965 301 // Then, if the pause time target parameter was not set, set it to
tonyp@1965 302 // the default value.
tonyp@1965 303 if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
tonyp@1965 304 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 305 // The default pause time target in G1 is 200ms
tonyp@1965 306 FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
tonyp@1965 307 } else {
tonyp@1965 308 // We do not allow the pause interval to be set without the
tonyp@1965 309 // pause time target
tonyp@1965 310 vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
tonyp@1965 311 "without setting MaxGCPauseMillis");
tonyp@1965 312 }
tonyp@1965 313 }
tonyp@1965 314
tonyp@1965 315 // Then, if the interval parameter was not set, set it according to
tonyp@1965 316 // the pause time target (this will also deal with the case when the
tonyp@1965 317 // pause time target is the default value).
tonyp@1965 318 if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
tonyp@1965 319 FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
tonyp@1965 320 }
tonyp@1965 321
tonyp@1965 322 // Finally, make sure that the two parameters are consistent.
tonyp@1965 323 if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
tonyp@1965 324 char buffer[256];
tonyp@1965 325 jio_snprintf(buffer, 256,
tonyp@1965 326 "MaxGCPauseMillis (%u) should be less than "
tonyp@1965 327 "GCPauseIntervalMillis (%u)",
tonyp@1965 328 MaxGCPauseMillis, GCPauseIntervalMillis);
tonyp@1965 329 vm_exit_during_initialization(buffer);
tonyp@1965 330 }
tonyp@1965 331
tonyp@1965 332 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
johnc@1186 333 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
ysr@777 334 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
johnc@1186 335 _sigma = (double) G1ConfidencePercent / 100.0;
ysr@777 336
ysr@777 337 // start conservatively (around 50ms is about right)
ysr@777 338 _concurrent_mark_init_times_ms->add(0.05);
ysr@777 339 _concurrent_mark_remark_times_ms->add(0.05);
ysr@777 340 _concurrent_mark_cleanup_times_ms->add(0.20);
ysr@777 341 _tenuring_threshold = MaxTenuringThreshold;
ysr@777 342
tonyp@1717 343 // if G1FixedSurvivorSpaceSize is 0 which means the size is not
tonyp@1717 344 // fixed, then _max_survivor_regions will be calculated at
johnc@1829 345 // calculate_young_list_target_length during initialization
tonyp@1717 346 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
apetrusenko@980 347
tonyp@1791 348 assert(GCTimeRatio > 0,
tonyp@1791 349 "we should have set it to a default value set_g1_gc_flags() "
tonyp@1791 350 "if a user set it to 0");
tonyp@1791 351 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
tonyp@1791 352
ysr@777 353 initialize_all();
ysr@777 354 }
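A quick worked check of the GC overhead formula in the constructor above, using GCTimeRatio == 9 purely as an illustrative value (the assert only guarantees the flag is non-zero; the actual value comes from the command line or from the ergonomic default applied in set_g1_gc_flags()):

    _gc_overhead_perc = 100.0 * (1.0 / (1.0 + 9)) = 10.0

i.e. the policy treats GCTimeRatio as the usual ratio of application time to GC time, so a ratio of 9 translates into a target of at most about 10% of total time spent in GC.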
ysr@777 355
ysr@777 356 // Increment "i", mod "len"
ysr@777 357 static void inc_mod(int& i, int len) {
ysr@777 358 i++; if (i == len) i = 0;
ysr@777 359 }
ysr@777 360
ysr@777 361 void G1CollectorPolicy::initialize_flags() {
ysr@777 362 set_min_alignment(HeapRegion::GrainBytes);
ysr@777 363 set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
apetrusenko@982 364 if (SurvivorRatio < 1) {
apetrusenko@982 365 vm_exit_during_initialization("Invalid survivor ratio specified");
apetrusenko@982 366 }
ysr@777 367 CollectorPolicy::initialize_flags();
ysr@777 368 }
ysr@777 369
tonyp@1720 370 // The easiest way to deal with the parsing of the NewSize /
tonyp@1720 371 // MaxNewSize / etc. parameters is to re-use the code in the
tonyp@1720 372 // TwoGenerationCollectorPolicy class. This is similar to what
tonyp@1720 373 // ParallelScavenge does with its GenerationSizer class (see
tonyp@1720 374 // ParallelScavengeHeap::initialize()). We might change this in the
tonyp@1720 375 // future, but it's a good start.
tonyp@1720 376 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
tonyp@1720 377 size_t size_to_region_num(size_t byte_size) {
tonyp@1720 378 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
tonyp@1720 379 }
tonyp@1720 380
tonyp@1720 381 public:
tonyp@1720 382 G1YoungGenSizer() {
tonyp@1720 383 initialize_flags();
tonyp@1720 384 initialize_size_info();
tonyp@1720 385 }
tonyp@1720 386
tonyp@1720 387 size_t min_young_region_num() {
tonyp@1720 388 return size_to_region_num(_min_gen0_size);
tonyp@1720 389 }
tonyp@1720 390 size_t initial_young_region_num() {
tonyp@1720 391 return size_to_region_num(_initial_gen0_size);
tonyp@1720 392 }
tonyp@1720 393 size_t max_young_region_num() {
tonyp@1720 394 return size_to_region_num(_max_gen0_size);
tonyp@1720 395 }
tonyp@1720 396 };
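A short worked example of size_to_region_num() in the class above, assuming a 1 MB HeapRegion::GrainBytes purely for illustration (the real region size is chosen ergonomically in setup_heap_region_size()):

    size_to_region_num(64*M)  == MAX2((size_t)1, 64*M  / (1*M)) == 64 regions
    size_to_region_num(512*K) == MAX2((size_t)1, 512*K / (1*M)) == MAX2(1, 0) == 1 region

so a young generation request smaller than one region is still rounded up to a single young region by the MAX2.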
tonyp@1720 397
ysr@777 398 void G1CollectorPolicy::init() {
ysr@777 399 // Set aside an initial future to_space.
ysr@777 400 _g1 = G1CollectedHeap::heap();
ysr@777 401
ysr@777 402 assert(Heap_lock->owned_by_self(), "Locking discipline.");
ysr@777 403
apetrusenko@980 404 initialize_gc_policy_counters();
apetrusenko@980 405
ysr@777 406 if (G1Gen) {
ysr@777 407 _in_young_gc_mode = true;
ysr@777 408
tonyp@1720 409 G1YoungGenSizer sizer;
tonyp@1720 410 size_t initial_region_num = sizer.initial_young_region_num();
tonyp@1720 411
tonyp@1720 412 if (UseAdaptiveSizePolicy) {
ysr@777 413 set_adaptive_young_list_length(true);
ysr@777 414 _young_list_fixed_length = 0;
ysr@777 415 } else {
ysr@777 416 set_adaptive_young_list_length(false);
tonyp@1720 417 _young_list_fixed_length = initial_region_num;
ysr@777 418 }
johnc@1829 419 _free_regions_at_end_of_collection = _g1->free_regions();
johnc@1829 420 calculate_young_list_min_length();
johnc@1829 421 guarantee( _young_list_min_length == 0, "invariant, not enough info" );
johnc@1829 422 calculate_young_list_target_length();
johnc@1829 423 } else {
ysr@777 424 _young_list_fixed_length = 0;
ysr@777 425 _in_young_gc_mode = false;
ysr@777 426 }
johnc@1829 427
johnc@1829 428 // We may immediately start allocating regions and placing them on the
johnc@1829 429 // collection set list. Initialize the per-collection set info
johnc@1829 430 start_incremental_cset_building();
ysr@777 431 }
ysr@777 432
apetrusenko@980 433 // Create the jstat counters for the policy.
apetrusenko@980 434 void G1CollectorPolicy::initialize_gc_policy_counters()
apetrusenko@980 435 {
apetrusenko@980 436 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
apetrusenko@980 437 }
apetrusenko@980 438
ysr@777 439 void G1CollectorPolicy::calculate_young_list_min_length() {
ysr@777 440 _young_list_min_length = 0;
ysr@777 441
ysr@777 442 if (!adaptive_young_list_length())
ysr@777 443 return;
ysr@777 444
ysr@777 445 if (_alloc_rate_ms_seq->num() > 3) {
ysr@777 446 double now_sec = os::elapsedTime();
ysr@777 447 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
ysr@777 448 double alloc_rate_ms = predict_alloc_rate_ms();
ysr@777 449 int min_regions = (int) ceil(alloc_rate_ms * when_ms);
johnc@1829 450 int current_region_num = (int) _g1->young_list()->length();
ysr@777 451 _young_list_min_length = min_regions + current_region_num;
ysr@777 452 }
ysr@777 453 }
ysr@777 454
johnc@1829 455 void G1CollectorPolicy::calculate_young_list_target_length() {
ysr@777 456 if (adaptive_young_list_length()) {
ysr@777 457 size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
johnc@1829 458 calculate_young_list_target_length(rs_lengths);
ysr@777 459 } else {
ysr@777 460 if (full_young_gcs())
ysr@777 461 _young_list_target_length = _young_list_fixed_length;
ysr@777 462 else
ysr@777 463 _young_list_target_length = _young_list_fixed_length / 2;
johnc@1829 464
ysr@777 465 _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
ysr@777 466 }
apetrusenko@980 467 calculate_survivors_policy();
ysr@777 468 }
ysr@777 469
johnc@1829 470 void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
ysr@777 471 guarantee( adaptive_young_list_length(), "pre-condition" );
johnc@1829 472 guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
ysr@777 473
ysr@777 474 double start_time_sec = os::elapsedTime();
tonyp@1717 475 size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
ysr@777 476 min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
ysr@777 477 size_t reserve_regions =
ysr@777 478 (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
ysr@777 479
ysr@777 480 if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
ysr@777 481 // we are in fully-young mode and there are free regions in the heap
ysr@777 482
apetrusenko@980 483 double survivor_regions_evac_time =
apetrusenko@980 484 predict_survivor_regions_evac_time();
apetrusenko@980 485
ysr@777 486 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
ysr@777 487 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
ysr@777 488 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
johnc@1829 489 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
apetrusenko@980 490 double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
apetrusenko@980 491 + survivor_regions_evac_time;
johnc@1829 492
ysr@777 493 // the result
ysr@777 494 size_t final_young_length = 0;
johnc@1829 495
johnc@1829 496 size_t init_free_regions =
johnc@1829 497 MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
johnc@1829 498
johnc@1829 499 // if we're still under the pause target...
johnc@1829 500 if (base_time_ms <= target_pause_time_ms) {
johnc@1829 501 // We make sure that the shortest young length that makes sense
johnc@1829 502 // fits within the target pause time.
johnc@1829 503 size_t min_young_length = 1;
johnc@1829 504
johnc@1829 505 if (predict_will_fit(min_young_length, base_time_ms,
johnc@1829 506 init_free_regions, target_pause_time_ms)) {
johnc@1829 507 // The shortest young length will fit within the target pause time;
johnc@1829 508 // we'll now check whether the absolute maximum number of young
johnc@1829 509 // regions will fit in the target pause time. If not, we'll do
johnc@1829 510 // a binary search between min_young_length and max_young_length
johnc@1829 511 size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
johnc@1829 512 size_t max_young_length = abs_max_young_length;
johnc@1829 513
johnc@1829 514 if (max_young_length > min_young_length) {
johnc@1829 515 // Let's check if the initial max young length will fit within the
johnc@1829 516 // target pause. If so then there is no need to search for a maximal
johnc@1829 517 // young length - we'll return the initial maximum
johnc@1829 518
johnc@1829 519 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 520 init_free_regions, target_pause_time_ms)) {
johnc@1829 521 // The maximum young length will satisfy the target pause time.
johnc@1829 522 // We are done so set min young length to this maximum length.
johnc@1829 523 // The code after the loop will then set final_young_length using
johnc@1829 524 // the value cached in the minimum length.
johnc@1829 525 min_young_length = max_young_length;
johnc@1829 526 } else {
johnc@1829 527 // The maximum possible number of young regions will not fit within
johnc@1829 528 // the target pause time so let's search....
johnc@1829 529
johnc@1829 530 size_t diff = (max_young_length - min_young_length) / 2;
johnc@1829 531 max_young_length = min_young_length + diff;
johnc@1829 532
johnc@1829 533 while (max_young_length > min_young_length) {
johnc@1829 534 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 535 init_free_regions, target_pause_time_ms)) {
johnc@1829 536
johnc@1829 537 // The current max young length will fit within the target
johnc@1829 538 // pause time. Note we do not exit the loop here. By setting
johnc@1829 539 // min = max, and then increasing the max below means that
johnc@1829 540 // we will continue searching for an upper bound in the
johnc@1829 541 // range [max..max+diff]
johnc@1829 542 min_young_length = max_young_length;
johnc@1829 543 }
johnc@1829 544 diff = (max_young_length - min_young_length) / 2;
johnc@1829 545 max_young_length = min_young_length + diff;
johnc@1829 546 }
johnc@1829 547 // the above loop found a maximal young length that will fit
johnc@1829 548 // within the target pause time.
johnc@1829 549 }
johnc@1829 550 assert(min_young_length <= abs_max_young_length, "just checking");
johnc@1829 551 }
johnc@1829 552 final_young_length = min_young_length;
johnc@1829 553 }
ysr@777 554 }
johnc@1829 555 // and we're done!
ysr@777 556
ysr@777 557 // we should have at least one region in the target young length
apetrusenko@980 558 _young_list_target_length =
apetrusenko@980 559 MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
ysr@777 560
ysr@777 561 // let's keep an eye on how long we spend on this calculation
ysr@777 562 // right now, I assume that we'll print it when we need it; we
ysr@777 563 // should really add it to the breakdown of a pause
ysr@777 564 double end_time_sec = os::elapsedTime();
ysr@777 565 double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
ysr@777 566
johnc@1829 567 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 568 // leave this in for debugging, just in case
johnc@1829 569 gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
johnc@1829 570 "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT SIZE_FORMAT,
ysr@777 571 target_pause_time_ms,
johnc@1829 572 _young_list_target_length,
ysr@777 573 elapsed_time_ms,
ysr@777 574 full_young_gcs() ? "full" : "partial",
tonyp@1794 575 during_initial_mark_pause() ? " i-m" : "",
apetrusenko@980 576 _in_marking_window,
apetrusenko@980 577 _in_marking_window_im);
johnc@1829 578 #endif // TRACE_CALC_YOUNG_LENGTH
ysr@777 579
ysr@777 580 if (_young_list_target_length < _young_list_min_length) {
johnc@1829 581 // bummer; this means that, if we do a pause when the maximal
johnc@1829 582 // length dictates, we'll violate the pause spacing target (the
ysr@777 583 // min length was calculated based on the application's current
ysr@777 584 // alloc rate);
ysr@777 585
ysr@777 586 // so, we have to bite the bullet, and allocate the minimum
ysr@777 587 // number. We'll violate our target, but we just can't meet it.
ysr@777 588
johnc@1829 589 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 590 // leave this in for debugging, just in case
ysr@777 591 gclog_or_tty->print_cr("adjusted target length from "
johnc@1829 592 SIZE_FORMAT " to " SIZE_FORMAT,
johnc@1829 593 _young_list_target_length, _young_list_min_length);
johnc@1829 594 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 595
johnc@1829 596 _young_list_target_length = _young_list_min_length;
ysr@777 597 }
ysr@777 598 } else {
ysr@777 599 // we are in a partially-young mode or we've run out of regions (due
ysr@777 600 // to evacuation failure)
ysr@777 601
johnc@1829 602 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 603 // leave this in for debugging, just in case
ysr@777 604 gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT,
johnc@1829 605 _young_list_min_length);
johnc@1829 606 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 607 // we'll do the pause as soon as possible by choosing the minimum
ysr@777 608 _young_list_target_length =
ysr@777 609 MAX2(_young_list_min_length, (size_t) 1);
ysr@777 610 }
ysr@777 611
ysr@777 612 _rs_lengths_prediction = rs_lengths;
ysr@777 613 }
ysr@777 614
johnc@1829 615 // This is used by: calculate_young_list_target_length(rs_length). It
johnc@1829 616 // returns true iff:
johnc@1829 617 // the predicted pause time for the given young list will not overflow
johnc@1829 618 // the target pause time
johnc@1829 619 // and:
johnc@1829 620 // the predicted amount of surviving data will not overflow
johnc@1829 621 // the amount of free space available for survivor regions.
johnc@1829 622 //
ysr@777 623 bool
johnc@1829 624 G1CollectorPolicy::predict_will_fit(size_t young_length,
johnc@1829 625 double base_time_ms,
johnc@1829 626 size_t init_free_regions,
johnc@1829 627 double target_pause_time_ms) {
ysr@777 628
ysr@777 629 if (young_length >= init_free_regions)
ysr@777 630 // end condition 1: not enough space for the young regions
ysr@777 631 return false;
ysr@777 632
ysr@777 633 double accum_surv_rate_adj = 0.0;
ysr@777 634 double accum_surv_rate =
ysr@777 635 accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
johnc@1829 636
ysr@777 637 size_t bytes_to_copy =
ysr@777 638 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
johnc@1829 639
ysr@777 640 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
johnc@1829 641
ysr@777 642 double young_other_time_ms =
johnc@1829 643 predict_young_other_time_ms(young_length);
johnc@1829 644
ysr@777 645 double pause_time_ms =
johnc@1829 646 base_time_ms + copy_time_ms + young_other_time_ms;
ysr@777 647
ysr@777 648 if (pause_time_ms > target_pause_time_ms)
ysr@777 649 // end condition 2: over the target pause time
ysr@777 650 return false;
ysr@777 651
ysr@777 652 size_t free_bytes =
ysr@777 653 (init_free_regions - young_length) * HeapRegion::GrainBytes;
ysr@777 654
ysr@777 655 if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
ysr@777 656 // end condition 3: out of to-space (conservatively)
ysr@777 657 return false;
ysr@777 658
ysr@777 659 // success!
ysr@777 660 return true;
ysr@777 661 }
ysr@777 662
apetrusenko@980 663 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 664 double survivor_regions_evac_time = 0.0;
apetrusenko@980 665 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 666 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 667 r = r->get_next_young_region()) {
apetrusenko@980 668 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 669 }
apetrusenko@980 670 return survivor_regions_evac_time;
apetrusenko@980 671 }
apetrusenko@980 672
ysr@777 673 void G1CollectorPolicy::check_prediction_validity() {
ysr@777 674 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 675
johnc@1829 676 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 677 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 678 // add 10% to avoid having to recalculate often
ysr@777 679 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
johnc@1829 680 calculate_young_list_target_length(rs_lengths_prediction);
ysr@777 681 }
ysr@777 682 }
ysr@777 683
ysr@777 684 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
ysr@777 685 bool is_tlab,
ysr@777 686 bool* gc_overhead_limit_was_exceeded) {
ysr@777 687 guarantee(false, "Not using this policy feature yet.");
ysr@777 688 return NULL;
ysr@777 689 }
ysr@777 690
ysr@777 691 // This method controls how a collector handles one or more
ysr@777 692 // of its generations being fully allocated.
ysr@777 693 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
ysr@777 694 bool is_tlab) {
ysr@777 695 guarantee(false, "Not using this policy feature yet.");
ysr@777 696 return NULL;
ysr@777 697 }
ysr@777 698
ysr@777 699
ysr@777 700 #ifndef PRODUCT
ysr@777 701 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 702 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 703 return
ysr@777 704 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 705 // also call verify_young_ages on any additional surv rate groups
ysr@777 706 }
ysr@777 707
ysr@777 708 bool
ysr@777 709 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 710 SurvRateGroup *surv_rate_group) {
ysr@777 711 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 712
ysr@777 713 const char* name = surv_rate_group->name();
ysr@777 714 bool ret = true;
ysr@777 715 int prev_age = -1;
ysr@777 716
ysr@777 717 for (HeapRegion* curr = head;
ysr@777 718 curr != NULL;
ysr@777 719 curr = curr->get_next_young_region()) {
ysr@777 720 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 721 if (group == NULL && !curr->is_survivor()) {
ysr@777 722 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 723 ret = false;
ysr@777 724 }
ysr@777 725
ysr@777 726 if (surv_rate_group == group) {
ysr@777 727 int age = curr->age_in_surv_rate_group();
ysr@777 728
ysr@777 729 if (age < 0) {
ysr@777 730 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 731 ret = false;
ysr@777 732 }
ysr@777 733
ysr@777 734 if (age <= prev_age) {
ysr@777 735 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 736 "(%d, %d)", name, age, prev_age);
ysr@777 737 ret = false;
ysr@777 738 }
ysr@777 739 prev_age = age;
ysr@777 740 }
ysr@777 741 }
ysr@777 742
ysr@777 743 return ret;
ysr@777 744 }
ysr@777 745 #endif // PRODUCT
ysr@777 746
ysr@777 747 void G1CollectorPolicy::record_full_collection_start() {
ysr@777 748 _cur_collection_start_sec = os::elapsedTime();
ysr@777 749 // Release the future to-space so that it is available for compaction into.
ysr@777 750 _g1->set_full_collection();
ysr@777 751 }
ysr@777 752
ysr@777 753 void G1CollectorPolicy::record_full_collection_end() {
ysr@777 754 // Consider this like a collection pause for the purposes of allocation
ysr@777 755 // since last pause.
ysr@777 756 double end_sec = os::elapsedTime();
ysr@777 757 double full_gc_time_sec = end_sec - _cur_collection_start_sec;
ysr@777 758 double full_gc_time_ms = full_gc_time_sec * 1000.0;
ysr@777 759
ysr@777 760 _all_full_gc_times_ms->add(full_gc_time_ms);
ysr@777 761
tonyp@1030 762 update_recent_gc_times(end_sec, full_gc_time_ms);
ysr@777 763
ysr@777 764 _g1->clear_full_collection();
ysr@777 765
ysr@777 766 // "Nuke" the heuristics that control the fully/partially young GC
ysr@777 767 // transitions and make sure we start with fully young GCs after the
ysr@777 768 // Full GC.
ysr@777 769 set_full_young_gcs(true);
ysr@777 770 _last_full_young_gc = false;
ysr@777 771 _should_revert_to_full_young_gcs = false;
tonyp@1794 772 clear_initiate_conc_mark_if_possible();
tonyp@1794 773 clear_during_initial_mark_pause();
ysr@777 774 _known_garbage_bytes = 0;
ysr@777 775 _known_garbage_ratio = 0.0;
ysr@777 776 _in_marking_window = false;
ysr@777 777 _in_marking_window_im = false;
ysr@777 778
ysr@777 779 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 780 // also call this on any additional surv rate groups
ysr@777 781
apetrusenko@980 782 record_survivor_regions(0, NULL, NULL);
apetrusenko@980 783
ysr@777 784 _prev_region_num_young = _region_num_young;
ysr@777 785 _prev_region_num_tenured = _region_num_tenured;
ysr@777 786
ysr@777 787 _free_regions_at_end_of_collection = _g1->free_regions();
apetrusenko@980 788 // Reset survivors SurvRateGroup.
apetrusenko@980 789 _survivor_surv_rate_group->reset();
ysr@777 790 calculate_young_list_min_length();
johnc@1829 791 calculate_young_list_target_length();
ysr@777 792 }
ysr@777 793
ysr@777 794 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
ysr@777 795 _bytes_in_to_space_before_gc += bytes;
ysr@777 796 }
ysr@777 797
ysr@777 798 void G1CollectorPolicy::record_after_bytes(size_t bytes) {
ysr@777 799 _bytes_in_to_space_after_gc += bytes;
ysr@777 800 }
ysr@777 801
ysr@777 802 void G1CollectorPolicy::record_stop_world_start() {
ysr@777 803 _stop_world_start = os::elapsedTime();
ysr@777 804 }
ysr@777 805
ysr@777 806 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
ysr@777 807 size_t start_used) {
ysr@777 808 if (PrintGCDetails) {
ysr@777 809 gclog_or_tty->stamp(PrintGCTimeStamps);
ysr@777 810 gclog_or_tty->print("[GC pause");
ysr@777 811 if (in_young_gc_mode())
ysr@777 812 gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
ysr@777 813 }
ysr@777 814
ysr@777 815 assert(_g1->used_regions() == _g1->recalculate_used_regions(),
ysr@777 816 "sanity");
tonyp@1071 817 assert(_g1->used() == _g1->recalculate_used(), "sanity");
ysr@777 818
ysr@777 819 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
ysr@777 820 _all_stop_world_times_ms->add(s_w_t_ms);
ysr@777 821 _stop_world_start = 0.0;
ysr@777 822
ysr@777 823 _cur_collection_start_sec = start_time_sec;
ysr@777 824 _cur_collection_pause_used_at_start_bytes = start_used;
ysr@777 825 _cur_collection_pause_used_regions_at_start = _g1->used_regions();
ysr@777 826 _pending_cards = _g1->pending_card_num();
ysr@777 827 _max_pending_cards = _g1->max_pending_card_num();
ysr@777 828
ysr@777 829 _bytes_in_to_space_before_gc = 0;
ysr@777 830 _bytes_in_to_space_after_gc = 0;
ysr@777 831 _bytes_in_collection_set_before_gc = 0;
ysr@777 832
ysr@777 833 #ifdef DEBUG
ysr@777 834 // initialise these to something well known so that we can spot
ysr@777 835 // if they are not set properly
ysr@777 836
ysr@777 837 for (int i = 0; i < _parallel_gc_threads; ++i) {
tonyp@1966 838 _par_last_gc_worker_start_times_ms[i] = -1234.0;
tonyp@1966 839 _par_last_ext_root_scan_times_ms[i] = -1234.0;
tonyp@1966 840 _par_last_mark_stack_scan_times_ms[i] = -1234.0;
tonyp@1966 841 _par_last_update_rs_times_ms[i] = -1234.0;
tonyp@1966 842 _par_last_update_rs_processed_buffers[i] = -1234.0;
tonyp@1966 843 _par_last_scan_rs_times_ms[i] = -1234.0;
tonyp@1966 844 _par_last_obj_copy_times_ms[i] = -1234.0;
tonyp@1966 845 _par_last_termination_times_ms[i] = -1234.0;
tonyp@1966 846 _par_last_termination_attempts[i] = -1234.0;
tonyp@1966 847 _par_last_gc_worker_end_times_ms[i] = -1234.0;
ysr@777 848 }
ysr@777 849 #endif
ysr@777 850
ysr@777 851 for (int i = 0; i < _aux_num; ++i) {
ysr@777 852 _cur_aux_times_ms[i] = 0.0;
ysr@777 853 _cur_aux_times_set[i] = false;
ysr@777 854 }
ysr@777 855
ysr@777 856 _satb_drain_time_set = false;
ysr@777 857 _last_satb_drain_processed_buffers = -1;
ysr@777 858
ysr@777 859 if (in_young_gc_mode())
ysr@777 860 _last_young_gc_full = false;
ysr@777 861
ysr@777 862 // do that for any other surv rate groups
ysr@777 863 _short_lived_surv_rate_group->stop_adding_regions();
tonyp@1717 864 _survivors_age_table.clear();
apetrusenko@980 865
ysr@777 866 assert( verify_young_ages(), "region age verification" );
ysr@777 867 }
ysr@777 868
ysr@777 869 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
ysr@777 870 _mark_closure_time_ms = mark_closure_time_ms;
ysr@777 871 }
ysr@777 872
ysr@777 873 void G1CollectorPolicy::record_concurrent_mark_init_start() {
ysr@777 874 _mark_init_start_sec = os::elapsedTime();
ysr@777 875 guarantee(!in_young_gc_mode(), "should not be here in young GC mode");
ysr@777 876 }
ysr@777 877
ysr@777 878 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
ysr@777 879 mark_init_elapsed_time_ms) {
ysr@777 880 _during_marking = true;
tonyp@1794 881 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
tonyp@1794 882 clear_during_initial_mark_pause();
ysr@777 883 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
ysr@777 884 }
ysr@777 885
ysr@777 886 void G1CollectorPolicy::record_concurrent_mark_init_end() {
ysr@777 887 double end_time_sec = os::elapsedTime();
ysr@777 888 double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
ysr@777 889 _concurrent_mark_init_times_ms->add(elapsed_time_ms);
ysr@777 890 record_concurrent_mark_init_end_pre(elapsed_time_ms);
ysr@777 891
ysr@777 892 _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
ysr@777 893 }
ysr@777 894
ysr@777 895 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
ysr@777 896 _mark_remark_start_sec = os::elapsedTime();
ysr@777 897 _during_marking = false;
ysr@777 898 }
ysr@777 899
ysr@777 900 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 901 double end_time_sec = os::elapsedTime();
ysr@777 902 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 903 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 904 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 905 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 906
ysr@777 907 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 908 }
ysr@777 909
ysr@777 910 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
ysr@777 911 _mark_cleanup_start_sec = os::elapsedTime();
ysr@777 912 }
ysr@777 913
ysr@777 914 void
ysr@777 915 G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 916 size_t max_live_bytes) {
ysr@777 917 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 918 record_concurrent_mark_cleanup_end_work2();
ysr@777 919 }
ysr@777 920
ysr@777 921 void
ysr@777 922 G1CollectorPolicy::
ysr@777 923 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 924 size_t max_live_bytes) {
ysr@777 925 if (_n_marks < 2) _n_marks++;
ysr@777 926 if (G1PolicyVerbose > 0)
ysr@777 927 gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
ysr@777 928 " (of " SIZE_FORMAT " MB heap).",
ysr@777 929 max_live_bytes/M, _g1->capacity()/M);
ysr@777 930 }
ysr@777 931
ysr@777 932 // The important thing about this is that it includes "os::elapsedTime".
ysr@777 933 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
ysr@777 934 double end_time_sec = os::elapsedTime();
ysr@777 935 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
ysr@777 936 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
ysr@777 937 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 938 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 939
ysr@777 940 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
ysr@777 941
ysr@777 942 _num_markings++;
ysr@777 943
ysr@777 944 // We did a marking, so reset the "since_last_mark" variables.
ysr@777 945 double considerConcMarkCost = 1.0;
ysr@777 946 // If there are available processors, concurrent activity is free...
ysr@777 947 if (Threads::number_of_non_daemon_threads() * 2 <
ysr@777 948 os::active_processor_count()) {
ysr@777 949 considerConcMarkCost = 0.0;
ysr@777 950 }
ysr@777 951 _n_pauses_at_mark_end = _n_pauses;
ysr@777 952 _n_marks_since_last_pause++;
ysr@777 953 }
ysr@777 954
ysr@777 955 void
ysr@777 956 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
ysr@777 957 if (in_young_gc_mode()) {
ysr@777 958 _should_revert_to_full_young_gcs = false;
ysr@777 959 _last_full_young_gc = true;
ysr@777 960 _in_marking_window = false;
ysr@777 961 if (adaptive_young_list_length())
johnc@1829 962 calculate_young_list_target_length();
ysr@777 963 }
ysr@777 964 }
ysr@777 965
ysr@777 966 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 967 if (_stop_world_start > 0.0) {
ysr@777 968 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 969 _all_yield_times_ms->add(yield_ms);
ysr@777 970 }
ysr@777 971 }
ysr@777 972
ysr@777 973 void G1CollectorPolicy::record_concurrent_pause_end() {
ysr@777 974 }
ysr@777 975
ysr@777 976 void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
ysr@777 977 _cur_CH_strong_roots_end_sec = os::elapsedTime();
ysr@777 978 _cur_CH_strong_roots_dur_ms =
ysr@777 979 (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
ysr@777 980 }
ysr@777 981
ysr@777 982 void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
ysr@777 983 _cur_G1_strong_roots_end_sec = os::elapsedTime();
ysr@777 984 _cur_G1_strong_roots_dur_ms =
ysr@777 985 (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
ysr@777 986 }
ysr@777 987
ysr@777 988 template<class T>
ysr@777 989 T sum_of(T* sum_arr, int start, int n, int N) {
ysr@777 990 T sum = (T)0;
ysr@777 991 for (int i = 0; i < n; i++) {
ysr@777 992 int j = (start + i) % N;
ysr@777 993 sum += sum_arr[j];
ysr@777 994 }
ysr@777 995 return sum;
ysr@777 996 }
ysr@777 997
tonyp@1966 998 void G1CollectorPolicy::print_par_stats(int level,
tonyp@1966 999 const char* str,
tonyp@1966 1000 double* data,
ysr@777 1001 bool summary) {
ysr@777 1002 double min = data[0], max = data[0];
ysr@777 1003 double total = 0.0;
ysr@777 1004 int j;
ysr@777 1005 for (j = 0; j < level; ++j)
ysr@777 1006 gclog_or_tty->print(" ");
ysr@777 1007 gclog_or_tty->print("[%s (ms):", str);
ysr@777 1008 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1009 double val = data[i];
ysr@777 1010 if (val < min)
ysr@777 1011 min = val;
ysr@777 1012 if (val > max)
ysr@777 1013 max = val;
ysr@777 1014 total += val;
ysr@777 1015 gclog_or_tty->print(" %3.1lf", val);
ysr@777 1016 }
ysr@777 1017 if (summary) {
ysr@777 1018 gclog_or_tty->print_cr("");
ysr@777 1019 double avg = total / (double) ParallelGCThreads;
ysr@777 1020 gclog_or_tty->print(" ");
ysr@777 1021 for (j = 0; j < level; ++j)
ysr@777 1022 gclog_or_tty->print(" ");
ysr@777 1023 gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
ysr@777 1024 avg, min, max);
ysr@777 1025 }
ysr@777 1026 gclog_or_tty->print_cr("]");
ysr@777 1027 }
ysr@777 1028
tonyp@1966 1029 void G1CollectorPolicy::print_par_sizes(int level,
tonyp@1966 1030 const char* str,
tonyp@1966 1031 double* data,
tonyp@1966 1032 bool summary) {
ysr@777 1033 double min = data[0], max = data[0];
ysr@777 1034 double total = 0.0;
ysr@777 1035 int j;
ysr@777 1036 for (j = 0; j < level; ++j)
ysr@777 1037 gclog_or_tty->print(" ");
ysr@777 1038 gclog_or_tty->print("[%s :", str);
ysr@777 1039 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 1040 double val = data[i];
ysr@777 1041 if (val < min)
ysr@777 1042 min = val;
ysr@777 1043 if (val > max)
ysr@777 1044 max = val;
ysr@777 1045 total += val;
ysr@777 1046 gclog_or_tty->print(" %d", (int) val);
ysr@777 1047 }
ysr@777 1048 if (summary) {
ysr@777 1049 gclog_or_tty->print_cr("");
ysr@777 1050 double avg = total / (double) ParallelGCThreads;
ysr@777 1051 gclog_or_tty->print(" ");
ysr@777 1052 for (j = 0; j < level; ++j)
ysr@777 1053 gclog_or_tty->print(" ");
ysr@777 1054 gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d",
ysr@777 1055 (int)total, (int)avg, (int)min, (int)max);
ysr@777 1056 }
ysr@777 1057 gclog_or_tty->print_cr("]");
ysr@777 1058 }
ysr@777 1059
ysr@777 1060 void G1CollectorPolicy::print_stats (int level,
ysr@777 1061 const char* str,
ysr@777 1062 double value) {
ysr@777 1063 for (int j = 0; j < level; ++j)
ysr@777 1064 gclog_or_tty->print(" ");
ysr@777 1065 gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1066 }
ysr@777 1067
ysr@777 1068 void G1CollectorPolicy::print_stats (int level,
ysr@777 1069 const char* str,
ysr@777 1070 int value) {
ysr@777 1071 for (int j = 0; j < level; ++j)
ysr@777 1072 gclog_or_tty->print(" ");
ysr@777 1073 gclog_or_tty->print_cr("[%s: %d]", str, value);
ysr@777 1074 }
ysr@777 1075
ysr@777 1076 double G1CollectorPolicy::avg_value (double* data) {
ysr@777 1077 if (ParallelGCThreads > 0) {
ysr@777 1078 double ret = 0.0;
ysr@777 1079 for (uint i = 0; i < ParallelGCThreads; ++i)
ysr@777 1080 ret += data[i];
ysr@777 1081 return ret / (double) ParallelGCThreads;
ysr@777 1082 } else {
ysr@777 1083 return data[0];
ysr@777 1084 }
ysr@777 1085 }
ysr@777 1086
ysr@777 1087 double G1CollectorPolicy::max_value (double* data) {
ysr@777 1088 if (ParallelGCThreads > 0) {
ysr@777 1089 double ret = data[0];
ysr@777 1090 for (uint i = 1; i < ParallelGCThreads; ++i)
ysr@777 1091 if (data[i] > ret)
ysr@777 1092 ret = data[i];
ysr@777 1093 return ret;
ysr@777 1094 } else {
ysr@777 1095 return data[0];
ysr@777 1096 }
ysr@777 1097 }
ysr@777 1098
ysr@777 1099 double G1CollectorPolicy::sum_of_values (double* data) {
ysr@777 1100 if (ParallelGCThreads > 0) {
ysr@777 1101 double sum = 0.0;
ysr@777 1102 for (uint i = 0; i < ParallelGCThreads; i++)
ysr@777 1103 sum += data[i];
ysr@777 1104 return sum;
ysr@777 1105 } else {
ysr@777 1106 return data[0];
ysr@777 1107 }
ysr@777 1108 }
ysr@777 1109
ysr@777 1110 double G1CollectorPolicy::max_sum (double* data1,
ysr@777 1111 double* data2) {
ysr@777 1112 double ret = data1[0] + data2[0];
ysr@777 1113
ysr@777 1114 if (ParallelGCThreads > 0) {
ysr@777 1115 for (uint i = 1; i < ParallelGCThreads; ++i) {
ysr@777 1116 double data = data1[i] + data2[i];
ysr@777 1117 if (data > ret)
ysr@777 1118 ret = data;
ysr@777 1119 }
ysr@777 1120 }
ysr@777 1121 return ret;
ysr@777 1122 }
ysr@777 1123
ysr@777 1124 // Anything below that is considered to be zero
ysr@777 1125 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1126
apetrusenko@1112 1127 void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
ysr@777 1128 double end_time_sec = os::elapsedTime();
ysr@777 1129 double elapsed_ms = _last_pause_time_ms;
ysr@777 1130 bool parallel = ParallelGCThreads > 0;
ysr@777 1131 double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
ysr@777 1132 size_t rs_size =
ysr@777 1133 _cur_collection_pause_used_regions_at_start - collection_set_size();
ysr@777 1134 size_t cur_used_bytes = _g1->used();
ysr@777 1135 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1136 bool last_pause_included_initial_mark = false;
tonyp@1030 1137 bool update_stats = !abandoned && !_g1->evacuation_failed();
ysr@777 1138
ysr@777 1139 #ifndef PRODUCT
ysr@777 1140 if (G1YoungSurvRateVerbose) {
ysr@777 1141 gclog_or_tty->print_cr("");
ysr@777 1142 _short_lived_surv_rate_group->print();
ysr@777 1143 // do that for any other surv rate groups too
ysr@777 1144 }
ysr@777 1145 #endif // PRODUCT
ysr@777 1146
ysr@777 1147 if (in_young_gc_mode()) {
tonyp@1794 1148 last_pause_included_initial_mark = during_initial_mark_pause();
ysr@777 1149 if (last_pause_included_initial_mark)
ysr@777 1150 record_concurrent_mark_init_end_pre(0.0);
ysr@777 1151
ysr@777 1152 size_t min_used_targ =
tonyp@1718 1153 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
ysr@777 1154
tonyp@1794 1155
tonyp@1794 1156 if (!_g1->mark_in_progress() && !_last_full_young_gc) {
tonyp@1794 1157 assert(!last_pause_included_initial_mark, "invariant");
tonyp@1794 1158 if (cur_used_bytes > min_used_targ &&
tonyp@1794 1159 cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
tonyp@1794 1160 assert(!during_initial_mark_pause(), "we should not see this here");
tonyp@1794 1161
tonyp@1794 1162 // Note: this might have already been set, if during the last
tonyp@1794 1163 // pause we decided to start a cycle but at the beginning of
tonyp@1794 1164 // this pause we decided to postpone it. That's OK.
tonyp@1794 1165 set_initiate_conc_mark_if_possible();
ysr@777 1166 }
ysr@777 1167 }
ysr@777 1168
ysr@777 1169 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
ysr@777 1170 }
ysr@777 1171
ysr@777 1172 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1173 end_time_sec, false);
ysr@777 1174
ysr@777 1175 guarantee(_cur_collection_pause_used_regions_at_start >=
ysr@777 1176 collection_set_size(),
ysr@777 1177 "Negative RS size?");
ysr@777 1178
ysr@777 1179 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1180 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1181 // can lead to more memory being used during collection than was used
ysr@777 1182 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1183 // Pauses in which evacuation failed can also lead to negative
ysr@777 1184 // collections, since no space is reclaimed from a region containing an
ysr@777 1185 // object whose evacuation failed.
ysr@777 1186 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1187 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1188 // (DLD, 10/05.)
ysr@777 1189 assert((true || parallel) // Always using GC LABs now.
ysr@777 1190 || _g1->evacuation_failed()
ysr@777 1191 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1192 "Negative collection");
ysr@777 1193
ysr@777 1194 size_t freed_bytes =
ysr@777 1195 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1196 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1197
ysr@777 1198 double survival_fraction =
ysr@777 1199 (double)surviving_bytes/
ysr@777 1200 (double)_collection_set_bytes_used_before;
ysr@777 1201
ysr@777 1202 _n_pauses++;
ysr@777 1203
tonyp@1030 1204 if (update_stats) {
ysr@777 1205 _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms);
ysr@777 1206 _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms);
ysr@777 1207 _recent_evac_times_ms->add(evac_ms);
ysr@777 1208 _recent_pause_times_ms->add(elapsed_ms);
ysr@777 1209
ysr@777 1210 _recent_rs_sizes->add(rs_size);
ysr@777 1211
ysr@777 1212 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1213 // fragmentation can produce negative collections. Same with evac
ysr@777 1214 // failure.
ysr@777 1215 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1216 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1217 // (DLD, 10/05.)
ysr@777 1218 assert((true || parallel)
ysr@777 1219 || _g1->evacuation_failed()
ysr@777 1220 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1221 "Or else negative collection!");
ysr@777 1222 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
ysr@777 1223 _recent_CS_bytes_surviving->add(surviving_bytes);
ysr@777 1224
ysr@777 1225 // this is where we update the allocation rate of the application
ysr@777 1226 double app_time_ms =
ysr@777 1227 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1228 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1229 // This usually happens due to the timer not having the required
ysr@777 1230 // granularity. Some Linuxes are the usual culprits.
ysr@777 1231 // We'll just set it to something (arbitrarily) small.
ysr@777 1232 app_time_ms = 1.0;
ysr@777 1233 }
ysr@777 1234 size_t regions_allocated =
ysr@777 1235 (_region_num_young - _prev_region_num_young) +
ysr@777 1236 (_region_num_tenured - _prev_region_num_tenured);
ysr@777 1237 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1238 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1239 _prev_region_num_young = _region_num_young;
ysr@777 1240 _prev_region_num_tenured = _region_num_tenured;
ysr@777 1241
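// The pause-time ratio below approximates the fraction of recent wall-clock
// time spent in GC pauses: the sum of the recorded recent GC times divided
// by the interval back to the oldest recorded pause end.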
ysr@777 1242 double interval_ms =
ysr@777 1243 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1244 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1245 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
ysr@1521 1246 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1247 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1248 #ifndef PRODUCT
ysr@1521 1249 // Dump info to allow post-facto debugging
ysr@1521 1250 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1251 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1252 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1253 _recent_gc_times_ms->dump();
ysr@1521 1254 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1255 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1256 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1257 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1258 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1259 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1260 #endif // !PRODUCT
ysr@1522 1261 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1262 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1263 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1264 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1265 } else {
ysr@1521 1266 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1267 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1268 }
ysr@1521 1269 }
ysr@777 1270 }
ysr@777 1271
ysr@777 1272 if (G1PolicyVerbose > 1) {
ysr@777 1273 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
ysr@777 1274 }
ysr@777 1275
ysr@777 1276 PauseSummary* summary;
apetrusenko@1112 1277 if (abandoned) {
apetrusenko@1112 1278 summary = _abandoned_summary;
apetrusenko@1112 1279 } else {
apetrusenko@1112 1280 summary = _summary;
ysr@777 1281 }
ysr@777 1282
ysr@777 1283 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
ysr@777 1284 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
ysr@777 1285 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
ysr@777 1286 double update_rs_processed_buffers =
ysr@777 1287 sum_of_values(_par_last_update_rs_processed_buffers);
ysr@777 1288 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
ysr@777 1289 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
ysr@777 1290 double termination_time = avg_value(_par_last_termination_times_ms);
ysr@777 1291
tonyp@1083 1292 double parallel_other_time = _cur_collection_par_time_ms -
tonyp@1083 1293 (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
johnc@1829 1294 scan_rs_time + obj_copy_time + termination_time);
tonyp@1030 1295 if (update_stats) {
ysr@777 1296 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 1297 guarantee(body_summary != NULL, "should not be null!");
ysr@777 1298
ysr@777 1299 if (_satb_drain_time_set)
ysr@777 1300 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
ysr@777 1301 else
ysr@777 1302 body_summary->record_satb_drain_time_ms(0.0);
ysr@777 1303 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
ysr@777 1304 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
ysr@777 1305 body_summary->record_update_rs_time_ms(update_rs_time);
ysr@777 1306 body_summary->record_scan_rs_time_ms(scan_rs_time);
ysr@777 1307 body_summary->record_obj_copy_time_ms(obj_copy_time);
ysr@777 1308 if (parallel) {
ysr@777 1309 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
ysr@777 1310 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
ysr@777 1311 body_summary->record_termination_time_ms(termination_time);
ysr@777 1312 body_summary->record_parallel_other_time_ms(parallel_other_time);
ysr@777 1313 }
ysr@777 1314 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
ysr@777 1315 }
ysr@777 1316
ysr@777 1317 if (G1PolicyVerbose > 1) {
ysr@777 1318 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1319 " CH Strong: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1320 " G1 Strong: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1321 " Evac: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1322 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1323 " |RS|: " SIZE_FORMAT,
ysr@777 1324 elapsed_ms, recent_avg_time_for_pauses_ms(),
ysr@777 1325 _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(),
ysr@777 1326 _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(),
ysr@777 1327 evac_ms, recent_avg_time_for_evac_ms(),
ysr@777 1328 scan_rs_time,
ysr@777 1329 recent_avg_time_for_pauses_ms() -
ysr@777 1330 recent_avg_time_for_G1_strong_ms(),
ysr@777 1331 rs_size);
ysr@777 1332
ysr@777 1333 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
ysr@777 1334 " At end " SIZE_FORMAT "K\n"
ysr@777 1335 " garbage : " SIZE_FORMAT "K"
ysr@777 1336 " of " SIZE_FORMAT "K\n"
ysr@777 1337 " survival : %6.2f%% (%6.2f%% avg)",
ysr@777 1338 _cur_collection_pause_used_at_start_bytes/K,
ysr@777 1339 _g1->used()/K, freed_bytes/K,
ysr@777 1340 _collection_set_bytes_used_before/K,
ysr@777 1341 survival_fraction*100.0,
ysr@777 1342 recent_avg_survival_fraction()*100.0);
ysr@777 1343 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
ysr@777 1344 recent_avg_pause_time_ratio() * 100.0);
ysr@777 1345 }
ysr@777 1346
ysr@777 1347 double other_time_ms = elapsed_ms;
ysr@777 1348
ysr@777 1349 if (!abandoned) {
ysr@777 1350 if (_satb_drain_time_set)
ysr@777 1351 other_time_ms -= _cur_satb_drain_time_ms;
ysr@777 1352
ysr@777 1353 if (parallel)
ysr@777 1354 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
ysr@777 1355 else
ysr@777 1356 other_time_ms -=
ysr@777 1357 update_rs_time +
johnc@1829 1358 ext_root_scan_time + mark_stack_scan_time +
ysr@777 1359 scan_rs_time + obj_copy_time;
ysr@777 1360 }
ysr@777 1361
ysr@777 1362 if (PrintGCDetails) {
ysr@777 1363 gclog_or_tty->print_cr("%s%s, %1.8lf secs]",
apetrusenko@1112 1364 abandoned ? " (abandoned)" : "",
ysr@777 1365 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1366 elapsed_ms / 1000.0);
ysr@777 1367
ysr@777 1368 if (!abandoned) {
apetrusenko@1112 1369 if (_satb_drain_time_set) {
ysr@777 1370 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
apetrusenko@1112 1371 }
apetrusenko@1112 1372 if (_last_satb_drain_processed_buffers >= 0) {
ysr@777 1373 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
apetrusenko@1112 1374 }
apetrusenko@1112 1375 if (parallel) {
apetrusenko@1112 1376 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
tonyp@1966 1377 print_par_stats(2, "GC Worker Start Time",
tonyp@1966 1378 _par_last_gc_worker_start_times_ms, false);
apetrusenko@1112 1379 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
tonyp@1966 1380 print_par_sizes(3, "Processed Buffers",
tonyp@1966 1381 _par_last_update_rs_processed_buffers, true);
tonyp@1966 1382 print_par_stats(2, "Ext Root Scanning",
tonyp@1966 1383 _par_last_ext_root_scan_times_ms);
tonyp@1966 1384 print_par_stats(2, "Mark Stack Scanning",
tonyp@1966 1385 _par_last_mark_stack_scan_times_ms);
ysr@777 1386 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
ysr@777 1387 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
ysr@777 1388 print_par_stats(2, "Termination", _par_last_termination_times_ms);
tonyp@1966 1389 print_par_sizes(3, "Termination Attempts",
tonyp@1966 1390 _par_last_termination_attempts, true);
tonyp@1966 1391 print_par_stats(2, "GC Worker End Time",
tonyp@1966 1392 _par_last_gc_worker_end_times_ms, false);
ysr@777 1393 print_stats(2, "Other", parallel_other_time);
ysr@777 1394 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
apetrusenko@1112 1395 } else {
apetrusenko@1112 1396 print_stats(1, "Update RS", update_rs_time);
iveresov@1229 1397 print_stats(2, "Processed Buffers",
iveresov@1229 1398 (int)update_rs_processed_buffers);
ysr@777 1399 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
ysr@777 1400 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
ysr@777 1401 print_stats(1, "Scan RS", scan_rs_time);
ysr@777 1402 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1403 }
ysr@777 1404 }
johnc@1325 1405 #ifndef PRODUCT
johnc@1325 1406 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1407 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1408 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1409 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1410 if (_num_cc_clears > 0) {
johnc@1325 1411 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1412 }
johnc@1325 1413 #endif
ysr@777 1414 print_stats(1, "Other", other_time_ms);
johnc@1829 1415 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
johnc@1829 1416
ysr@777 1417 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1418 if (_cur_aux_times_set[i]) {
ysr@777 1419 char buffer[96];
ysr@777 1420 sprintf(buffer, "Aux%d", i);
ysr@777 1421 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1422 }
ysr@777 1423 }
ysr@777 1424 }
ysr@777 1425 if (PrintGCDetails)
ysr@777 1426 gclog_or_tty->print(" [");
ysr@777 1427 if (PrintGC || PrintGCDetails)
ysr@777 1428 _g1->print_size_transition(gclog_or_tty,
ysr@777 1429 _cur_collection_pause_used_at_start_bytes,
ysr@777 1430 _g1->used(), _g1->capacity());
ysr@777 1431 if (PrintGCDetails)
ysr@777 1432 gclog_or_tty->print_cr("]");
ysr@777 1433
ysr@777 1434 _all_pause_times_ms->add(elapsed_ms);
tonyp@1083 1435 if (update_stats) {
tonyp@1083 1436 summary->record_total_time_ms(elapsed_ms);
tonyp@1083 1437 summary->record_other_time_ms(other_time_ms);
tonyp@1083 1438 }
ysr@777 1439 for (int i = 0; i < _aux_num; ++i)
ysr@777 1440 if (_cur_aux_times_set[i])
ysr@777 1441 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
ysr@777 1442
ysr@777 1443 // Reset marks-between-pauses counter.
ysr@777 1444 _n_marks_since_last_pause = 0;
ysr@777 1445
ysr@777 1446 // Update the efficiency-since-mark vars.
ysr@777 1447 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1448 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1449 // This usually happens due to the timer not having the required
ysr@777 1450 // granularity. Some Linuxes are the usual culprits.
ysr@777 1451 // We'll just set it to something (arbitrarily) small.
ysr@777 1452 proc_ms = 1.0;
ysr@777 1453 }
ysr@777 1454 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1455
ysr@777 1456 bool new_in_marking_window = _in_marking_window;
ysr@777 1457 bool new_in_marking_window_im = false;
tonyp@1794 1458 if (during_initial_mark_pause()) {
ysr@777 1459 new_in_marking_window = true;
ysr@777 1460 new_in_marking_window_im = true;
ysr@777 1461 }
ysr@777 1462
ysr@777 1463 if (in_young_gc_mode()) {
ysr@777 1464 if (_last_full_young_gc) {
ysr@777 1465 set_full_young_gcs(false);
ysr@777 1466 _last_full_young_gc = false;
ysr@777 1467 }
ysr@777 1468
ysr@777 1469 if ( !_last_young_gc_full ) {
ysr@777 1470 if ( _should_revert_to_full_young_gcs ||
ysr@777 1471 _known_garbage_ratio < 0.05 ||
ysr@777 1472 (adaptive_young_list_length() &&
ysr@777 1473 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
ysr@777 1474 set_full_young_gcs(true);
ysr@777 1475 }
ysr@777 1476 }
ysr@777 1477 _should_revert_to_full_young_gcs = false;
ysr@777 1478
ysr@777 1479 if (_last_young_gc_full && !_during_marking)
ysr@777 1480 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1481 }
ysr@777 1482
ysr@777 1483 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1484 // do that for any other surv rate groups
ysr@777 1485
ysr@777 1486 // <NEW PREDICTION>
ysr@777 1487
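// Feed this pause's measurements into the sequences backing the
// predict_*() methods: cost per card for RS updating, cost per entry for
// RS scanning, cost per byte copied, and the per-region and constant
// "other" overheads. Several of these are kept separately for fully vs.
// partially young pauses and for pauses inside a marking window.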
apetrusenko@1112 1488 if (update_stats) {
ysr@777 1489 double pause_time_ms = elapsed_ms;
ysr@777 1490
ysr@777 1491 size_t diff = 0;
ysr@777 1492 if (_max_pending_cards >= _pending_cards)
ysr@777 1493 diff = _max_pending_cards - _pending_cards;
ysr@777 1494 _pending_card_diff_seq->add((double) diff);
ysr@777 1495
ysr@777 1496 double cost_per_card_ms = 0.0;
ysr@777 1497 if (_pending_cards > 0) {
ysr@777 1498 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1499 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1500 }
ysr@777 1501
ysr@777 1502 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1503
ysr@777 1504 double cost_per_entry_ms = 0.0;
ysr@777 1505 if (cards_scanned > 10) {
ysr@777 1506 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
ysr@777 1507 if (_last_young_gc_full)
ysr@777 1508 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1509 else
ysr@777 1510 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1511 }
ysr@777 1512
ysr@777 1513 if (_max_rs_lengths > 0) {
ysr@777 1514 double cards_per_entry_ratio =
ysr@777 1515 (double) cards_scanned / (double) _max_rs_lengths;
ysr@777 1516 if (_last_young_gc_full)
ysr@777 1517 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1518 else
ysr@777 1519 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1520 }
ysr@777 1521
ysr@777 1522 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
ysr@777 1523 if (_max_rs_lengths >= _recorded_rs_lengths) // avoid recording an underflowed diff
ysr@777 1524 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1525
ysr@777 1526 size_t copied_bytes = surviving_bytes;
ysr@777 1527 double cost_per_byte_ms = 0.0;
ysr@777 1528 if (copied_bytes > 0) {
ysr@777 1529 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
ysr@777 1530 if (_in_marking_window)
ysr@777 1531 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
ysr@777 1532 else
ysr@777 1533 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
ysr@777 1534 }
ysr@777 1535
ysr@777 1536 double all_other_time_ms = pause_time_ms -
johnc@1829 1537 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1538 _mark_closure_time_ms + termination_time);
ysr@777 1539
ysr@777 1540 double young_other_time_ms = 0.0;
ysr@777 1541 if (_recorded_young_regions > 0) {
ysr@777 1542 young_other_time_ms =
ysr@777 1543 _recorded_young_cset_choice_time_ms +
ysr@777 1544 _recorded_young_free_cset_time_ms;
ysr@777 1545 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
ysr@777 1546 (double) _recorded_young_regions);
ysr@777 1547 }
ysr@777 1548 double non_young_other_time_ms = 0.0;
ysr@777 1549 if (_recorded_non_young_regions > 0) {
ysr@777 1550 non_young_other_time_ms =
ysr@777 1551 _recorded_non_young_cset_choice_time_ms +
ysr@777 1552 _recorded_non_young_free_cset_time_ms;
ysr@777 1553
ysr@777 1554 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
ysr@777 1555 (double) _recorded_non_young_regions);
ysr@777 1556 }
ysr@777 1557
ysr@777 1558 double constant_other_time_ms = all_other_time_ms -
ysr@777 1559 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1560 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1561
ysr@777 1562 double survival_ratio = 0.0;
ysr@777 1563 if (_bytes_in_collection_set_before_gc > 0) {
ysr@777 1564 survival_ratio = (double) bytes_in_to_space_during_gc() /
ysr@777 1565 (double) _bytes_in_collection_set_before_gc;
ysr@777 1566 }
ysr@777 1567
ysr@777 1568 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1569 _scanned_cards_seq->add((double) cards_scanned);
ysr@777 1570 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1571
ysr@777 1572 double expensive_region_limit_ms =
johnc@1186 1573 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1574 if (expensive_region_limit_ms < 0.0) {
ysr@777 1575 // this means that the other time was predicted to be longer
ysr@777 1576 // than the max pause time
johnc@1186 1577 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1578 }
ysr@777 1579 _expensive_region_limit_ms = expensive_region_limit_ms;
ysr@777 1580
ysr@777 1581 if (PREDICTIONS_VERBOSE) {
ysr@777 1582 gclog_or_tty->print_cr("");
ysr@777 1583 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
johnc@1829 1584 "REGIONS %d %d %d "
ysr@777 1585 "PENDING_CARDS %d %d "
ysr@777 1586 "CARDS_SCANNED %d %d "
ysr@777 1587 "RS_LENGTHS %d %d "
ysr@777 1588 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
ysr@777 1589 "SURVIVAL_RATIO %1.6lf %1.6lf "
ysr@777 1590 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
ysr@777 1591 "OTHER_YOUNG %1.6lf %1.6lf "
ysr@777 1592 "OTHER_NON_YOUNG %1.6lf %1.6lf "
ysr@777 1593 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
ysr@777 1594 "ELAPSED %1.6lf %1.6lf ",
ysr@777 1595 _cur_collection_start_sec,
ysr@777 1596 (!_last_young_gc_full) ? 2 :
ysr@777 1597 (last_pause_included_initial_mark) ? 1 : 0,
ysr@777 1598 _recorded_region_num,
ysr@777 1599 _recorded_young_regions,
ysr@777 1600 _recorded_non_young_regions,
ysr@777 1601 _predicted_pending_cards, _pending_cards,
ysr@777 1602 _predicted_cards_scanned, cards_scanned,
ysr@777 1603 _predicted_rs_lengths, _max_rs_lengths,
ysr@777 1604 _predicted_rs_update_time_ms, update_rs_time,
ysr@777 1605 _predicted_rs_scan_time_ms, scan_rs_time,
ysr@777 1606 _predicted_survival_ratio, survival_ratio,
ysr@777 1607 _predicted_object_copy_time_ms, obj_copy_time,
ysr@777 1608 _predicted_constant_other_time_ms, constant_other_time_ms,
ysr@777 1609 _predicted_young_other_time_ms, young_other_time_ms,
ysr@777 1610 _predicted_non_young_other_time_ms,
ysr@777 1611 non_young_other_time_ms,
ysr@777 1612 _vtime_diff_ms, termination_time,
ysr@777 1613 _predicted_pause_time_ms, elapsed_ms);
ysr@777 1614 }
ysr@777 1615
ysr@777 1616 if (G1PolicyVerbose > 0) {
ysr@777 1617 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
ysr@777 1618 _predicted_pause_time_ms,
ysr@777 1619 (_within_target) ? "within" : "outside",
ysr@777 1620 elapsed_ms);
ysr@777 1621 }
ysr@777 1622
ysr@777 1623 }
ysr@777 1624
ysr@777 1625 _in_marking_window = new_in_marking_window;
ysr@777 1626 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1627 _free_regions_at_end_of_collection = _g1->free_regions();
ysr@777 1628 calculate_young_list_min_length();
johnc@1829 1629 calculate_young_list_target_length();
ysr@777 1630
iveresov@1546 1631 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1632 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1633 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
ysr@777 1634 // </NEW PREDICTION>
ysr@777 1635 }
ysr@777 1636
ysr@777 1637 // <NEW PREDICTION>
ysr@777 1638
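// A rough sketch of the adaptive case in adjust_concurrent_refinement()
// below: if the Update RS phase took longer than its time goal, the green
// zone shrinks by ~10% (dec_k = 0.9); if it met the goal and more buffers
// were processed than the current green zone, the green zone grows by ~10%
// (inc_k = 1.1), or by at least one buffer. The yellow and red zones are
// then kept at 3x and 6x the green zone, and the completed-buffer
// processing threshold is placed a little (sigma-scaled) above the green
// zone, capped at the yellow zone.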
iveresov@1546 1639 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1640 double update_rs_processed_buffers,
iveresov@1546 1641 double goal_ms) {
iveresov@1546 1642 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1643 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1644
tonyp@1717 1645 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1646 const int k_gy = 3, k_gr = 6;
iveresov@1546 1647 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1648
iveresov@1546 1649 int g = cg1r->green_zone();
iveresov@1546 1650 if (update_rs_time > goal_ms) {
iveresov@1546 1651 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
iveresov@1546 1652 } else {
iveresov@1546 1653 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1654 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1655 }
iveresov@1546 1656 }
iveresov@1546 1657 // Change the refinement threads params
iveresov@1546 1658 cg1r->set_green_zone(g);
iveresov@1546 1659 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1660 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1661 cg1r->reinitialize_threads();
iveresov@1546 1662
iveresov@1546 1663 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1664 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1665 cg1r->yellow_zone());
iveresov@1546 1666 // Change the barrier params
iveresov@1546 1667 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1668 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1669 }
iveresov@1546 1670
iveresov@1546 1671 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1672 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1673 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1674 } else {
iveresov@1546 1675 dcqs.set_completed_queue_padding(0);
iveresov@1546 1676 }
iveresov@1546 1677 dcqs.notify_if_necessary();
iveresov@1546 1678 }
iveresov@1546 1679
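// Predicted elapsed time for a young collection of the current young list
// (plus 'adjustment' regions), roughly the sum of:
//   predict_rs_update_time_ms() for the predicted pending cards,
//   predict_rs_scan_time_ms() for the cards predicted from the sampled
//     young RS lengths,
//   predict_object_copy_time_ms() for the bytes predicted from the
//     accumulated young survival rate,
//   predict_young_other_time_ms() for the young region count, and
//   predict_constant_other_time_ms().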
ysr@777 1680 double
ysr@777 1681 G1CollectorPolicy::
ysr@777 1682 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1683 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1684
ysr@777 1685 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1686 size_t young_num = g1h->young_list()->length();
ysr@777 1687 if (young_num == 0)
ysr@777 1688 return 0.0;
ysr@777 1689
ysr@777 1690 young_num += adjustment;
ysr@777 1691 size_t pending_cards = predict_pending_cards();
johnc@1829 1692 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1693 predict_rs_length_diff();
ysr@777 1694 size_t card_num;
ysr@777 1695 if (full_young_gcs())
ysr@777 1696 card_num = predict_young_card_num(rs_lengths);
ysr@777 1697 else
ysr@777 1698 card_num = predict_non_young_card_num(rs_lengths);
ysr@777 1699 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1700 double accum_yg_surv_rate =
ysr@777 1701 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1702
ysr@777 1703 size_t bytes_to_copy =
ysr@777 1704 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1705
ysr@777 1706 return
ysr@777 1707 predict_rs_update_time_ms(pending_cards) +
ysr@777 1708 predict_rs_scan_time_ms(card_num) +
ysr@777 1709 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1710 predict_young_other_time_ms(young_num) +
ysr@777 1711 predict_constant_other_time_ms();
ysr@777 1712 }
ysr@777 1713
ysr@777 1714 double
ysr@777 1715 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1716 size_t rs_length = predict_rs_length_diff();
ysr@777 1717 size_t card_num;
ysr@777 1718 if (full_young_gcs())
ysr@777 1719 card_num = predict_young_card_num(rs_length);
ysr@777 1720 else
ysr@777 1721 card_num = predict_non_young_card_num(rs_length);
ysr@777 1722 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1723 }
ysr@777 1724
ysr@777 1725 double
ysr@777 1726 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1727 size_t scanned_cards) {
ysr@777 1728 return
ysr@777 1729 predict_rs_update_time_ms(pending_cards) +
ysr@777 1730 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1731 predict_constant_other_time_ms();
ysr@777 1732 }
ysr@777 1733
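// Predicted time to evacuate a single region: scanning its remembered set
// (sized by its current occupancy), copying the bytes expected to survive,
// plus the per-region "other" overhead for young or non-young regions as
// appropriate.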
ysr@777 1734 double
ysr@777 1735 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1736 bool young) {
ysr@777 1737 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1738 size_t card_num;
ysr@777 1739 if (full_young_gcs())
ysr@777 1740 card_num = predict_young_card_num(rs_length);
ysr@777 1741 else
ysr@777 1742 card_num = predict_non_young_card_num(rs_length);
ysr@777 1743 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1744
ysr@777 1745 double region_elapsed_time_ms =
ysr@777 1746 predict_rs_scan_time_ms(card_num) +
ysr@777 1747 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1748
ysr@777 1749 if (young)
ysr@777 1750 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1751 else
ysr@777 1752 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1753
ysr@777 1754 return region_elapsed_time_ms;
ysr@777 1755 }
ysr@777 1756
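// Estimate the bytes that would have to be copied out of 'hr': marked
// (non-young) regions use the live data recorded by the last marking;
// young regions scale their current usage by the predicted survival rate
// for their age in the survivor rate group.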
ysr@777 1757 size_t
ysr@777 1758 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1759 size_t bytes_to_copy;
ysr@777 1760 if (hr->is_marked())
ysr@777 1761 bytes_to_copy = hr->max_live_bytes();
ysr@777 1762 else {
ysr@777 1763 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1764 "invariant" );
ysr@777 1765 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1766 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1767 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1768 }
ysr@777 1769
ysr@777 1770 return bytes_to_copy;
ysr@777 1771 }
ysr@777 1772
ysr@777 1773 void
ysr@777 1774 G1CollectorPolicy::start_recording_regions() {
ysr@777 1775 _recorded_rs_lengths = 0;
ysr@777 1776 _recorded_young_regions = 0;
ysr@777 1777 _recorded_non_young_regions = 0;
ysr@777 1778
ysr@777 1779 #if PREDICTIONS_VERBOSE
ysr@777 1780 _recorded_marked_bytes = 0;
ysr@777 1781 _recorded_young_bytes = 0;
ysr@777 1782 _predicted_bytes_to_copy = 0;
johnc@1829 1783 _predicted_rs_lengths = 0;
johnc@1829 1784 _predicted_cards_scanned = 0;
ysr@777 1785 #endif // PREDICTIONS_VERBOSE
ysr@777 1786 }
ysr@777 1787
ysr@777 1788 void
johnc@1829 1789 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
ysr@777 1790 #if PREDICTIONS_VERBOSE
johnc@1829 1791 if (!young) {
ysr@777 1792 _recorded_marked_bytes += hr->max_live_bytes();
ysr@777 1793 }
ysr@777 1794 _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
ysr@777 1795 #endif // PREDICTIONS_VERBOSE
ysr@777 1796
ysr@777 1797 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1798 _recorded_rs_lengths += rs_length;
ysr@777 1799 }
ysr@777 1800
ysr@777 1801 void
johnc@1829 1802 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
johnc@1829 1803 assert(!hr->is_young(), "should not call this");
johnc@1829 1804 ++_recorded_non_young_regions;
johnc@1829 1805 record_cset_region_info(hr, false);
johnc@1829 1806 }
johnc@1829 1807
johnc@1829 1808 void
johnc@1829 1809 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
johnc@1829 1810 _recorded_young_regions = n_regions;
johnc@1829 1811 }
johnc@1829 1812
johnc@1829 1813 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
johnc@1829 1814 #if PREDICTIONS_VERBOSE
johnc@1829 1815 _recorded_young_bytes = bytes;
johnc@1829 1816 #endif // PREDICTIONS_VERBOSE
johnc@1829 1817 }
johnc@1829 1818
johnc@1829 1819 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
johnc@1829 1820 _recorded_rs_lengths = rs_lengths;
johnc@1829 1821 }
johnc@1829 1822
johnc@1829 1823 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 1824 _predicted_bytes_to_copy = bytes;
ysr@777 1825 }
ysr@777 1826
ysr@777 1827 void
ysr@777 1828 G1CollectorPolicy::end_recording_regions() {
johnc@1829 1829 // The _predicted_pause_time_ms field is referenced in code
johnc@1829 1830 // not under PREDICTIONS_VERBOSE. Let's initialize it.
johnc@1829 1831 _predicted_pause_time_ms = -1.0;
johnc@1829 1832
ysr@777 1833 #if PREDICTIONS_VERBOSE
ysr@777 1834 _predicted_pending_cards = predict_pending_cards();
ysr@777 1835 _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
ysr@777 1836 if (full_young_gcs())
ysr@777 1837 _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
ysr@777 1838 else
ysr@777 1839 _predicted_cards_scanned +=
ysr@777 1840 predict_non_young_card_num(_predicted_rs_lengths);
ysr@777 1841 _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
ysr@777 1842
ysr@777 1843 _predicted_rs_update_time_ms =
ysr@777 1844 predict_rs_update_time_ms(_g1->pending_card_num());
ysr@777 1845 _predicted_rs_scan_time_ms =
ysr@777 1846 predict_rs_scan_time_ms(_predicted_cards_scanned);
ysr@777 1847 _predicted_object_copy_time_ms =
ysr@777 1848 predict_object_copy_time_ms(_predicted_bytes_to_copy);
ysr@777 1849 _predicted_constant_other_time_ms =
ysr@777 1850 predict_constant_other_time_ms();
ysr@777 1851 _predicted_young_other_time_ms =
ysr@777 1852 predict_young_other_time_ms(_recorded_young_regions);
ysr@777 1853 _predicted_non_young_other_time_ms =
ysr@777 1854 predict_non_young_other_time_ms(_recorded_non_young_regions);
ysr@777 1855
ysr@777 1856 _predicted_pause_time_ms =
ysr@777 1857 _predicted_rs_update_time_ms +
ysr@777 1858 _predicted_rs_scan_time_ms +
ysr@777 1859 _predicted_object_copy_time_ms +
ysr@777 1860 _predicted_constant_other_time_ms +
ysr@777 1861 _predicted_young_other_time_ms +
ysr@777 1862 _predicted_non_young_other_time_ms;
ysr@777 1863 #endif // PREDICTIONS_VERBOSE
ysr@777 1864 }
ysr@777 1865
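// A region is considered too expensive when its predicted evacuation time
// exceeds _expensive_region_limit_ms, which is computed above when a pause
// is recorded as (roughly) MaxGCPauseMillis minus the predicted constant
// per-pause overhead.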
ysr@777 1866 void G1CollectorPolicy::check_if_region_is_too_expensive(double
ysr@777 1867 predicted_time_ms) {
ysr@777 1868 // I don't think we need to do this when in young GC mode since
ysr@777 1869 // marking will be initiated next time we hit the soft limit anyway...
ysr@777 1870 if (predicted_time_ms > _expensive_region_limit_ms) {
ysr@777 1871 if (!in_young_gc_mode()) {
ysr@777 1872 set_full_young_gcs(true);
tonyp@1794 1873 // We might want to do something different here. However,
tonyp@1794 1874 // right now we don't support the non-generational G1 mode
tonyp@1794 1875 // (and in fact we are planning to remove the associated code,
tonyp@1794 1876 // see CR 6814390). So, let's leave it as is and this will be
tonyp@1794 1877 // removed some time in the future
tonyp@1794 1878 ShouldNotReachHere();
tonyp@1794 1879 set_during_initial_mark_pause();
ysr@777 1880 } else
ysr@777 1881 // no point in doing another partial one
ysr@777 1882 _should_revert_to_full_young_gcs = true;
ysr@777 1883 }
ysr@777 1884 }
ysr@777 1885
ysr@777 1886 // </NEW PREDICTION>
ysr@777 1887
ysr@777 1888
ysr@777 1889 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1890 double elapsed_ms) {
ysr@777 1891 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1892 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1893 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1894 }
ysr@777 1895
ysr@777 1896 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
johnc@1186 1897 if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis;
ysr@777 1898 else return _recent_pause_times_ms->avg();
ysr@777 1899 }
ysr@777 1900
ysr@777 1901 double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
ysr@777 1902 if (_recent_CH_strong_roots_times_ms->num() == 0)
johnc@1186 1903 return (double)MaxGCPauseMillis/3.0;
ysr@777 1904 else return _recent_CH_strong_roots_times_ms->avg();
ysr@777 1905 }
ysr@777 1906
ysr@777 1907 double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
ysr@777 1908 if (_recent_G1_strong_roots_times_ms->num() == 0)
johnc@1186 1909 return (double)MaxGCPauseMillis/3.0;
ysr@777 1910 else return _recent_G1_strong_roots_times_ms->avg();
ysr@777 1911 }
ysr@777 1912
ysr@777 1913 double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
johnc@1186 1914 if (_recent_evac_times_ms->num() == 0) return (double)MaxGCPauseMillis/3.0;
ysr@777 1915 else return _recent_evac_times_ms->avg();
ysr@777 1916 }
ysr@777 1917
ysr@777 1918 int G1CollectorPolicy::number_of_recent_gcs() {
ysr@777 1919 assert(_recent_CH_strong_roots_times_ms->num() ==
ysr@777 1920 _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync");
ysr@777 1921 assert(_recent_G1_strong_roots_times_ms->num() ==
ysr@777 1922 _recent_evac_times_ms->num(), "Sequence out of sync");
ysr@777 1923 assert(_recent_evac_times_ms->num() ==
ysr@777 1924 _recent_pause_times_ms->num(), "Sequence out of sync");
ysr@777 1925 assert(_recent_pause_times_ms->num() ==
ysr@777 1926 _recent_CS_bytes_used_before->num(), "Sequence out of sync");
ysr@777 1927 assert(_recent_CS_bytes_used_before->num() ==
ysr@777 1928 _recent_CS_bytes_surviving->num(), "Sequence out of sync");
ysr@777 1929 return _recent_pause_times_ms->num();
ysr@777 1930 }
ysr@777 1931
ysr@777 1932 double G1CollectorPolicy::recent_avg_survival_fraction() {
ysr@777 1933 return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
ysr@777 1934 _recent_CS_bytes_used_before);
ysr@777 1935 }
ysr@777 1936
ysr@777 1937 double G1CollectorPolicy::last_survival_fraction() {
ysr@777 1938 return last_survival_fraction_work(_recent_CS_bytes_surviving,
ysr@777 1939 _recent_CS_bytes_used_before);
ysr@777 1940 }
ysr@777 1941
ysr@777 1942 double
ysr@777 1943 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 1944 TruncatedSeq* before) {
ysr@777 1945 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 1946 if (before->sum() > 0.0) {
ysr@777 1947 double recent_survival_rate = surviving->sum() / before->sum();
ysr@777 1948 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1949 // fragmentation can produce negative collections.
ysr@777 1950 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1951 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1952 // (DLD, 10/05.)
ysr@777 1953 assert((true || ParallelGCThreads > 0) ||
ysr@777 1954 _g1->evacuation_failed() ||
ysr@777 1955 recent_survival_rate <= 1.0, "Or bad frac");
ysr@777 1956 return recent_survival_rate;
ysr@777 1957 } else {
ysr@777 1958 return 1.0; // Be conservative.
ysr@777 1959 }
ysr@777 1960 }
ysr@777 1961
ysr@777 1962 double
ysr@777 1963 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 1964 TruncatedSeq* before) {
ysr@777 1965 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 1966 if (surviving->num() > 0 && before->last() > 0.0) {
ysr@777 1967 double last_survival_rate = surviving->last() / before->last();
ysr@777 1968 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1969 // fragmentation can produce negative collections.
ysr@777 1970 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1971 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1972 // (DLD, 10/05.)
ysr@777 1973 assert((true || ParallelGCThreads > 0) ||
ysr@777 1974 last_survival_rate <= 1.0, "Or bad frac");
ysr@777 1975 return last_survival_rate;
ysr@777 1976 } else {
ysr@777 1977 return 1.0;
ysr@777 1978 }
ysr@777 1979 }
ysr@777 1980
ysr@777 1981 static const int survival_min_obs = 5;
ysr@777 1982 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
ysr@777 1983 static const double min_survival_rate = 0.1;
ysr@777 1984
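// Conservative estimate of the survival fraction: start from the recent
// average, never drop below the latest observation or min_survival_rate,
// and for the first few pauses (fewer than survival_min_obs) apply the
// progressively relaxing floors in survival_min_obs_limits. The result is
// capped at 1.0, since LAB fragmentation and evacuation failures can push
// the raw fraction above it.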
ysr@777 1985 double
ysr@777 1986 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
ysr@777 1987 double latest) {
ysr@777 1988 double res = avg;
ysr@777 1989 if (number_of_recent_gcs() < survival_min_obs) {
ysr@777 1990 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
ysr@777 1991 }
ysr@777 1992 res = MAX2(res, latest);
ysr@777 1993 res = MAX2(res, min_survival_rate);
ysr@777 1994 // In the parallel case, LAB fragmentation can produce "negative
ysr@777 1995 // collections"; so can evac failure. Cap at 1.0
ysr@777 1996 res = MIN2(res, 1.0);
ysr@777 1997 return res;
ysr@777 1998 }
ysr@777 1999
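// Decide how much to expand the heap by when the recent GC overhead is
// above the configured percentage. A hypothetical example, assuming
// G1ExpandByPercentOfAvailable is 20: with 1024M committed out of a 4096M
// reservation, the percentage-based candidate is 20% of the 3072M still
// uncommitted, i.e. ~614M; that is below the "double the committed space"
// cap of 1024M and above the 1M minimum, so ~614M would be requested.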
ysr@777 2000 size_t G1CollectorPolicy::expansion_amount() {
tonyp@1791 2001 if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
johnc@1186 2002 // We will double the existing space, or take
johnc@1186 2003 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 2004 // space, whichever is smaller, bounded below by a minimum
johnc@1186 2005 // expansion (unless that's all that's left.)
ysr@777 2006 const size_t min_expand_bytes = 1*M;
ysr@777 2007 size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
ysr@777 2008 size_t committed_bytes = _g1->capacity();
ysr@777 2009 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 2010 size_t expand_bytes;
ysr@777 2011 size_t expand_bytes_via_pct =
johnc@1186 2012 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 2013 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 2014 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 2015 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
ysr@777 2016 if (G1PolicyVerbose > 1) {
ysr@777 2017 gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
ysr@777 2018 "committed = %d%s, uncommitted = %d%s, via pct = %d%s.\n"
ysr@777 2019 " Answer = %d.\n",
ysr@777 2020 recent_avg_pause_time_ratio(),
ysr@777 2021 byte_size_in_proper_unit(committed_bytes),
ysr@777 2022 proper_unit_for_byte_size(committed_bytes),
ysr@777 2023 byte_size_in_proper_unit(uncommitted_bytes),
ysr@777 2024 proper_unit_for_byte_size(uncommitted_bytes),
ysr@777 2025 byte_size_in_proper_unit(expand_bytes_via_pct),
ysr@777 2026 proper_unit_for_byte_size(expand_bytes_via_pct),
ysr@777 2027 byte_size_in_proper_unit(expand_bytes),
ysr@777 2028 proper_unit_for_byte_size(expand_bytes));
ysr@777 2029 }
ysr@777 2030 return expand_bytes;
ysr@777 2031 } else {
ysr@777 2032 return 0;
ysr@777 2033 }
ysr@777 2034 }
ysr@777 2035
ysr@777 2036 void G1CollectorPolicy::note_start_of_mark_thread() {
ysr@777 2037 _mark_thread_startup_sec = os::elapsedTime();
ysr@777 2038 }
ysr@777 2039
ysr@777 2040 class CountCSClosure: public HeapRegionClosure {
ysr@777 2041 G1CollectorPolicy* _g1_policy;
ysr@777 2042 public:
ysr@777 2043 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 2044 _g1_policy(g1_policy) {}
ysr@777 2045 bool doHeapRegion(HeapRegion* r) {
ysr@777 2046 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 2047 return false;
ysr@777 2048 }
ysr@777 2049 };
ysr@777 2050
ysr@777 2051 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 2052 CountCSClosure cs_closure(this);
ysr@777 2053 _g1->collection_set_iterate(&cs_closure);
ysr@777 2054 }
ysr@777 2055
ysr@777 2056 static void print_indent(int level) {
ysr@777 2057 for (int j = 0; j < level+1; ++j)
ysr@777 2058 gclog_or_tty->print(" ");
ysr@777 2059 }
ysr@777 2060
ysr@777 2061 void G1CollectorPolicy::print_summary (int level,
ysr@777 2062 const char* str,
ysr@777 2063 NumberSeq* seq) const {
ysr@777 2064 double sum = seq->sum();
ysr@777 2065 print_indent(level);
ysr@777 2066 gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 2067 str, sum / 1000.0, seq->avg());
ysr@777 2068 }
ysr@777 2069
ysr@777 2070 void G1CollectorPolicy::print_summary_sd (int level,
ysr@777 2071 const char* str,
ysr@777 2072 NumberSeq* seq) const {
ysr@777 2073 print_summary(level, str, seq);
ysr@777 2074 print_indent(level + 5);
ysr@777 2075 gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 2076 seq->num(), seq->sd(), seq->maximum());
ysr@777 2077 }
ysr@777 2078
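// Sanity-check the recorded "Other" times against the times reconstructed
// by subtracting the known phases from the total: warn if the sums or
// averages disagree by more than ~10%, or if either sequence has gone
// (materially) negative.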
ysr@777 2079 void G1CollectorPolicy::check_other_times(int level,
ysr@777 2080 NumberSeq* other_times_ms,
ysr@777 2081 NumberSeq* calc_other_times_ms) const {
ysr@777 2082 bool should_print = false;
ysr@777 2083
ysr@777 2084 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 2085 fabs(calc_other_times_ms->sum()));
ysr@777 2086 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 2087 fabs(calc_other_times_ms->sum()));
ysr@777 2088 double sum_ratio = max_sum / min_sum;
ysr@777 2089 if (sum_ratio > 1.1) {
ysr@777 2090 should_print = true;
ysr@777 2091 print_indent(level + 1);
ysr@777 2092 gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 2093 }
ysr@777 2094
ysr@777 2095 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 2096 fabs(calc_other_times_ms->avg()));
ysr@777 2097 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 2098 fabs(calc_other_times_ms->avg()));
ysr@777 2099 double avg_ratio = max_avg / min_avg;
ysr@777 2100 if (avg_ratio > 1.1) {
ysr@777 2101 should_print = true;
ysr@777 2102 print_indent(level + 1);
ysr@777 2103 gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 2104 }
ysr@777 2105
ysr@777 2106 if (other_times_ms->sum() < -0.01) {
ysr@777 2107 print_indent(level + 1);
ysr@777 2108 gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 2109 }
ysr@777 2110
ysr@777 2111 if (other_times_ms->avg() < -0.01) {
ysr@777 2112 print_indent(level + 1);
ysr@777 2113 gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 2114 }
ysr@777 2115
ysr@777 2116 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 2117 should_print = true;
ysr@777 2118 print_indent(level + 1);
ysr@777 2119 gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 2120 }
ysr@777 2121
ysr@777 2122 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 2123 should_print = true;
ysr@777 2124 print_indent(level + 1);
ysr@777 2125 gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 2126 }
ysr@777 2127
ysr@777 2128 if (should_print)
ysr@777 2129 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 2130 }
ysr@777 2131
ysr@777 2132 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
ysr@777 2133 bool parallel = ParallelGCThreads > 0;
ysr@777 2134 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 2135 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 2136 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 2137 if (body_summary != NULL) {
ysr@777 2138 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
ysr@777 2139 if (parallel) {
ysr@777 2140 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
ysr@777 2141 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2142 print_summary(2, "Ext Root Scanning",
ysr@777 2143 body_summary->get_ext_root_scan_seq());
ysr@777 2144 print_summary(2, "Mark Stack Scanning",
ysr@777 2145 body_summary->get_mark_stack_scan_seq());
ysr@777 2146 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2147 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2148 print_summary(2, "Termination", body_summary->get_termination_seq());
ysr@777 2149 print_summary(2, "Other", body_summary->get_parallel_other_seq());
ysr@777 2150 {
ysr@777 2151 NumberSeq* other_parts[] = {
ysr@777 2152 body_summary->get_update_rs_seq(),
ysr@777 2153 body_summary->get_ext_root_scan_seq(),
ysr@777 2154 body_summary->get_mark_stack_scan_seq(),
ysr@777 2155 body_summary->get_scan_rs_seq(),
ysr@777 2156 body_summary->get_obj_copy_seq(),
ysr@777 2157 body_summary->get_termination_seq()
ysr@777 2158 };
ysr@777 2159 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
ysr@777 2160 6, other_parts);
ysr@777 2161 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 2162 &calc_other_times_ms);
ysr@777 2163 }
ysr@777 2164 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
ysr@777 2165 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 2166 } else {
ysr@777 2167 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2168 print_summary(1, "Ext Root Scanning",
ysr@777 2169 body_summary->get_ext_root_scan_seq());
ysr@777 2170 print_summary(1, "Mark Stack Scanning",
ysr@777 2171 body_summary->get_mark_stack_scan_seq());
ysr@777 2172 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2173 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2174 }
ysr@777 2175 }
ysr@777 2176 print_summary(1, "Other", summary->get_other_seq());
ysr@777 2177 {
ysr@777 2178 NumberSeq calc_other_times_ms;
ysr@777 2179 if (body_summary != NULL) {
ysr@777 2180 // not abandoned
ysr@777 2181 if (parallel) {
ysr@777 2182 // parallel
ysr@777 2183 NumberSeq* other_parts[] = {
ysr@777 2184 body_summary->get_satb_drain_seq(),
ysr@777 2185 body_summary->get_parallel_seq(),
ysr@777 2186 body_summary->get_clear_ct_seq()
ysr@777 2187 };
apetrusenko@1112 2188 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
apetrusenko@1112 2189 3, other_parts);
ysr@777 2190 } else {
ysr@777 2191 // serial
ysr@777 2192 NumberSeq* other_parts[] = {
ysr@777 2193 body_summary->get_satb_drain_seq(),
ysr@777 2194 body_summary->get_update_rs_seq(),
ysr@777 2195 body_summary->get_ext_root_scan_seq(),
ysr@777 2196 body_summary->get_mark_stack_scan_seq(),
ysr@777 2197 body_summary->get_scan_rs_seq(),
ysr@777 2198 body_summary->get_obj_copy_seq()
ysr@777 2199 };
ysr@777 2200 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
apetrusenko@1112 2201 6, other_parts);
ysr@777 2202 }
ysr@777 2203 } else {
ysr@777 2204 // abandoned
apetrusenko@1112 2205 calc_other_times_ms = NumberSeq();
ysr@777 2206 }
ysr@777 2207 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 2208 }
ysr@777 2209 } else {
ysr@777 2210 print_indent(0);
ysr@777 2211 gclog_or_tty->print_cr("none");
ysr@777 2212 }
ysr@777 2213 gclog_or_tty->print_cr("");
ysr@777 2214 }
ysr@777 2215
ysr@777 2216 void
apetrusenko@1112 2217 G1CollectorPolicy::print_abandoned_summary(PauseSummary* summary) const {
ysr@777 2218 bool printed = false;
apetrusenko@1112 2219 if (summary->get_total_seq()->num() > 0) {
ysr@777 2220 printed = true;
apetrusenko@1112 2221 print_summary(summary);
ysr@777 2222 }
ysr@777 2223 if (!printed) {
ysr@777 2224 print_indent(0);
ysr@777 2225 gclog_or_tty->print_cr("none");
ysr@777 2226 gclog_or_tty->print_cr("");
ysr@777 2227 }
ysr@777 2228 }
ysr@777 2229
ysr@777 2230 void G1CollectorPolicy::print_tracing_info() const {
ysr@777 2231 if (TraceGen0Time) {
ysr@777 2232 gclog_or_tty->print_cr("ALL PAUSES");
ysr@777 2233 print_summary_sd(0, "Total", _all_pause_times_ms);
ysr@777 2234 gclog_or_tty->print_cr("");
ysr@777 2235 gclog_or_tty->print_cr("");
ysr@777 2236 gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
ysr@777 2237 gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
ysr@777 2238 gclog_or_tty->print_cr("");
ysr@777 2239
apetrusenko@1112 2240 gclog_or_tty->print_cr("EVACUATION PAUSES");
apetrusenko@1112 2241 print_summary(_summary);
ysr@777 2242
ysr@777 2243 gclog_or_tty->print_cr("ABANDONED PAUSES");
apetrusenko@1112 2244 print_abandoned_summary(_abandoned_summary);
ysr@777 2245
ysr@777 2246 gclog_or_tty->print_cr("MISC");
ysr@777 2247 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
ysr@777 2248 print_summary_sd(0, "Yields", _all_yield_times_ms);
ysr@777 2249 for (int i = 0; i < _aux_num; ++i) {
ysr@777 2250 if (_all_aux_times_ms[i].num() > 0) {
ysr@777 2251 char buffer[96];
ysr@777 2252 sprintf(buffer, "Aux%d", i);
ysr@777 2253 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
ysr@777 2254 }
ysr@777 2255 }
ysr@777 2256
ysr@777 2257 size_t all_region_num = _region_num_young + _region_num_tenured;
ysr@777 2258 gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
ysr@777 2259 "Tenured %8d (%6.2lf%%)",
ysr@777 2260 all_region_num,
ysr@777 2261 _region_num_young,
ysr@777 2262 (double) _region_num_young / (double) all_region_num * 100.0,
ysr@777 2263 _region_num_tenured,
ysr@777 2264 (double) _region_num_tenured / (double) all_region_num * 100.0);
ysr@777 2265 }
ysr@777 2266 if (TraceGen1Time) {
ysr@777 2267 if (_all_full_gc_times_ms->num() > 0) {
ysr@777 2268 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
ysr@777 2269 _all_full_gc_times_ms->num(),
ysr@777 2270 _all_full_gc_times_ms->sum() / 1000.0);
ysr@777 2271 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
ysr@777 2272 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 2273 _all_full_gc_times_ms->sd(),
ysr@777 2274 _all_full_gc_times_ms->maximum());
ysr@777 2275 }
ysr@777 2276 }
ysr@777 2277 }
ysr@777 2278
ysr@777 2279 void G1CollectorPolicy::print_yg_surv_rate_info() const {
ysr@777 2280 #ifndef PRODUCT
ysr@777 2281 _short_lived_surv_rate_group->print_surv_rate_summary();
ysr@777 2282 // add this call for any other surv rate groups
ysr@777 2283 #endif // PRODUCT
ysr@777 2284 }
ysr@777 2285
ysr@777 2286 bool
ysr@777 2287 G1CollectorPolicy::should_add_next_region_to_young_list() {
ysr@777 2288 assert(in_young_gc_mode(), "should be in young GC mode");
ysr@777 2289 bool ret;
johnc@1829 2290 size_t young_list_length = _g1->young_list()->length();
apetrusenko@980 2291 size_t young_list_max_length = _young_list_target_length;
apetrusenko@980 2292 if (G1FixedEdenSize) {
apetrusenko@980 2293 young_list_max_length -= _max_survivor_regions;
apetrusenko@980 2294 }
apetrusenko@980 2295 if (young_list_length < young_list_max_length) {
ysr@777 2296 ret = true;
ysr@777 2297 ++_region_num_young;
ysr@777 2298 } else {
ysr@777 2299 ret = false;
ysr@777 2300 ++_region_num_tenured;
ysr@777 2301 }
ysr@777 2302
ysr@777 2303 return ret;
ysr@777 2304 }
ysr@777 2305
ysr@777 2306 #ifndef PRODUCT
ysr@777 2307 // for debugging, bit of a hack...
ysr@777 2308 static char*
ysr@777 2309 region_num_to_mbs(int length) {
ysr@777 2310 static char buffer[64];
ysr@777 2311 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2312 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2313 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2314 return buffer;
ysr@777 2315 }
ysr@777 2316 #endif // PRODUCT
ysr@777 2317
apetrusenko@980 2318 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2319 switch (purpose) {
ysr@777 2320 case GCAllocForSurvived:
apetrusenko@980 2321 return _max_survivor_regions;
ysr@777 2322 case GCAllocForTenured:
apetrusenko@980 2323 return REGIONS_UNLIMITED;
ysr@777 2324 default:
apetrusenko@980 2325 ShouldNotReachHere();
apetrusenko@980 2326 return REGIONS_UNLIMITED;
ysr@777 2327 };
ysr@777 2328 }
ysr@777 2329
apetrusenko@980 2330 // Calculates survivor space parameters.
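// Unless a fixed survivor space size is requested, the maximum number of
// survivor regions is derived from the young list target length and
// SurvivorRatio; the tenuring threshold is then taken from the survivor
// age table, sized so that the retained ages fit in that survivor space
// (or is simply MaxTenuringThreshold when a fixed threshold is requested).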
apetrusenko@980 2331 void G1CollectorPolicy::calculate_survivors_policy()
apetrusenko@980 2332 {
apetrusenko@980 2333 if (G1FixedSurvivorSpaceSize == 0) {
apetrusenko@980 2334 _max_survivor_regions = _young_list_target_length / SurvivorRatio;
apetrusenko@980 2335 } else {
apetrusenko@982 2336 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
apetrusenko@980 2337 }
apetrusenko@980 2338
apetrusenko@980 2339 if (G1FixedTenuringThreshold) {
apetrusenko@980 2340 _tenuring_threshold = MaxTenuringThreshold;
apetrusenko@980 2341 } else {
apetrusenko@980 2342 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2343 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2344 }
apetrusenko@980 2345 }
apetrusenko@980 2346
ysr@777 2347 bool
ysr@777 2348 G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
ysr@777 2349 word_size) {
ysr@777 2350 assert(_g1->regions_accounted_for(), "Region leakage!");
ysr@777 2351 double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
ysr@777 2352
johnc@1829 2353 size_t young_list_length = _g1->young_list()->length();
apetrusenko@980 2354 size_t young_list_max_length = _young_list_target_length;
apetrusenko@980 2355 if (G1FixedEdenSize) {
apetrusenko@980 2356 young_list_max_length -= _max_survivor_regions;
apetrusenko@980 2357 }
apetrusenko@980 2358 bool reached_target_length = young_list_length >= young_list_max_length;
ysr@777 2359
ysr@777 2360 if (in_young_gc_mode()) {
ysr@777 2361 if (reached_target_length) {
johnc@1829 2362 assert( young_list_length > 0 && _g1->young_list()->length() > 0,
ysr@777 2363 "invariant" );
ysr@777 2364 return true;
ysr@777 2365 }
ysr@777 2366 } else {
ysr@777 2367 guarantee( false, "should not reach here" );
ysr@777 2368 }
ysr@777 2369
ysr@777 2370 return false;
ysr@777 2371 }
ysr@777 2372
ysr@777 2373 #ifndef PRODUCT
ysr@777 2374 class HRSortIndexIsOKClosure: public HeapRegionClosure {
ysr@777 2375 CollectionSetChooser* _chooser;
ysr@777 2376 public:
ysr@777 2377 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
ysr@777 2378 _chooser(chooser) {}
ysr@777 2379
ysr@777 2380 bool doHeapRegion(HeapRegion* r) {
ysr@777 2381 if (!r->continuesHumongous()) {
ysr@777 2382 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
ysr@777 2383 }
ysr@777 2384 return false;
ysr@777 2385 }
ysr@777 2386 };
ysr@777 2387
ysr@777 2388 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
ysr@777 2389 HRSortIndexIsOKClosure cl(_collectionSetChooser);
ysr@777 2390 _g1->heap_region_iterate(&cl);
ysr@777 2391 return true;
ysr@777 2392 }
ysr@777 2393 #endif
ysr@777 2394
tonyp@2011 2395 bool
tonyp@2011 2396 G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
tonyp@2011 2397 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@2011 2398 if (!during_cycle) {
tonyp@2011 2399 set_initiate_conc_mark_if_possible();
tonyp@2011 2400 return true;
tonyp@2011 2401 } else {
tonyp@2011 2402 return false;
tonyp@2011 2403 }
tonyp@2011 2404 }
tonyp@2011 2405
ysr@777 2406 void
tonyp@1794 2407 G1CollectorPolicy::decide_on_conc_mark_initiation() {
tonyp@1794 2408 // We are about to decide on whether this pause will be an
tonyp@1794 2409 // initial-mark pause.
tonyp@1794 2410
tonyp@1794 2411 // First, during_initial_mark_pause() should not be already set. We
tonyp@1794 2412 // will set it here if we have to. However, it should be cleared by
tonyp@1794 2413 // the end of the pause (it's only set for the duration of an
tonyp@1794 2414 // initial-mark pause).
tonyp@1794 2415 assert(!during_initial_mark_pause(), "pre-condition");
tonyp@1794 2416
tonyp@1794 2417 if (initiate_conc_mark_if_possible()) {
tonyp@1794 2418 // We had noticed on a previous pause that the heap occupancy has
tonyp@1794 2419 // gone over the initiating threshold and we should start a
tonyp@1794 2420 // concurrent marking cycle. So we might initiate one.
tonyp@1794 2421
tonyp@1794 2422 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@1794 2423 if (!during_cycle) {
tonyp@1794 2424 // The concurrent marking thread is not "during a cycle", i.e.,
tonyp@1794 2425 // it has completed the last one. So we can go ahead and
tonyp@1794 2426 // initiate a new cycle.
tonyp@1794 2427
tonyp@1794 2428 set_during_initial_mark_pause();
tonyp@1794 2429
tonyp@1794 2430 // And we can now clear initiate_conc_mark_if_possible() as
tonyp@1794 2431 // we've already acted on it.
tonyp@1794 2432 clear_initiate_conc_mark_if_possible();
tonyp@1794 2433 } else {
tonyp@1794 2434 // The concurrent marking thread is still finishing up the
tonyp@1794 2435 // previous cycle. If we start one right now the two cycles
tonyp@1794 2436 // overlap. In particular, the concurrent marking thread might
tonyp@1794 2437 // be in the process of clearing the next marking bitmap (which
tonyp@1794 2438 // we will use for the next cycle if we start one). Starting a
tonyp@1794 2439 // cycle now will be bad given that parts of the marking
tonyp@1794 2440 // information might get cleared by the marking thread. And we
tonyp@1794 2441 // cannot wait for the marking thread to finish the cycle as it
tonyp@1794 2442 // periodically yields while clearing the next marking bitmap
tonyp@1794 2443 // and, if it's in a yield point, it's waiting for us to
tonyp@1794 2444 // finish. So, at this point we will not start a cycle and we'll
tonyp@1794 2445 // let the concurrent marking thread complete the last one.
tonyp@1794 2446 }
tonyp@1794 2447 }
tonyp@1794 2448 }
tonyp@1794 2449
tonyp@1794 2450 void
ysr@777 2451 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2452 record_collection_pause_start(double start_time_sec, size_t start_used) {
ysr@777 2453 G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
ysr@777 2454 }
ysr@777 2455
ysr@777 2456 class NextNonCSElemFinder: public HeapRegionClosure {
ysr@777 2457 HeapRegion* _res;
ysr@777 2458 public:
ysr@777 2459 NextNonCSElemFinder(): _res(NULL) {}
ysr@777 2460 bool doHeapRegion(HeapRegion* r) {
ysr@777 2461 if (!r->in_collection_set()) {
ysr@777 2462 _res = r;
ysr@777 2463 return true;
ysr@777 2464 } else {
ysr@777 2465 return false;
ysr@777 2466 }
ysr@777 2467 }
ysr@777 2468 HeapRegion* res() { return _res; }
ysr@777 2469 };
ysr@777 2470
ysr@777 2471 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2472 CollectionSetChooser* _hrSorted;
ysr@777 2473
ysr@777 2474 public:
ysr@777 2475 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2476 _hrSorted(hrSorted)
ysr@777 2477 {}
ysr@777 2478
ysr@777 2479 bool doHeapRegion(HeapRegion* r) {
ysr@777 2480 // We only include humongous regions in collection
ysr@777 2481 // sets when concurrent mark shows that their contained object is
ysr@777 2482 // unreachable.
ysr@777 2483
ysr@777 2484 // Do we have any marking information for this region?
ysr@777 2485 if (r->is_marked()) {
ysr@777 2486 // We don't include humongous regions in collection
ysr@777 2487 // sets because we collect them immediately at the end of a marking
ysr@777 2488 // cycle. We also don't include young regions because we *must*
ysr@777 2489 // include them in the next collection pause.
ysr@777 2490 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2491 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2492 }
ysr@777 2493 }
ysr@777 2494 return false;
ysr@777 2495 }
ysr@777 2496 };
ysr@777 2497
ysr@777 2498 class ParKnownGarbageHRClosure: public HeapRegionClosure {
ysr@777 2499 CollectionSetChooser* _hrSorted;
ysr@777 2500 jint _marked_regions_added;
ysr@777 2501 jint _chunk_size;
ysr@777 2502 jint _cur_chunk_idx;
ysr@777 2503 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
ysr@777 2504 int _worker;
ysr@777 2505 int _invokes;
ysr@777 2506
ysr@777 2507 void get_new_chunk() {
ysr@777 2508 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
ysr@777 2509 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
ysr@777 2510 }
ysr@777 2511 void add_region(HeapRegion* r) {
ysr@777 2512 if (_cur_chunk_idx == _cur_chunk_end) {
ysr@777 2513 get_new_chunk();
ysr@777 2514 }
ysr@777 2515 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
ysr@777 2516 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
ysr@777 2517 _marked_regions_added++;
ysr@777 2518 _cur_chunk_idx++;
ysr@777 2519 }
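  // Editorial note (illustrative, not part of the original source): the two
  // helpers above let each worker claim a private block of _chunk_size
  // consecutive slots in the chooser's backing array and fill them
  // sequentially, so individual region insertions need no synchronization.
  // For example, with a chunk size of 8, a worker whose claim returns
  // index 16 owns slots [16, 24) until it exhausts them and claims again.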
ysr@777 2520
ysr@777 2521 public:
ysr@777 2522 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
ysr@777 2523 jint chunk_size,
ysr@777 2524 int worker) :
ysr@777 2525 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
ysr@777 2526 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
ysr@777 2527 _invokes(0)
ysr@777 2528 {}
ysr@777 2529
ysr@777 2530 bool doHeapRegion(HeapRegion* r) {
ysr@777 2531 // We only include humongous regions in collection
ysr@777 2532 // sets when concurrent mark shows that their contained object is
ysr@777 2533 // unreachable.
ysr@777 2534 _invokes++;
ysr@777 2535
ysr@777 2536 // Do we have any marking information for this region?
ysr@777 2537 if (r->is_marked()) {
ysr@777 2538 // We don't include humongous regions in collection
ysr@777 2539 // sets because we collect them immediately at the end of a marking
ysr@777 2540 // cycle.
ysr@777 2541 // We also do not include young regions in collection sets
ysr@777 2542 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2543 add_region(r);
ysr@777 2544 }
ysr@777 2545 }
ysr@777 2546 return false;
ysr@777 2547 }
ysr@777 2548 jint marked_regions_added() { return _marked_regions_added; }
ysr@777 2549 int invokes() { return _invokes; }
ysr@777 2550 };
ysr@777 2551
ysr@777 2552 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 2553 CollectionSetChooser* _hrSorted;
ysr@777 2554 jint _chunk_size;
ysr@777 2555 G1CollectedHeap* _g1;
ysr@777 2556 public:
ysr@777 2557 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ysr@777 2558 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 2559 _hrSorted(hrSorted), _chunk_size(chunk_size),
ysr@777 2560 _g1(G1CollectedHeap::heap())
ysr@777 2561 {}
ysr@777 2562
ysr@777 2563 void work(int i) {
ysr@777 2564 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
ysr@777 2565 // Back to zero for the claim value.
tonyp@790 2566 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
tonyp@790 2567 HeapRegion::InitialClaimValue);
ysr@777 2568 jint regions_added = parKnownGarbageCl.marked_regions_added();
ysr@777 2569 _hrSorted->incNumMarkedHeapRegions(regions_added);
ysr@777 2570 if (G1PrintParCleanupStats) {
ysr@777 2571 gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n",
ysr@777 2572 i, parKnownGarbageCl.invokes(), regions_added);
ysr@777 2573 }
ysr@777 2574 }
ysr@777 2575 };
ysr@777 2576
ysr@777 2577 void
ysr@777 2578 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2579 record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 2580 size_t max_live_bytes) {
ysr@777 2581 double start;
ysr@777 2582 if (G1PrintParCleanupStats) start = os::elapsedTime();
ysr@777 2583 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 2584
ysr@777 2585 _collectionSetChooser->clearMarkedHeapRegions();
ysr@777 2586 double clear_marked_end;
ysr@777 2587 if (G1PrintParCleanupStats) {
ysr@777 2588 clear_marked_end = os::elapsedTime();
ysr@777 2589 gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
ysr@777 2590 (clear_marked_end - start)*1000.0);
ysr@777 2591 }
ysr@777 2592 if (ParallelGCThreads > 0) {
ysr@777 2593 const size_t OverpartitionFactor = 4;
kvn@1926 2594 const size_t MinWorkUnit = 8;
kvn@1926 2595 const size_t WorkUnit =
ysr@777 2596 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
kvn@1926 2597 MinWorkUnit);
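    // Editorial note (worked example, values assumed for illustration):
    // with 2048 regions and ParallelGCThreads == 8 the expression above
    // gives WorkUnit = MAX2(2048 / (8 * 4), 8) = 64 slots per parallel
    // claim; the MinWorkUnit floor of 8 only kicks in for small heaps or
    // very high thread counts.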
ysr@777 2598 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
kvn@1926 2599 WorkUnit);
ysr@777 2600 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
kvn@1926 2601 (int) WorkUnit);
ysr@777 2602 _g1->workers()->run_task(&parKnownGarbageTask);
tonyp@790 2603
tonyp@790 2604 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 2605 "sanity check");
ysr@777 2606 } else {
ysr@777 2607 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
ysr@777 2608 _g1->heap_region_iterate(&knownGarbagecl);
ysr@777 2609 }
ysr@777 2610 double known_garbage_end;
ysr@777 2611 if (G1PrintParCleanupStats) {
ysr@777 2612 known_garbage_end = os::elapsedTime();
ysr@777 2613 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
ysr@777 2614 (known_garbage_end - clear_marked_end)*1000.0);
ysr@777 2615 }
ysr@777 2616 _collectionSetChooser->sortMarkedHeapRegions();
ysr@777 2617 double sort_end;
ysr@777 2618 if (G1PrintParCleanupStats) {
ysr@777 2619 sort_end = os::elapsedTime();
ysr@777 2620 gclog_or_tty->print_cr(" sorting: %8.3f ms.",
ysr@777 2621 (sort_end - known_garbage_end)*1000.0);
ysr@777 2622 }
ysr@777 2623
ysr@777 2624 record_concurrent_mark_cleanup_end_work2();
ysr@777 2625 double work2_end;
ysr@777 2626 if (G1PrintParCleanupStats) {
ysr@777 2627 work2_end = os::elapsedTime();
ysr@777 2628 gclog_or_tty->print_cr(" work2: %8.3f ms.",
ysr@777 2629 (work2_end - sort_end)*1000.0);
ysr@777 2630 }
ysr@777 2631 }
ysr@777 2632
johnc@1829 2633 // Add the heap region at the head of the non-incremental collection set
ysr@777 2634 void G1CollectorPolicy::
ysr@777 2635 add_to_collection_set(HeapRegion* hr) {
johnc@1829 2636 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2637 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2638
tonyp@1717 2639 if (G1PrintHeapRegions) {
tonyp@1823 2640 gclog_or_tty->print_cr("added region to cset "
tonyp@1823 2641 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
tonyp@1823 2642 "top "PTR_FORMAT", %s",
tonyp@1823 2643 hr->hrs_index(), hr->bottom(), hr->end(),
tonyp@1823 2644 hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
ysr@777 2645 }
ysr@777 2646
ysr@777 2647 if (_g1->mark_in_progress())
ysr@777 2648 _g1->concurrent_mark()->registerCSetRegion(hr);
ysr@777 2649
johnc@1829 2650 assert(!hr->in_collection_set(), "should not already be in the CSet");
ysr@777 2651 hr->set_in_collection_set(true);
ysr@777 2652 hr->set_next_in_collection_set(_collection_set);
ysr@777 2653 _collection_set = hr;
ysr@777 2654 _collection_set_size++;
ysr@777 2655 _collection_set_bytes_used_before += hr->used();
tonyp@961 2656 _g1->register_region_with_in_cset_fast_test(hr);
ysr@777 2657 }
ysr@777 2658
johnc@1829 2659 // Initialize the per-collection-set information
johnc@1829 2660 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2661 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2662
johnc@1829 2663 _inc_cset_head = NULL;
johnc@1829 2664 _inc_cset_tail = NULL;
johnc@1829 2665 _inc_cset_size = 0;
johnc@1829 2666 _inc_cset_bytes_used_before = 0;
johnc@1829 2667
johnc@1829 2668 if (in_young_gc_mode()) {
johnc@1829 2669 _inc_cset_young_index = 0;
johnc@1829 2670 }
johnc@1829 2671
johnc@1829 2672 _inc_cset_max_finger = 0;
johnc@1829 2673 _inc_cset_recorded_young_bytes = 0;
johnc@1829 2674 _inc_cset_recorded_rs_lengths = 0;
johnc@1829 2675 _inc_cset_predicted_elapsed_time_ms = 0;
johnc@1829 2676 _inc_cset_predicted_bytes_to_copy = 0;
johnc@1829 2677 _inc_cset_build_state = Active;
johnc@1829 2678 }
johnc@1829 2679
johnc@1829 2680 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2681 // This routine is used when:
johnc@1829 2682 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2683 // evacuation pause,
johnc@1829 2684 // * adding the current allocation region to the incremental cset
johnc@1829 2685 // when it is retired, and
johnc@1829 2686 // * updating existing policy information for a region in the
johnc@1829 2687 // incremental cset via young list RSet sampling.
johnc@1829 2688 // Therefore this routine may be called at a safepoint by the
johnc@1829 2689 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2690 // retiring the current allocation region) or a concurrent
johnc@1829 2691 // refine thread (RSet sampling).
johnc@1829 2692
johnc@1829 2693 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2694 size_t used_bytes = hr->used();
johnc@1829 2695
johnc@1829 2696 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2697 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2698
johnc@1829 2699 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2700
johnc@1829 2701 // Cache the values we have added to the aggregated information
johnc@1829 2702 // in the heap region in case we have to remove this region from
johnc@1829 2703 // the incremental collection set, or it is updated by the
johnc@1829 2704 // rset sampling code
johnc@1829 2705 hr->set_recorded_rs_length(rs_length);
johnc@1829 2706 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2707
johnc@1829 2708 #if PREDICTIONS_VERBOSE
johnc@1829 2709 size_t bytes_to_copy = predict_bytes_to_copy(hr);
johnc@1829 2710 _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
johnc@1829 2711
johnc@1829 2712 // Record the number of bytes used in this region
johnc@1829 2713 _inc_cset_recorded_young_bytes += used_bytes;
johnc@1829 2714
johnc@1829 2715 // Cache the values we have added to the aggregated information
johnc@1829 2716 // in the heap region in case we have to remove this region from
johnc@1829 2717 // the incremental collection set, or it is updated by the
johnc@1829 2718 // rset sampling code
johnc@1829 2719 hr->set_predicted_bytes_to_copy(bytes_to_copy);
johnc@1829 2720 #endif // PREDICTIONS_VERBOSE
johnc@1829 2721 }
johnc@1829 2722
johnc@1829 2723 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
johnc@1829 2724 // This routine is currently only called as part of the updating of
johnc@1829 2725 // existing policy information for regions in the incremental cset that
johnc@1829 2726 // is performed by the concurrent refine thread(s) as part of young list
johnc@1829 2727 // RSet sampling. Therefore we should not be at a safepoint.
johnc@1829 2728
johnc@1829 2729 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
johnc@1829 2730 assert(hr->is_young(), "it should be");
johnc@1829 2731
johnc@1829 2732 size_t used_bytes = hr->used();
johnc@1829 2733 size_t old_rs_length = hr->recorded_rs_length();
johnc@1829 2734 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
johnc@1829 2735
johnc@1829 2736 // Subtract the old recorded/predicted policy information for
johnc@1829 2737 // the given heap region from the collection set info.
johnc@1829 2738 _inc_cset_recorded_rs_lengths -= old_rs_length;
johnc@1829 2739 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
johnc@1829 2740
johnc@1829 2741 _inc_cset_bytes_used_before -= used_bytes;
johnc@1829 2742
johnc@1829 2743 // Clear the values cached in the heap region
johnc@1829 2744 hr->set_recorded_rs_length(0);
johnc@1829 2745 hr->set_predicted_elapsed_time_ms(0);
johnc@1829 2746
johnc@1829 2747 #if PREDICTIONS_VERBOSE
johnc@1829 2748 size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
johnc@1829 2749 _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
johnc@1829 2750
johnc@1829 2751 // Subtract the number of bytes used in this region
johnc@1829 2752 _inc_cset_recorded_young_bytes -= used_bytes;
johnc@1829 2753
johnc@1829 2754 // Clear the values cached in the heap region
johnc@1829 2755 hr->set_predicted_bytes_to_copy(0);
johnc@1829 2756 #endif // PREDICTIONS_VERBOSE
johnc@1829 2757 }
johnc@1829 2758
johnc@1829 2759 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
johnc@1829 2760 // Update the collection set information that is dependent on the new RS length
johnc@1829 2761 assert(hr->is_young(), "Precondition");
johnc@1829 2762
johnc@1829 2763 remove_from_incremental_cset_info(hr);
johnc@1829 2764 add_to_incremental_cset_info(hr, new_rs_length);
johnc@1829 2765 }
johnc@1829 2766
johnc@1829 2767 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
johnc@1829 2768 assert( hr->is_young(), "invariant");
johnc@1829 2769 assert( hr->young_index_in_cset() == -1, "invariant" );
johnc@1829 2770 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2771
johnc@1829 2772 // We need to clear and set the cached recorded/predicted collection set
johnc@1829 2773 // information in the heap region here (before the region gets added
johnc@1829 2774 // to the collection set). An individual heap region's cached values
johnc@1829 2775 // are calculated, aggregated with the policy collection set info,
johnc@1829 2776 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2777 // by the Young List sampling code.
johnc@1829 2778
johnc@1829 2779 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2780 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2781
johnc@1829 2782 HeapWord* hr_end = hr->end();
johnc@1829 2783 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2784
johnc@1829 2785 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2786 hr->set_in_collection_set(true);
johnc@1829 2787 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2788
johnc@1829 2789 _inc_cset_size++;
johnc@1829 2790 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2791
johnc@1829 2792 hr->set_young_index_in_cset((int) _inc_cset_young_index);
johnc@1829 2793 ++_inc_cset_young_index;
johnc@1829 2794 }
johnc@1829 2795
johnc@1829 2796 // Add the region to the RHS of the incremental cset
johnc@1829 2797 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2798 // We should only ever be appending survivors at the end of a pause
johnc@1829 2799 assert( hr->is_survivor(), "Logic");
johnc@1829 2800
johnc@1829 2801 // Do the 'common' stuff
johnc@1829 2802 add_region_to_incremental_cset_common(hr);
johnc@1829 2803
johnc@1829 2804 // Now add the region at the right hand side
johnc@1829 2805 if (_inc_cset_tail == NULL) {
johnc@1829 2806 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2807 _inc_cset_head = hr;
johnc@1829 2808 } else {
johnc@1829 2809 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2810 }
johnc@1829 2811 _inc_cset_tail = hr;
johnc@1829 2812
johnc@1829 2813 if (G1PrintHeapRegions) {
johnc@1829 2814 gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
johnc@1829 2815 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
johnc@1829 2816 "top "PTR_FORMAT", young %s",
johnc@1829 2817 hr->hrs_index(), hr->bottom(), hr->end(),
johnc@1829 2818 hr->top(), (hr->is_young()) ? "YES" : "NO");
johnc@1829 2819 }
johnc@1829 2820 }
johnc@1829 2821
johnc@1829 2822 // Add the region to the LHS of the incremental cset
johnc@1829 2823 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2824 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2825 assert(!hr->is_survivor(), "Logic");
johnc@1829 2826
johnc@1829 2827 // Do the 'common' stuff
johnc@1829 2828 add_region_to_incremental_cset_common(hr);
johnc@1829 2829
johnc@1829 2830 // Add the region at the left hand side
johnc@1829 2831 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2832 if (_inc_cset_head == NULL) {
johnc@1829 2833 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2834 _inc_cset_tail = hr;
johnc@1829 2835 }
johnc@1829 2836 _inc_cset_head = hr;
johnc@1829 2837
johnc@1829 2838 if (G1PrintHeapRegions) {
johnc@1829 2839 gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
johnc@1829 2840 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
johnc@1829 2841 "top "PTR_FORMAT", young %s",
johnc@1829 2842 hr->hrs_index(), hr->bottom(), hr->end(),
johnc@1829 2843 hr->top(), (hr->is_young()) ? "YES" : "NO");
johnc@1829 2844 }
johnc@1829 2845 }
johnc@1829 2846
johnc@1829 2847 #ifndef PRODUCT
johnc@1829 2848 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2849 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2850
johnc@1829 2851 st->print_cr("\nCollection_set:");
johnc@1829 2852 HeapRegion* csr = list_head;
johnc@1829 2853 while (csr != NULL) {
johnc@1829 2854 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2855 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2856 st->print_cr(" [%08x-%08x], t: %08x, P: %08x, N: %08x, C: %08x, "
johnc@1829 2857 "age: %4d, y: %d, surv: %d",
johnc@1829 2858 csr->bottom(), csr->end(),
johnc@1829 2859 csr->top(),
johnc@1829 2860 csr->prev_top_at_mark_start(),
johnc@1829 2861 csr->next_top_at_mark_start(),
johnc@1829 2862 csr->top_at_conc_mark_count(),
johnc@1829 2863 csr->age_in_surv_rate_group_cond(),
johnc@1829 2864 csr->is_young(),
johnc@1829 2865 csr->is_survivor());
johnc@1829 2866 csr = next;
johnc@1829 2867 }
johnc@1829 2868 }
johnc@1829 2869 #endif // !PRODUCT
johnc@1829 2870
johnc@1829 2871 bool
tonyp@2011 2872 G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
tonyp@2011 2873 double target_pause_time_ms) {
johnc@1829 2874 // Set this here - in case we're not doing young collections.
johnc@1829 2875 double non_young_start_time_sec = os::elapsedTime();
johnc@1829 2876
johnc@1829 2877 // The result that this routine will return. This will be set to
johnc@1829 2878 // false if:
johnc@1829 2879 // * we're doing a young or partially young collection and we
johnc@1829 2880 // have added the young regions to the collection set, or
johnc@1829 2881 // * we add old regions to the collection set.
johnc@1829 2882 bool abandon_collection = true;
johnc@1829 2883
ysr@777 2884 start_recording_regions();
ysr@777 2885
tonyp@2011 2886 guarantee(target_pause_time_ms > 0.0,
tonyp@2011 2887 err_msg("target_pause_time_ms = %1.6lf should be positive",
tonyp@2011 2888 target_pause_time_ms));
tonyp@2011 2889 guarantee(_collection_set == NULL, "Precondition");
ysr@777 2890
ysr@777 2891 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
ysr@777 2892 double predicted_pause_time_ms = base_time_ms;
ysr@777 2893
tonyp@2011 2894 double time_remaining_ms = target_pause_time_ms - base_time_ms;
ysr@777 2895
ysr@777 2896 // the 10% and 50% values are arbitrary...
tonyp@2011 2897 if (time_remaining_ms < 0.10 * target_pause_time_ms) {
tonyp@2011 2898 time_remaining_ms = 0.50 * target_pause_time_ms;
ysr@777 2899 _within_target = false;
ysr@777 2900 } else {
ysr@777 2901 _within_target = true;
ysr@777 2902 }
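  // Editorial note (worked example, numbers assumed): with a 200 ms target
  // and a predicted base time of 185 ms, the remaining 15 ms falls below
  // the 10% threshold (20 ms), so time_remaining_ms is reset to 100 ms
  // (50% of the target) and _within_target is cleared.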
ysr@777 2903
ysr@777 2904 // We figure out the number of bytes available for future to-space.
ysr@777 2905 // For new regions without marking information, we must assume the
ysr@777 2906 // worst-case of complete survival. If we have marking information for a
ysr@777 2907 // region, we can bound the amount of live data. We can add a number of
ysr@777 2908 // such regions, as long as the sum of the live data bounds does not
ysr@777 2909 // exceed the available evacuation space.
ysr@777 2910 size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
ysr@777 2911
ysr@777 2912 size_t expansion_bytes =
ysr@777 2913 _g1->expansion_regions() * HeapRegion::GrainBytes;
ysr@777 2914
apetrusenko@1112 2915 _collection_set_bytes_used_before = 0;
apetrusenko@1112 2916 _collection_set_size = 0;
ysr@777 2917
ysr@777 2918 // Adjust for expansion and slop.
ysr@777 2919 max_live_bytes = max_live_bytes + expansion_bytes;
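  // Editorial note (worked example, numbers assumed): with 512 free
  // regions, 64 expansion regions and a 1 MB region size, max_live_bytes
  // becomes (512 + 64) * 1 MB = 576 MB; this is the budget against which
  // the worst-case live data of each candidate region is charged below.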
ysr@777 2920
apetrusenko@1112 2921 assert(_g1->regions_accounted_for(), "Region leakage!");
ysr@777 2922
ysr@777 2923 HeapRegion* hr;
ysr@777 2924 if (in_young_gc_mode()) {
ysr@777 2925 double young_start_time_sec = os::elapsedTime();
ysr@777 2926
ysr@777 2927 if (G1PolicyVerbose > 0) {
ysr@777 2928 gclog_or_tty->print_cr("Adding %d young regions to the CSet",
johnc@1829 2929 _g1->young_list()->length());
ysr@777 2930 }
johnc@1829 2931
ysr@777 2932 _young_cset_length = 0;
ysr@777 2933 _last_young_gc_full = full_young_gcs();
johnc@1829 2934
ysr@777 2935 if (_last_young_gc_full)
ysr@777 2936 ++_full_young_pause_num;
ysr@777 2937 else
ysr@777 2938 ++_partial_young_pause_num;
johnc@1829 2939
johnc@1829 2940 // The young list is laid out with the survivor regions from the
johnc@1829 2941 // previous pause appended to the RHS of the young list, i.e.
johnc@1829 2942 // [Newly Young Regions ++ Survivors from last pause].
johnc@1829 2943
johnc@1829 2944 hr = _g1->young_list()->first_survivor_region();
ysr@777 2945 while (hr != NULL) {
johnc@1829 2946 assert(hr->is_survivor(), "badly formed young list");
johnc@1829 2947 hr->set_young();
johnc@1829 2948 hr = hr->get_next_young_region();
ysr@777 2949 }
ysr@777 2950
johnc@1829 2951 // Clear the fields that point to the survivor list - they are
johnc@1829 2952 // all young now.
johnc@1829 2953 _g1->young_list()->clear_survivors();
johnc@1829 2954
johnc@1829 2955 if (_g1->mark_in_progress())
johnc@1829 2956 _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
johnc@1829 2957
johnc@1829 2958 _young_cset_length = _inc_cset_young_index;
johnc@1829 2959 _collection_set = _inc_cset_head;
johnc@1829 2960 _collection_set_size = _inc_cset_size;
johnc@1829 2961 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
johnc@1829 2962
johnc@1829 2963 // For young regions in the collection set, we assume the worst
johnc@1829 2964 // case of complete survival
johnc@1829 2965 max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
johnc@1829 2966
johnc@1829 2967 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
johnc@1829 2968 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
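    // Editorial note (illustrative, numbers assumed): continuing the
    // example above, an incremental cset of 32 young regions of 1 MB each
    // reduces the to-space budget by 32 MB (complete survival is assumed
    // for young regions), while the time budget shrinks by the elapsed-time
    // estimate aggregated for those regions during RSet sampling.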
johnc@1829 2969
johnc@1829 2970 // The number of recorded young regions is the incremental
johnc@1829 2971 // collection set's current size
johnc@1829 2972 set_recorded_young_regions(_inc_cset_size);
johnc@1829 2973 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
johnc@1829 2974 set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
johnc@1829 2975 #if PREDICTIONS_VERBOSE
johnc@1829 2976 set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
johnc@1829 2977 #endif // PREDICTIONS_VERBOSE
johnc@1829 2978
johnc@1829 2979 if (G1PolicyVerbose > 0) {
johnc@1829 2980 gclog_or_tty->print_cr(" Added " PTR_FORMAT " Young Regions to CS.",
johnc@1829 2981 _inc_cset_size);
johnc@1829 2982 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
johnc@1829 2983 max_live_bytes/K);
johnc@1829 2984 }
johnc@1829 2985
johnc@1829 2986 assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
johnc@1829 2987 if (_inc_cset_size > 0) {
johnc@1829 2988 assert(_collection_set != NULL, "Invariant");
johnc@1829 2989 abandon_collection = false;
johnc@1829 2990 }
ysr@777 2991
ysr@777 2992 double young_end_time_sec = os::elapsedTime();
ysr@777 2993 _recorded_young_cset_choice_time_ms =
ysr@777 2994 (young_end_time_sec - young_start_time_sec) * 1000.0;
ysr@777 2995
johnc@1829 2996 // We are doing young collections so reset this.
johnc@1829 2997 non_young_start_time_sec = young_end_time_sec;
johnc@1829 2998
johnc@1829 2999 // Note we can use either _collection_set_size or
johnc@1829 3000 // _young_cset_length here
johnc@1829 3001 if (_collection_set_size > 0 && _last_young_gc_full) {
ysr@777 3002 // don't bother adding more regions...
ysr@777 3003 goto choose_collection_set_end;
ysr@777 3004 }
ysr@777 3005 }
ysr@777 3006
ysr@777 3007 if (!in_young_gc_mode() || !full_young_gcs()) {
ysr@777 3008 bool should_continue = true;
ysr@777 3009 NumberSeq seq;
ysr@777 3010 double avg_prediction = 100000000000000000.0; // something very large
johnc@1829 3011
johnc@1829 3012 // Save the current size of the collection set to detect
johnc@1829 3013 // if we actually added any old regions.
johnc@1829 3014 size_t n_young_regions = _collection_set_size;
johnc@1829 3015
ysr@777 3016 do {
ysr@777 3017 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
ysr@777 3018 avg_prediction);
apetrusenko@1112 3019 if (hr != NULL) {
ysr@777 3020 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
ysr@777 3021 time_remaining_ms -= predicted_time_ms;
ysr@777 3022 predicted_pause_time_ms += predicted_time_ms;
ysr@777 3023 add_to_collection_set(hr);
johnc@1829 3024 record_non_young_cset_region(hr);
ysr@777 3025 max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
ysr@777 3026 if (G1PolicyVerbose > 0) {
ysr@777 3027 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
ysr@777 3028 max_live_bytes/K);
ysr@777 3029 }
ysr@777 3030 seq.add(predicted_time_ms);
ysr@777 3031 avg_prediction = seq.avg() + seq.sd();
ysr@777 3032 }
ysr@777 3033 should_continue =
ysr@777 3034 ( hr != NULL) &&
ysr@777 3035 ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
ysr@777 3036 : _collection_set_size < _young_list_fixed_length );
ysr@777 3037 } while (should_continue);
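    // Editorial note (illustrative; the exact cut-off is decided inside
    // getNextMarkedRegion, which is handed the remaining time budget and
    // the running "avg + sd" of the predictions): with roughly 37 ms
    // remaining and candidates predicted at 9, 7, 12 and 8 ms, all four
    // would be added and the loop would terminate once the chooser stops
    // returning regions or the budget is exhausted.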
ysr@777 3038
ysr@777 3039 if (!adaptive_young_list_length() &&
ysr@777 3040 _collection_set_size < _young_list_fixed_length)
ysr@777 3041 _should_revert_to_full_young_gcs = true;
johnc@1829 3042
johnc@1829 3043 if (_collection_set_size > n_young_regions) {
johnc@1829 3044 // We actually added old regions to the collection set
johnc@1829 3045 // so we are not abandoning this collection.
johnc@1829 3046 abandon_collection = false;
johnc@1829 3047 }
ysr@777 3048 }
ysr@777 3049
ysr@777 3050 choose_collection_set_end:
johnc@1829 3051 stop_incremental_cset_building();
johnc@1829 3052
ysr@777 3053 count_CS_bytes_used();
ysr@777 3054
ysr@777 3055 end_recording_regions();
ysr@777 3056
ysr@777 3057 double non_young_end_time_sec = os::elapsedTime();
ysr@777 3058 _recorded_non_young_cset_choice_time_ms =
ysr@777 3059 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
johnc@1829 3060
tonyp@2011 3061 // Here we are supposed to return whether the pause should be
tonyp@2011 3062 // abandoned or not (i.e., whether the collection set is empty or
tonyp@2011 3063 // not). However, this introduces a subtle issue when a pause is
tonyp@2011 3064 // initiated explicitly with System.gc() and
tonyp@2011 3065 // +ExplicitGCInvokesConcurrent (see Comment #2 in CR 6944166), it's
tonyp@2011 3066 // supposed to start a marking cycle, and it's abandoned. So, by
tonyp@2011 3067 // returning false here we are telling the caller never to consider
tonyp@2011 3068 // a pause to be abandoned. We'll actually remove all the code
tonyp@2011 3069 // associated with abandoned pauses as part of CR 6963209, but we are
tonyp@2011 3070 // just disabling them this way for the moment to avoid increasing
tonyp@2011 3071 // further the amount of changes for CR 6944166.
tonyp@2011 3072 return false;
ysr@777 3073 }
ysr@777 3074
ysr@777 3075 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
ysr@777 3076 G1CollectorPolicy::record_full_collection_end();
ysr@777 3077 _collectionSetChooser->updateAfterFullCollection();
ysr@777 3078 }
ysr@777 3079
ysr@777 3080 void G1CollectorPolicy_BestRegionsFirst::
ysr@777 3081 expand_if_possible(size_t numRegions) {
ysr@777 3082 size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
ysr@777 3083 _g1->expand(expansion_bytes);
ysr@777 3084 }
ysr@777 3085
ysr@777 3086 void G1CollectorPolicy_BestRegionsFirst::
apetrusenko@1112 3087 record_collection_pause_end(bool abandoned) {
apetrusenko@1112 3088 G1CollectorPolicy::record_collection_pause_end(abandoned);
ysr@777 3089 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 3090 }
