src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

author:      trims
date:        Thu, 27 May 2010 19:08:38 -0700
changeset:   1907:c18cbe5936b8
parent:      1829:1316cec51b4d
child:       1934:e9ff18c4ace7
permissions: -rw-r--r--

6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyright
Reviewed-by: ohair
ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_g1CollectorPolicy.cpp.incl"
ysr@777 27
ysr@777 28 #define PREDICTIONS_VERBOSE 0
ysr@777 29
ysr@777 30 // <NEW PREDICTION>
ysr@777 31
ysr@777 32 // Different defaults for different number of GC threads
ysr@777 33 // They were chosen by running GCOld and SPECjbb on debris with different
ysr@777 34 // numbers of GC threads and choosing them based on the results
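// Illustrative note: each array below has eight entries, indexed by the
// number of parallel GC threads (clamped to the range 1..8); see the index
// selection in the G1CollectorPolicy constructor below for how an entry is
// chosen.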
ysr@777 35
ysr@777 36 // all the same
ysr@777 37 static double rs_length_diff_defaults[] = {
ysr@777 38 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
ysr@777 39 };
ysr@777 40
ysr@777 41 static double cost_per_card_ms_defaults[] = {
ysr@777 42 0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
ysr@777 43 };
ysr@777 44
ysr@777 45 // all the same
ysr@777 46 static double fully_young_cards_per_entry_ratio_defaults[] = {
ysr@777 47 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
ysr@777 48 };
ysr@777 49
ysr@777 50 static double cost_per_entry_ms_defaults[] = {
ysr@777 51 0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
ysr@777 52 };
ysr@777 53
ysr@777 54 static double cost_per_byte_ms_defaults[] = {
ysr@777 55 0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
ysr@777 56 };
ysr@777 57
ysr@777 58 // these should be pretty consistent
ysr@777 59 static double constant_other_time_ms_defaults[] = {
ysr@777 60 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
ysr@777 61 };
ysr@777 62
ysr@777 63
ysr@777 64 static double young_other_cost_per_region_ms_defaults[] = {
ysr@777 65 0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
ysr@777 66 };
ysr@777 67
ysr@777 68 static double non_young_other_cost_per_region_ms_defaults[] = {
ysr@777 69 1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
ysr@777 70 };
ysr@777 71
ysr@777 72 // </NEW PREDICTION>
ysr@777 73
ysr@777 74 G1CollectorPolicy::G1CollectorPolicy() :
ysr@777 75 _parallel_gc_threads((ParallelGCThreads > 0) ? ParallelGCThreads : 1),
ysr@777 76 _n_pauses(0),
ysr@777 77 _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 78 _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 79 _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 80 _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 81 _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 82 _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 83 _all_pause_times_ms(new NumberSeq()),
ysr@777 84 _stop_world_start(0.0),
ysr@777 85 _all_stop_world_times_ms(new NumberSeq()),
ysr@777 86 _all_yield_times_ms(new NumberSeq()),
ysr@777 87
ysr@777 88 _all_mod_union_times_ms(new NumberSeq()),
ysr@777 89
apetrusenko@1112 90 _summary(new Summary()),
apetrusenko@1112 91 _abandoned_summary(new AbandonedSummary()),
ysr@777 92
johnc@1325 93 #ifndef PRODUCT
ysr@777 94 _cur_clear_ct_time_ms(0.0),
johnc@1325 95 _min_clear_cc_time_ms(-1.0),
johnc@1325 96 _max_clear_cc_time_ms(-1.0),
johnc@1325 97 _cur_clear_cc_time_ms(0.0),
johnc@1325 98 _cum_clear_cc_time_ms(0.0),
johnc@1325 99 _num_cc_clears(0L),
johnc@1325 100 #endif
ysr@777 101
ysr@777 102 _region_num_young(0),
ysr@777 103 _region_num_tenured(0),
ysr@777 104 _prev_region_num_young(0),
ysr@777 105 _prev_region_num_tenured(0),
ysr@777 106
ysr@777 107 _aux_num(10),
ysr@777 108 _all_aux_times_ms(new NumberSeq[_aux_num]),
ysr@777 109 _cur_aux_start_times_ms(new double[_aux_num]),
ysr@777 110 _cur_aux_times_ms(new double[_aux_num]),
ysr@777 111 _cur_aux_times_set(new bool[_aux_num]),
ysr@777 112
ysr@777 113 _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 114 _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 115 _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 116
ysr@777 117 // <NEW PREDICTION>
ysr@777 118
ysr@777 119 _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 120 _prev_collection_pause_end_ms(0.0),
ysr@777 121 _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 122 _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 123 _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 124 _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 125 _partially_young_cards_per_entry_ratio_seq(
ysr@777 126 new TruncatedSeq(TruncatedSeqLength)),
ysr@777 127 _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 128 _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 129 _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 130 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 131 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 132 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 133 _non_young_other_cost_per_region_ms_seq(
ysr@777 134 new TruncatedSeq(TruncatedSeqLength)),
ysr@777 135
ysr@777 136 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 137 _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 138 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 139
johnc@1186 140 _pause_time_target_ms((double) MaxGCPauseMillis),
ysr@777 141
ysr@777 142 // </NEW PREDICTION>
ysr@777 143
ysr@777 144 _in_young_gc_mode(false),
ysr@777 145 _full_young_gcs(true),
ysr@777 146 _full_young_pause_num(0),
ysr@777 147 _partial_young_pause_num(0),
ysr@777 148
ysr@777 149 _during_marking(false),
ysr@777 150 _in_marking_window(false),
ysr@777 151 _in_marking_window_im(false),
ysr@777 152
ysr@777 153 _known_garbage_ratio(0.0),
ysr@777 154 _known_garbage_bytes(0),
ysr@777 155
ysr@777 156 _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
ysr@777 157 _target_pause_time_ms(-1.0),
ysr@777 158
ysr@777 159 _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 160
ysr@777 161 _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 162 _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
ysr@777 163
ysr@777 164 _recent_avg_pause_time_ratio(0.0),
ysr@777 165 _num_markings(0),
ysr@777 166 _n_marks(0),
ysr@777 167 _n_pauses_at_mark_end(0),
ysr@777 168
ysr@777 169 _all_full_gc_times_ms(new NumberSeq()),
ysr@777 170
ysr@777 171 // G1PausesBtwnConcMark defaults to -1
ysr@777 172 // so the hack is to do the cast QQQ FIXME
ysr@777 173 _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
ysr@777 174 _n_marks_since_last_pause(0),
tonyp@1794 175 _initiate_conc_mark_if_possible(false),
tonyp@1794 176 _during_initial_mark_pause(false),
ysr@777 177 _should_revert_to_full_young_gcs(false),
ysr@777 178 _last_full_young_gc(false),
ysr@777 179
ysr@777 180 _prev_collection_pause_used_at_end_bytes(0),
ysr@777 181
ysr@777 182 _collection_set(NULL),
johnc@1829 183 _collection_set_size(0),
johnc@1829 184 _collection_set_bytes_used_before(0),
johnc@1829 185
johnc@1829 186 // Incremental CSet attributes
johnc@1829 187 _inc_cset_build_state(Inactive),
johnc@1829 188 _inc_cset_head(NULL),
johnc@1829 189 _inc_cset_tail(NULL),
johnc@1829 190 _inc_cset_size(0),
johnc@1829 191 _inc_cset_young_index(0),
johnc@1829 192 _inc_cset_bytes_used_before(0),
johnc@1829 193 _inc_cset_max_finger(NULL),
johnc@1829 194 _inc_cset_recorded_young_bytes(0),
johnc@1829 195 _inc_cset_recorded_rs_lengths(0),
johnc@1829 196 _inc_cset_predicted_elapsed_time_ms(0.0),
johnc@1829 197 _inc_cset_predicted_bytes_to_copy(0),
johnc@1829 198
ysr@777 199 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
ysr@777 200 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
ysr@777 201 #endif // _MSC_VER
ysr@777 202
ysr@777 203 _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
ysr@777 204 G1YoungSurvRateNumRegionsSummary)),
ysr@777 205 _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
apetrusenko@980 206 G1YoungSurvRateNumRegionsSummary)),
ysr@777 207 // add here any more surv rate groups
apetrusenko@980 208 _recorded_survivor_regions(0),
apetrusenko@980 209 _recorded_survivor_head(NULL),
apetrusenko@980 210 _recorded_survivor_tail(NULL),
tonyp@1791 211 _survivors_age_table(true),
tonyp@1791 212
tonyp@1791 213 _gc_overhead_perc(0.0)
apetrusenko@980 214
ysr@777 215 {
tonyp@1377 216 // Set up the region size and associated fields. Given that the
tonyp@1377 217 // policy is created before the heap, we have to set this up here,
tonyp@1377 218 // so it's done as soon as possible.
tonyp@1377 219 HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
iveresov@1696 220 HeapRegionRemSet::setup_remset_size();
tonyp@1377 221
apetrusenko@1826 222 // Verify PLAB sizes
apetrusenko@1826 223 const uint region_size = HeapRegion::GrainWords;
apetrusenko@1826 224 if (YoungPLABSize > region_size || OldPLABSize > region_size) {
apetrusenko@1826 225 char buffer[128];
apetrusenko@1826 226 jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
apetrusenko@1826 227 OldPLABSize > region_size ? "Old" : "Young", region_size);
apetrusenko@1826 228 vm_exit_during_initialization(buffer);
apetrusenko@1826 229 }
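// Note (illustrative): YoungPLABSize, OldPLABSize and HeapRegion::GrainWords
// are all expressed in HeapWords, so the comparison above is between like
// units.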
apetrusenko@1826 230
ysr@777 231 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
ysr@777 232 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
ysr@777 233
ysr@777 234 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads];
ysr@777 235 _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads];
ysr@777 236
ysr@777 237 _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads];
ysr@777 238 _par_last_update_rs_times_ms = new double[_parallel_gc_threads];
ysr@777 239 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads];
ysr@777 240
ysr@777 241 _par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads];
ysr@777 242 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads];
ysr@777 243 _par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads];
ysr@777 244
ysr@777 245 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads];
ysr@777 246
ysr@777 247 _par_last_termination_times_ms = new double[_parallel_gc_threads];
ysr@777 248
ysr@777 249 // start conservatively
johnc@1186 250 _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
ysr@777 251
ysr@777 252 // <NEW PREDICTION>
ysr@777 253
ysr@777 254 int index;
ysr@777 255 if (ParallelGCThreads == 0)
ysr@777 256 index = 0;
ysr@777 257 else if (ParallelGCThreads > 8)
ysr@777 258 index = 7;
ysr@777 259 else
ysr@777 260 index = ParallelGCThreads - 1;
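// For example, ParallelGCThreads == 4 selects index 3 (the fourth entry of
// each defaults array above), while 0 (serial) and 1 both map to index 0 and
// anything above 8 threads is clamped to index 7.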
ysr@777 261
ysr@777 262 _pending_card_diff_seq->add(0.0);
ysr@777 263 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
ysr@777 264 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
ysr@777 265 _fully_young_cards_per_entry_ratio_seq->add(
ysr@777 266 fully_young_cards_per_entry_ratio_defaults[index]);
ysr@777 267 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
ysr@777 268 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
ysr@777 269 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
ysr@777 270 _young_other_cost_per_region_ms_seq->add(
ysr@777 271 young_other_cost_per_region_ms_defaults[index]);
ysr@777 272 _non_young_other_cost_per_region_ms_seq->add(
ysr@777 273 non_young_other_cost_per_region_ms_defaults[index]);
ysr@777 274
ysr@777 275 // </NEW PREDICTION>
ysr@777 276
johnc@1186 277 double time_slice = (double) GCPauseIntervalMillis / 1000.0;
johnc@1186 278 double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
ysr@777 279 guarantee(max_gc_time < time_slice,
ysr@777 280 "Max GC time should not be greater than the time slice");
ysr@777 281 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
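// Illustration (example values, not the defaults): MaxGCPauseMillis == 200
// and GCPauseIntervalMillis == 1000 ask the MMU tracker to allow at most
// 200ms of stop-the-world GC time in any 1000ms window.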
johnc@1186 282 _sigma = (double) G1ConfidencePercent / 100.0;
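// The sigma value pads predictions made from the TruncatedSeq data (roughly
// average + sigma * standard deviation), so a larger G1ConfidencePercent
// makes the predictions, and hence the policy, more conservative.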
ysr@777 283
ysr@777 284 // start conservatively (around 50ms is about right)
ysr@777 285 _concurrent_mark_init_times_ms->add(0.05);
ysr@777 286 _concurrent_mark_remark_times_ms->add(0.05);
ysr@777 287 _concurrent_mark_cleanup_times_ms->add(0.20);
ysr@777 288 _tenuring_threshold = MaxTenuringThreshold;
ysr@777 289
tonyp@1717 290 // If G1FixedSurvivorSpaceSize is 0, which means the size is not
tonyp@1717 291 // fixed, then _max_survivor_regions will be calculated in
johnc@1829 292 // calculate_young_list_target_length() during initialization.
tonyp@1717 293 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
apetrusenko@980 294
tonyp@1791 295 assert(GCTimeRatio > 0,
tonyp@1791 296 "we should have set it to a default value in set_g1_gc_flags() "
tonyp@1791 297 "if a user set it to 0");
tonyp@1791 298 _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
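// For example, GCTimeRatio == 9 gives 100 * (1 / (1 + 9)) = 10, i.e. a
// target of roughly at most 10% of total time spent in GC.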
tonyp@1791 299
ysr@777 300 initialize_all();
ysr@777 301 }
ysr@777 302
ysr@777 303 // Increment "i", mod "len"
ysr@777 304 static void inc_mod(int& i, int len) {
ysr@777 305 i++; if (i == len) i = 0;
ysr@777 306 }
ysr@777 307
ysr@777 308 void G1CollectorPolicy::initialize_flags() {
ysr@777 309 set_min_alignment(HeapRegion::GrainBytes);
ysr@777 310 set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
apetrusenko@982 311 if (SurvivorRatio < 1) {
apetrusenko@982 312 vm_exit_during_initialization("Invalid survivor ratio specified");
apetrusenko@982 313 }
ysr@777 314 CollectorPolicy::initialize_flags();
ysr@777 315 }
ysr@777 316
tonyp@1720 317 // The easiest way to deal with the parsing of the NewSize /
tonyp@1720 318 // MaxNewSize / etc. parameters is to re-use the code in the
tonyp@1720 319 // TwoGenerationCollectorPolicy class. This is similar to what
tonyp@1720 320 // ParallelScavenge does with its GenerationSizer class (see
tonyp@1720 321 // ParallelScavengeHeap::initialize()). We might change this in the
tonyp@1720 322 // future, but it's a good start.
tonyp@1720 323 class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
tonyp@1720 324 size_t size_to_region_num(size_t byte_size) {
tonyp@1720 325 return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
tonyp@1720 326 }
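// For example, with a 1M region size a requested 64M young gen maps to 64
// regions, and anything smaller than one region is rounded up to 1.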
tonyp@1720 327
tonyp@1720 328 public:
tonyp@1720 329 G1YoungGenSizer() {
tonyp@1720 330 initialize_flags();
tonyp@1720 331 initialize_size_info();
tonyp@1720 332 }
tonyp@1720 333
tonyp@1720 334 size_t min_young_region_num() {
tonyp@1720 335 return size_to_region_num(_min_gen0_size);
tonyp@1720 336 }
tonyp@1720 337 size_t initial_young_region_num() {
tonyp@1720 338 return size_to_region_num(_initial_gen0_size);
tonyp@1720 339 }
tonyp@1720 340 size_t max_young_region_num() {
tonyp@1720 341 return size_to_region_num(_max_gen0_size);
tonyp@1720 342 }
tonyp@1720 343 };
tonyp@1720 344
ysr@777 345 void G1CollectorPolicy::init() {
ysr@777 346 // Set aside an initial future to_space.
ysr@777 347 _g1 = G1CollectedHeap::heap();
ysr@777 348
ysr@777 349 assert(Heap_lock->owned_by_self(), "Locking discipline.");
ysr@777 350
apetrusenko@980 351 initialize_gc_policy_counters();
apetrusenko@980 352
ysr@777 353 if (G1Gen) {
ysr@777 354 _in_young_gc_mode = true;
ysr@777 355
tonyp@1720 356 G1YoungGenSizer sizer;
tonyp@1720 357 size_t initial_region_num = sizer.initial_young_region_num();
tonyp@1720 358
tonyp@1720 359 if (UseAdaptiveSizePolicy) {
ysr@777 360 set_adaptive_young_list_length(true);
ysr@777 361 _young_list_fixed_length = 0;
ysr@777 362 } else {
ysr@777 363 set_adaptive_young_list_length(false);
tonyp@1720 364 _young_list_fixed_length = initial_region_num;
ysr@777 365 }
johnc@1829 366 _free_regions_at_end_of_collection = _g1->free_regions();
johnc@1829 367 calculate_young_list_min_length();
johnc@1829 368 guarantee( _young_list_min_length == 0, "invariant, not enough info" );
johnc@1829 369 calculate_young_list_target_length();
johnc@1829 370 } else {
ysr@777 371 _young_list_fixed_length = 0;
ysr@777 372 _in_young_gc_mode = false;
ysr@777 373 }
johnc@1829 374
johnc@1829 375 // We may immediately start allocating regions and placing them on the
johnc@1829 376 // collection set list. Initialize the per-collection set info
johnc@1829 377 start_incremental_cset_building();
ysr@777 378 }
ysr@777 379
apetrusenko@980 380 // Create the jstat counters for the policy.
apetrusenko@980 381 void G1CollectorPolicy::initialize_gc_policy_counters()
apetrusenko@980 382 {
apetrusenko@980 383 _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
apetrusenko@980 384 }
apetrusenko@980 385
ysr@777 386 void G1CollectorPolicy::calculate_young_list_min_length() {
ysr@777 387 _young_list_min_length = 0;
ysr@777 388
ysr@777 389 if (!adaptive_young_list_length())
ysr@777 390 return;
ysr@777 391
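// Illustration: if the application has been allocating 0.2 regions/ms and the
// MMU tracker says the next pause can start no sooner than 100ms from now, we
// need room for about ceil(0.2 * 100) = 20 more young regions on top of the
// current young list length.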
ysr@777 392 if (_alloc_rate_ms_seq->num() > 3) {
ysr@777 393 double now_sec = os::elapsedTime();
ysr@777 394 double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
ysr@777 395 double alloc_rate_ms = predict_alloc_rate_ms();
ysr@777 396 int min_regions = (int) ceil(alloc_rate_ms * when_ms);
johnc@1829 397 int current_region_num = (int) _g1->young_list()->length();
ysr@777 398 _young_list_min_length = min_regions + current_region_num;
ysr@777 399 }
ysr@777 400 }
ysr@777 401
johnc@1829 402 void G1CollectorPolicy::calculate_young_list_target_length() {
ysr@777 403 if (adaptive_young_list_length()) {
ysr@777 404 size_t rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
johnc@1829 405 calculate_young_list_target_length(rs_lengths);
ysr@777 406 } else {
ysr@777 407 if (full_young_gcs())
ysr@777 408 _young_list_target_length = _young_list_fixed_length;
ysr@777 409 else
ysr@777 410 _young_list_target_length = _young_list_fixed_length / 2;
johnc@1829 411
ysr@777 412 _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
ysr@777 413 }
apetrusenko@980 414 calculate_survivors_policy();
ysr@777 415 }
ysr@777 416
johnc@1829 417 void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {
ysr@777 418 guarantee( adaptive_young_list_length(), "pre-condition" );
johnc@1829 419 guarantee( !_in_marking_window || !_last_full_young_gc, "invariant" );
ysr@777 420
ysr@777 421 double start_time_sec = os::elapsedTime();
tonyp@1717 422 size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
ysr@777 423 min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
ysr@777 424 size_t reserve_regions =
ysr@777 425 (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
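// Illustration: with G1ReservePercent == 10 and 1000 heap regions the policy
// keeps 100 regions in reserve; the percentage is clamped to the range
// [2, 50] by the two lines above.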
ysr@777 426
ysr@777 427 if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
ysr@777 428 // we are in fully-young mode and there are free regions in the heap
ysr@777 429
apetrusenko@980 430 double survivor_regions_evac_time =
apetrusenko@980 431 predict_survivor_regions_evac_time();
apetrusenko@980 432
ysr@777 433 double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
ysr@777 434 size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
ysr@777 435 size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
johnc@1829 436 size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
apetrusenko@980 437 double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
apetrusenko@980 438 + survivor_regions_evac_time;
johnc@1829 439
ysr@777 440 // the result
ysr@777 441 size_t final_young_length = 0;
johnc@1829 442
johnc@1829 443 size_t init_free_regions =
johnc@1829 444 MAX2((size_t)0, _free_regions_at_end_of_collection - reserve_regions);
johnc@1829 445
johnc@1829 446 // if we're still under the pause target...
johnc@1829 447 if (base_time_ms <= target_pause_time_ms) {
johnc@1829 448 // We make sure that the shortest young length that makes sense
johnc@1829 449 // fits within the target pause time.
johnc@1829 450 size_t min_young_length = 1;
johnc@1829 451
johnc@1829 452 if (predict_will_fit(min_young_length, base_time_ms,
johnc@1829 453 init_free_regions, target_pause_time_ms)) {
johnc@1829 454 // The shortest young length will fit within the target pause time;
johnc@1829 455 // we'll now check whether the absolute maximum number of young
johnc@1829 456 // regions will fit in the target pause time. If not, we'll do
johnc@1829 457 // a binary search between min_young_length and max_young_length
johnc@1829 458 size_t abs_max_young_length = _free_regions_at_end_of_collection - 1;
johnc@1829 459 size_t max_young_length = abs_max_young_length;
johnc@1829 460
johnc@1829 461 if (max_young_length > min_young_length) {
johnc@1829 462 // Let's check if the initial max young length will fit within the
johnc@1829 463 // target pause. If so then there is no need to search for a maximal
johnc@1829 464 // young length - we'll return the initial maximum
johnc@1829 465
johnc@1829 466 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 467 init_free_regions, target_pause_time_ms)) {
johnc@1829 468 // The maximum young length will satisfy the target pause time.
johnc@1829 469 // We are done so set min young length to this maximum length.
johnc@1829 470 // The code after the loop will then set final_young_length using
johnc@1829 471 // the value cached in the minimum length.
johnc@1829 472 min_young_length = max_young_length;
johnc@1829 473 } else {
johnc@1829 474 // The maximum possible number of young regions will not fit within
johnc@1829 475 // the target pause time so let's search....
johnc@1829 476
johnc@1829 477 size_t diff = (max_young_length - min_young_length) / 2;
johnc@1829 478 max_young_length = min_young_length + diff;
johnc@1829 479
johnc@1829 480 while (max_young_length > min_young_length) {
johnc@1829 481 if (predict_will_fit(max_young_length, base_time_ms,
johnc@1829 482 init_free_regions, target_pause_time_ms)) {
johnc@1829 483
johnc@1829 484 // The current max young length will fit within the target
johnc@1829 485 // pause time. Note that we do not exit the loop here. Setting
johnc@1829 486 // min = max and then increasing max below means that we
johnc@1829 487 // will continue searching for an upper bound in the
johnc@1829 488 // range [max..max+diff].
johnc@1829 489 min_young_length = max_young_length;
johnc@1829 490 }
johnc@1829 491 diff = (max_young_length - min_young_length) / 2;
johnc@1829 492 max_young_length = min_young_length + diff;
johnc@1829 493 }
johnc@1829 494 // the above loop found a maximal young length that will fit
johnc@1829 495 // within the target pause time.
johnc@1829 496 }
johnc@1829 497 assert(min_young_length <= abs_max_young_length, "just checking");
johnc@1829 498 }
johnc@1829 499 final_young_length = min_young_length;
johnc@1829 500 }
ysr@777 501 }
johnc@1829 502 // and we're done!
ysr@777 503
ysr@777 504 // we should have at least one region in the target young length
apetrusenko@980 505 _young_list_target_length =
apetrusenko@980 506 MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
ysr@777 507
ysr@777 508 // let's keep an eye on how long we spend on this calculation
ysr@777 509 // right now, I assume that we'll print it when we need it; we
ysr@777 510 // should really add it to the breakdown of a pause
ysr@777 511 double end_time_sec = os::elapsedTime();
ysr@777 512 double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
ysr@777 513
johnc@1829 514 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 515 // leave this in for debugging, just in case
johnc@1829 516 gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT ", "
johnc@1829 517 "elapsed %1.2lf ms, (%s%s) " SIZE_FORMAT " " SIZE_FORMAT,
ysr@777 518 target_pause_time_ms,
johnc@1829 519 _young_list_target_length,
ysr@777 520 elapsed_time_ms,
ysr@777 521 full_young_gcs() ? "full" : "partial",
tonyp@1794 522 during_initial_mark_pause() ? " i-m" : "",
apetrusenko@980 523 _in_marking_window,
apetrusenko@980 524 _in_marking_window_im);
johnc@1829 525 #endif // TRACE_CALC_YOUNG_LENGTH
ysr@777 526
ysr@777 527 if (_young_list_target_length < _young_list_min_length) {
johnc@1829 528 // bummer; this means that, if we do a pause when the maximal
johnc@1829 529 // length dictates, we'll violate the pause spacing target (the
ysr@777 530 // min length was calculated based on the application's current
ysr@777 531 // alloc rate);
ysr@777 532
ysr@777 533 // so, we have to bite the bullet, and allocate the minimum
ysr@777 534 // number. We'll violate our target, but we just can't meet it.
ysr@777 535
johnc@1829 536 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 537 // leave this in for debugging, just in case
ysr@777 538 gclog_or_tty->print_cr("adjusted target length from "
johnc@1829 539 SIZE_FORMAT " to " SIZE_FORMAT,
johnc@1829 540 _young_list_target_length, _young_list_min_length);
johnc@1829 541 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 542
johnc@1829 543 _young_list_target_length = _young_list_min_length;
ysr@777 544 }
ysr@777 545 } else {
ysr@777 546 // we are in a partially-young mode or we've run out of regions (due
ysr@777 547 // to evacuation failure)
ysr@777 548
johnc@1829 549 #ifdef TRACE_CALC_YOUNG_LENGTH
ysr@777 550 // leave this in for debugging, just in case
ysr@777 551 gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT,
johnc@1829 552 _young_list_min_length);
johnc@1829 553 #endif // TRACE_CALC_YOUNG_LENGTH
johnc@1829 554 // we'll do the pause as soon as possible by choosing the minimum
ysr@777 555 _young_list_target_length =
ysr@777 556 MAX2(_young_list_min_length, (size_t) 1);
ysr@777 557 }
ysr@777 558
ysr@777 559 _rs_lengths_prediction = rs_lengths;
ysr@777 560 }
ysr@777 561
johnc@1829 562 // This is used by: calculate_young_list_target_length(rs_length). It
johnc@1829 563 // returns true iff:
johnc@1829 564 // the predicted pause time for the given young list will not overflow
johnc@1829 565 // the target pause time
johnc@1829 566 // and:
johnc@1829 567 // the predicted amount of surviving data will not overflow the
johnc@1829 568 // amount of free space available for survivor regions.
johnc@1829 569 //
ysr@777 570 bool
johnc@1829 571 G1CollectorPolicy::predict_will_fit(size_t young_length,
johnc@1829 572 double base_time_ms,
johnc@1829 573 size_t init_free_regions,
johnc@1829 574 double target_pause_time_ms) {
ysr@777 575
ysr@777 576 if (young_length >= init_free_regions)
ysr@777 577 // end condition 1: not enough space for the young regions
ysr@777 578 return false;
ysr@777 579
ysr@777 580 double accum_surv_rate_adj = 0.0;
ysr@777 581 double accum_surv_rate =
ysr@777 582 accum_yg_surv_rate_pred((int)(young_length - 1)) - accum_surv_rate_adj;
johnc@1829 583
ysr@777 584 size_t bytes_to_copy =
ysr@777 585 (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
johnc@1829 586
ysr@777 587 double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
johnc@1829 588
ysr@777 589 double young_other_time_ms =
johnc@1829 590 predict_young_other_time_ms(young_length);
johnc@1829 591
ysr@777 592 double pause_time_ms =
johnc@1829 593 base_time_ms + copy_time_ms + young_other_time_ms;
ysr@777 594
ysr@777 595 if (pause_time_ms > target_pause_time_ms)
ysr@777 596 // end condition 2: over the target pause time
ysr@777 597 return false;
ysr@777 598
ysr@777 599 size_t free_bytes =
ysr@777 600 (init_free_regions - young_length) * HeapRegion::GrainBytes;
ysr@777 601
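// Illustration of the check below: with sigma() == 0.5 we require at least
// (2.0 + 0.5) = 2.5 times the predicted bytes to copy in free to-space before
// this young length is considered safe.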
ysr@777 602 if ((2.0 + sigma()) * (double) bytes_to_copy > (double) free_bytes)
ysr@777 603 // end condition 3: out of to-space (conservatively)
ysr@777 604 return false;
ysr@777 605
ysr@777 606 // success!
ysr@777 607 return true;
ysr@777 608 }
ysr@777 609
apetrusenko@980 610 double G1CollectorPolicy::predict_survivor_regions_evac_time() {
apetrusenko@980 611 double survivor_regions_evac_time = 0.0;
apetrusenko@980 612 for (HeapRegion * r = _recorded_survivor_head;
apetrusenko@980 613 r != NULL && r != _recorded_survivor_tail->get_next_young_region();
apetrusenko@980 614 r = r->get_next_young_region()) {
apetrusenko@980 615 survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
apetrusenko@980 616 }
apetrusenko@980 617 return survivor_regions_evac_time;
apetrusenko@980 618 }
apetrusenko@980 619
ysr@777 620 void G1CollectorPolicy::check_prediction_validity() {
ysr@777 621 guarantee( adaptive_young_list_length(), "should not call this otherwise" );
ysr@777 622
johnc@1829 623 size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
ysr@777 624 if (rs_lengths > _rs_lengths_prediction) {
ysr@777 625 // add 10% to avoid having to recalculate often
ysr@777 626 size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
johnc@1829 627 calculate_young_list_target_length(rs_lengths_prediction);
ysr@777 628 }
ysr@777 629 }
ysr@777 630
ysr@777 631 HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
ysr@777 632 bool is_tlab,
ysr@777 633 bool* gc_overhead_limit_was_exceeded) {
ysr@777 634 guarantee(false, "Not using this policy feature yet.");
ysr@777 635 return NULL;
ysr@777 636 }
ysr@777 637
ysr@777 638 // This method controls how a collector handles one or more
ysr@777 639 // of its generations being fully allocated.
ysr@777 640 HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
ysr@777 641 bool is_tlab) {
ysr@777 642 guarantee(false, "Not using this policy feature yet.");
ysr@777 643 return NULL;
ysr@777 644 }
ysr@777 645
ysr@777 646
ysr@777 647 #ifndef PRODUCT
ysr@777 648 bool G1CollectorPolicy::verify_young_ages() {
johnc@1829 649 HeapRegion* head = _g1->young_list()->first_region();
ysr@777 650 return
ysr@777 651 verify_young_ages(head, _short_lived_surv_rate_group);
ysr@777 652 // also call verify_young_ages on any additional surv rate groups
ysr@777 653 }
ysr@777 654
ysr@777 655 bool
ysr@777 656 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
ysr@777 657 SurvRateGroup *surv_rate_group) {
ysr@777 658 guarantee( surv_rate_group != NULL, "pre-condition" );
ysr@777 659
ysr@777 660 const char* name = surv_rate_group->name();
ysr@777 661 bool ret = true;
ysr@777 662 int prev_age = -1;
ysr@777 663
ysr@777 664 for (HeapRegion* curr = head;
ysr@777 665 curr != NULL;
ysr@777 666 curr = curr->get_next_young_region()) {
ysr@777 667 SurvRateGroup* group = curr->surv_rate_group();
ysr@777 668 if (group == NULL && !curr->is_survivor()) {
ysr@777 669 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
ysr@777 670 ret = false;
ysr@777 671 }
ysr@777 672
ysr@777 673 if (surv_rate_group == group) {
ysr@777 674 int age = curr->age_in_surv_rate_group();
ysr@777 675
ysr@777 676 if (age < 0) {
ysr@777 677 gclog_or_tty->print_cr("## %s: encountered negative age", name);
ysr@777 678 ret = false;
ysr@777 679 }
ysr@777 680
ysr@777 681 if (age <= prev_age) {
ysr@777 682 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
ysr@777 683 "(%d, %d)", name, age, prev_age);
ysr@777 684 ret = false;
ysr@777 685 }
ysr@777 686 prev_age = age;
ysr@777 687 }
ysr@777 688 }
ysr@777 689
ysr@777 690 return ret;
ysr@777 691 }
ysr@777 692 #endif // PRODUCT
ysr@777 693
ysr@777 694 void G1CollectorPolicy::record_full_collection_start() {
ysr@777 695 _cur_collection_start_sec = os::elapsedTime();
ysr@777 696 // Release the future to-space so that it is available for compaction into.
ysr@777 697 _g1->set_full_collection();
ysr@777 698 }
ysr@777 699
ysr@777 700 void G1CollectorPolicy::record_full_collection_end() {
ysr@777 701 // Consider this like a collection pause for the purposes of allocation
ysr@777 702 // since last pause.
ysr@777 703 double end_sec = os::elapsedTime();
ysr@777 704 double full_gc_time_sec = end_sec - _cur_collection_start_sec;
ysr@777 705 double full_gc_time_ms = full_gc_time_sec * 1000.0;
ysr@777 706
ysr@777 707 _all_full_gc_times_ms->add(full_gc_time_ms);
ysr@777 708
tonyp@1030 709 update_recent_gc_times(end_sec, full_gc_time_ms);
ysr@777 710
ysr@777 711 _g1->clear_full_collection();
ysr@777 712
ysr@777 713 // "Nuke" the heuristics that control the fully/partially young GC
ysr@777 714 // transitions and make sure we start with fully young GCs after the
ysr@777 715 // Full GC.
ysr@777 716 set_full_young_gcs(true);
ysr@777 717 _last_full_young_gc = false;
ysr@777 718 _should_revert_to_full_young_gcs = false;
tonyp@1794 719 clear_initiate_conc_mark_if_possible();
tonyp@1794 720 clear_during_initial_mark_pause();
ysr@777 721 _known_garbage_bytes = 0;
ysr@777 722 _known_garbage_ratio = 0.0;
ysr@777 723 _in_marking_window = false;
ysr@777 724 _in_marking_window_im = false;
ysr@777 725
ysr@777 726 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 727 // also call this on any additional surv rate groups
ysr@777 728
apetrusenko@980 729 record_survivor_regions(0, NULL, NULL);
apetrusenko@980 730
ysr@777 731 _prev_region_num_young = _region_num_young;
ysr@777 732 _prev_region_num_tenured = _region_num_tenured;
ysr@777 733
ysr@777 734 _free_regions_at_end_of_collection = _g1->free_regions();
apetrusenko@980 735 // Reset survivors SurvRateGroup.
apetrusenko@980 736 _survivor_surv_rate_group->reset();
ysr@777 737 calculate_young_list_min_length();
johnc@1829 738 calculate_young_list_target_length();
ysr@777 739 }
ysr@777 740
ysr@777 741 void G1CollectorPolicy::record_before_bytes(size_t bytes) {
ysr@777 742 _bytes_in_to_space_before_gc += bytes;
ysr@777 743 }
ysr@777 744
ysr@777 745 void G1CollectorPolicy::record_after_bytes(size_t bytes) {
ysr@777 746 _bytes_in_to_space_after_gc += bytes;
ysr@777 747 }
ysr@777 748
ysr@777 749 void G1CollectorPolicy::record_stop_world_start() {
ysr@777 750 _stop_world_start = os::elapsedTime();
ysr@777 751 }
ysr@777 752
ysr@777 753 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
ysr@777 754 size_t start_used) {
ysr@777 755 if (PrintGCDetails) {
ysr@777 756 gclog_or_tty->stamp(PrintGCTimeStamps);
ysr@777 757 gclog_or_tty->print("[GC pause");
ysr@777 758 if (in_young_gc_mode())
ysr@777 759 gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
ysr@777 760 }
ysr@777 761
ysr@777 762 assert(_g1->used_regions() == _g1->recalculate_used_regions(),
ysr@777 763 "sanity");
tonyp@1071 764 assert(_g1->used() == _g1->recalculate_used(), "sanity");
ysr@777 765
ysr@777 766 double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
ysr@777 767 _all_stop_world_times_ms->add(s_w_t_ms);
ysr@777 768 _stop_world_start = 0.0;
ysr@777 769
ysr@777 770 _cur_collection_start_sec = start_time_sec;
ysr@777 771 _cur_collection_pause_used_at_start_bytes = start_used;
ysr@777 772 _cur_collection_pause_used_regions_at_start = _g1->used_regions();
ysr@777 773 _pending_cards = _g1->pending_card_num();
ysr@777 774 _max_pending_cards = _g1->max_pending_card_num();
ysr@777 775
ysr@777 776 _bytes_in_to_space_before_gc = 0;
ysr@777 777 _bytes_in_to_space_after_gc = 0;
ysr@777 778 _bytes_in_collection_set_before_gc = 0;
ysr@777 779
ysr@777 780 #ifdef DEBUG
ysr@777 781 // initialise these to something well known so that we can spot
ysr@777 782 // if they are not set properly
ysr@777 783
ysr@777 784 for (int i = 0; i < _parallel_gc_threads; ++i) {
ysr@777 785 _par_last_ext_root_scan_times_ms[i] = -666.0;
ysr@777 786 _par_last_mark_stack_scan_times_ms[i] = -666.0;
ysr@777 787 _par_last_update_rs_start_times_ms[i] = -666.0;
ysr@777 788 _par_last_update_rs_times_ms[i] = -666.0;
ysr@777 789 _par_last_update_rs_processed_buffers[i] = -666.0;
ysr@777 790 _par_last_scan_rs_start_times_ms[i] = -666.0;
ysr@777 791 _par_last_scan_rs_times_ms[i] = -666.0;
ysr@777 792 _par_last_scan_new_refs_times_ms[i] = -666.0;
ysr@777 793 _par_last_obj_copy_times_ms[i] = -666.0;
ysr@777 794 _par_last_termination_times_ms[i] = -666.0;
ysr@777 795 }
ysr@777 796 #endif
ysr@777 797
ysr@777 798 for (int i = 0; i < _aux_num; ++i) {
ysr@777 799 _cur_aux_times_ms[i] = 0.0;
ysr@777 800 _cur_aux_times_set[i] = false;
ysr@777 801 }
ysr@777 802
ysr@777 803 _satb_drain_time_set = false;
ysr@777 804 _last_satb_drain_processed_buffers = -1;
ysr@777 805
ysr@777 806 if (in_young_gc_mode())
ysr@777 807 _last_young_gc_full = false;
ysr@777 808
ysr@777 809 // do that for any other surv rate groups
ysr@777 810 _short_lived_surv_rate_group->stop_adding_regions();
tonyp@1717 811 _survivors_age_table.clear();
apetrusenko@980 812
ysr@777 813 assert( verify_young_ages(), "region age verification" );
ysr@777 814 }
ysr@777 815
ysr@777 816 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
ysr@777 817 _mark_closure_time_ms = mark_closure_time_ms;
ysr@777 818 }
ysr@777 819
ysr@777 820 void G1CollectorPolicy::record_concurrent_mark_init_start() {
ysr@777 821 _mark_init_start_sec = os::elapsedTime();
ysr@777 822 guarantee(!in_young_gc_mode(), "should not be here in young GC mode");
ysr@777 823 }
ysr@777 824
ysr@777 825 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
ysr@777 826 mark_init_elapsed_time_ms) {
ysr@777 827 _during_marking = true;
tonyp@1794 828 assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
tonyp@1794 829 clear_during_initial_mark_pause();
ysr@777 830 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
ysr@777 831 }
ysr@777 832
ysr@777 833 void G1CollectorPolicy::record_concurrent_mark_init_end() {
ysr@777 834 double end_time_sec = os::elapsedTime();
ysr@777 835 double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
ysr@777 836 _concurrent_mark_init_times_ms->add(elapsed_time_ms);
ysr@777 837 record_concurrent_mark_init_end_pre(elapsed_time_ms);
ysr@777 838
ysr@777 839 _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
ysr@777 840 }
ysr@777 841
ysr@777 842 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
ysr@777 843 _mark_remark_start_sec = os::elapsedTime();
ysr@777 844 _during_marking = false;
ysr@777 845 }
ysr@777 846
ysr@777 847 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
ysr@777 848 double end_time_sec = os::elapsedTime();
ysr@777 849 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
ysr@777 850 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
ysr@777 851 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 852 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 853
ysr@777 854 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
ysr@777 855 }
ysr@777 856
ysr@777 857 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
ysr@777 858 _mark_cleanup_start_sec = os::elapsedTime();
ysr@777 859 }
ysr@777 860
ysr@777 861 void
ysr@777 862 G1CollectorPolicy::record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 863 size_t max_live_bytes) {
ysr@777 864 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 865 record_concurrent_mark_cleanup_end_work2();
ysr@777 866 }
ysr@777 867
ysr@777 868 void
ysr@777 869 G1CollectorPolicy::
ysr@777 870 record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 871 size_t max_live_bytes) {
ysr@777 872 if (_n_marks < 2) _n_marks++;
ysr@777 873 if (G1PolicyVerbose > 0)
ysr@777 874 gclog_or_tty->print_cr("At end of marking, max_live is " SIZE_FORMAT " MB "
ysr@777 875 " (of " SIZE_FORMAT " MB heap).",
ysr@777 876 max_live_bytes/M, _g1->capacity()/M);
ysr@777 877 }
ysr@777 878
ysr@777 879 // The important thing about this is that it includes "os::elapsedTime".
ysr@777 880 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
ysr@777 881 double end_time_sec = os::elapsedTime();
ysr@777 882 double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
ysr@777 883 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
ysr@777 884 _cur_mark_stop_world_time_ms += elapsed_time_ms;
ysr@777 885 _prev_collection_pause_end_ms += elapsed_time_ms;
ysr@777 886
ysr@777 887 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
ysr@777 888
ysr@777 889 _num_markings++;
ysr@777 890
ysr@777 891 // We did a marking, so reset the "since_last_mark" variables.
ysr@777 892 double considerConcMarkCost = 1.0;
ysr@777 893 // If there are available processors, concurrent activity is free...
ysr@777 894 if (Threads::number_of_non_daemon_threads() * 2 <
ysr@777 895 os::active_processor_count()) {
ysr@777 896 considerConcMarkCost = 0.0;
ysr@777 897 }
ysr@777 898 _n_pauses_at_mark_end = _n_pauses;
ysr@777 899 _n_marks_since_last_pause++;
ysr@777 900 }
ysr@777 901
ysr@777 902 void
ysr@777 903 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
ysr@777 904 if (in_young_gc_mode()) {
ysr@777 905 _should_revert_to_full_young_gcs = false;
ysr@777 906 _last_full_young_gc = true;
ysr@777 907 _in_marking_window = false;
ysr@777 908 if (adaptive_young_list_length())
johnc@1829 909 calculate_young_list_target_length();
ysr@777 910 }
ysr@777 911 }
ysr@777 912
ysr@777 913 void G1CollectorPolicy::record_concurrent_pause() {
ysr@777 914 if (_stop_world_start > 0.0) {
ysr@777 915 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
ysr@777 916 _all_yield_times_ms->add(yield_ms);
ysr@777 917 }
ysr@777 918 }
ysr@777 919
ysr@777 920 void G1CollectorPolicy::record_concurrent_pause_end() {
ysr@777 921 }
ysr@777 922
ysr@777 923 void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
ysr@777 924 _cur_CH_strong_roots_end_sec = os::elapsedTime();
ysr@777 925 _cur_CH_strong_roots_dur_ms =
ysr@777 926 (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
ysr@777 927 }
ysr@777 928
ysr@777 929 void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
ysr@777 930 _cur_G1_strong_roots_end_sec = os::elapsedTime();
ysr@777 931 _cur_G1_strong_roots_dur_ms =
ysr@777 932 (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
ysr@777 933 }
ysr@777 934
ysr@777 935 template<class T>
ysr@777 936 T sum_of(T* sum_arr, int start, int n, int N) {
ysr@777 937 T sum = (T)0;
ysr@777 938 for (int i = 0; i < n; i++) {
ysr@777 939 int j = (start + i) % N;
ysr@777 940 sum += sum_arr[j];
ysr@777 941 }
ysr@777 942 return sum;
ysr@777 943 }
ysr@777 944
ysr@777 945 void G1CollectorPolicy::print_par_stats (int level,
ysr@777 946 const char* str,
ysr@777 947 double* data,
ysr@777 948 bool summary) {
ysr@777 949 double min = data[0], max = data[0];
ysr@777 950 double total = 0.0;
ysr@777 951 int j;
ysr@777 952 for (j = 0; j < level; ++j)
ysr@777 953 gclog_or_tty->print(" ");
ysr@777 954 gclog_or_tty->print("[%s (ms):", str);
ysr@777 955 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 956 double val = data[i];
ysr@777 957 if (val < min)
ysr@777 958 min = val;
ysr@777 959 if (val > max)
ysr@777 960 max = val;
ysr@777 961 total += val;
ysr@777 962 gclog_or_tty->print(" %3.1lf", val);
ysr@777 963 }
ysr@777 964 if (summary) {
ysr@777 965 gclog_or_tty->print_cr("");
ysr@777 966 double avg = total / (double) ParallelGCThreads;
ysr@777 967 gclog_or_tty->print(" ");
ysr@777 968 for (j = 0; j < level; ++j)
ysr@777 969 gclog_or_tty->print(" ");
ysr@777 970 gclog_or_tty->print("Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf",
ysr@777 971 avg, min, max);
ysr@777 972 }
ysr@777 973 gclog_or_tty->print_cr("]");
ysr@777 974 }
ysr@777 975
ysr@777 976 void G1CollectorPolicy::print_par_buffers (int level,
ysr@777 977 const char* str,
ysr@777 978 double* data,
ysr@777 979 bool summary) {
ysr@777 980 double min = data[0], max = data[0];
ysr@777 981 double total = 0.0;
ysr@777 982 int j;
ysr@777 983 for (j = 0; j < level; ++j)
ysr@777 984 gclog_or_tty->print(" ");
ysr@777 985 gclog_or_tty->print("[%s :", str);
ysr@777 986 for (uint i = 0; i < ParallelGCThreads; ++i) {
ysr@777 987 double val = data[i];
ysr@777 988 if (val < min)
ysr@777 989 min = val;
ysr@777 990 if (val > max)
ysr@777 991 max = val;
ysr@777 992 total += val;
ysr@777 993 gclog_or_tty->print(" %d", (int) val);
ysr@777 994 }
ysr@777 995 if (summary) {
ysr@777 996 gclog_or_tty->print_cr("");
ysr@777 997 double avg = total / (double) ParallelGCThreads;
ysr@777 998 gclog_or_tty->print(" ");
ysr@777 999 for (j = 0; j < level; ++j)
ysr@777 1000 gclog_or_tty->print(" ");
ysr@777 1001 gclog_or_tty->print("Sum: %d, Avg: %d, Min: %d, Max: %d",
ysr@777 1002 (int)total, (int)avg, (int)min, (int)max);
ysr@777 1003 }
ysr@777 1004 gclog_or_tty->print_cr("]");
ysr@777 1005 }
ysr@777 1006
ysr@777 1007 void G1CollectorPolicy::print_stats (int level,
ysr@777 1008 const char* str,
ysr@777 1009 double value) {
ysr@777 1010 for (int j = 0; j < level; ++j)
ysr@777 1011 gclog_or_tty->print(" ");
ysr@777 1012 gclog_or_tty->print_cr("[%s: %5.1lf ms]", str, value);
ysr@777 1013 }
ysr@777 1014
ysr@777 1015 void G1CollectorPolicy::print_stats (int level,
ysr@777 1016 const char* str,
ysr@777 1017 int value) {
ysr@777 1018 for (int j = 0; j < level; ++j)
ysr@777 1019 gclog_or_tty->print(" ");
ysr@777 1020 gclog_or_tty->print_cr("[%s: %d]", str, value);
ysr@777 1021 }
ysr@777 1022
ysr@777 1023 double G1CollectorPolicy::avg_value (double* data) {
ysr@777 1024 if (ParallelGCThreads > 0) {
ysr@777 1025 double ret = 0.0;
ysr@777 1026 for (uint i = 0; i < ParallelGCThreads; ++i)
ysr@777 1027 ret += data[i];
ysr@777 1028 return ret / (double) ParallelGCThreads;
ysr@777 1029 } else {
ysr@777 1030 return data[0];
ysr@777 1031 }
ysr@777 1032 }
ysr@777 1033
ysr@777 1034 double G1CollectorPolicy::max_value (double* data) {
ysr@777 1035 if (ParallelGCThreads > 0) {
ysr@777 1036 double ret = data[0];
ysr@777 1037 for (uint i = 1; i < ParallelGCThreads; ++i)
ysr@777 1038 if (data[i] > ret)
ysr@777 1039 ret = data[i];
ysr@777 1040 return ret;
ysr@777 1041 } else {
ysr@777 1042 return data[0];
ysr@777 1043 }
ysr@777 1044 }
ysr@777 1045
ysr@777 1046 double G1CollectorPolicy::sum_of_values (double* data) {
ysr@777 1047 if (ParallelGCThreads > 0) {
ysr@777 1048 double sum = 0.0;
ysr@777 1049 for (uint i = 0; i < ParallelGCThreads; i++)
ysr@777 1050 sum += data[i];
ysr@777 1051 return sum;
ysr@777 1052 } else {
ysr@777 1053 return data[0];
ysr@777 1054 }
ysr@777 1055 }
ysr@777 1056
ysr@777 1057 double G1CollectorPolicy::max_sum (double* data1,
ysr@777 1058 double* data2) {
ysr@777 1059 double ret = data1[0] + data2[0];
ysr@777 1060
ysr@777 1061 if (ParallelGCThreads > 0) {
ysr@777 1062 for (uint i = 1; i < ParallelGCThreads; ++i) {
ysr@777 1063 double data = data1[i] + data2[i];
ysr@777 1064 if (data > ret)
ysr@777 1065 ret = data;
ysr@777 1066 }
ysr@777 1067 }
ysr@777 1068 return ret;
ysr@777 1069 }
ysr@777 1070
ysr@777 1071 // Anything below this threshold is considered to be zero
ysr@777 1072 #define MIN_TIMER_GRANULARITY 0.0000001
ysr@777 1073
apetrusenko@1112 1074 void G1CollectorPolicy::record_collection_pause_end(bool abandoned) {
ysr@777 1075 double end_time_sec = os::elapsedTime();
ysr@777 1076 double elapsed_ms = _last_pause_time_ms;
ysr@777 1077 bool parallel = ParallelGCThreads > 0;
ysr@777 1078 double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
ysr@777 1079 size_t rs_size =
ysr@777 1080 _cur_collection_pause_used_regions_at_start - collection_set_size();
ysr@777 1081 size_t cur_used_bytes = _g1->used();
ysr@777 1082 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
ysr@777 1083 bool last_pause_included_initial_mark = false;
tonyp@1030 1084 bool update_stats = !abandoned && !_g1->evacuation_failed();
ysr@777 1085
ysr@777 1086 #ifndef PRODUCT
ysr@777 1087 if (G1YoungSurvRateVerbose) {
ysr@777 1088 gclog_or_tty->print_cr("");
ysr@777 1089 _short_lived_surv_rate_group->print();
ysr@777 1090 // do that for any other surv rate groups too
ysr@777 1091 }
ysr@777 1092 #endif // PRODUCT
ysr@777 1093
ysr@777 1094 if (in_young_gc_mode()) {
tonyp@1794 1095 last_pause_included_initial_mark = during_initial_mark_pause();
ysr@777 1096 if (last_pause_included_initial_mark)
ysr@777 1097 record_concurrent_mark_init_end_pre(0.0);
ysr@777 1098
ysr@777 1099 size_t min_used_targ =
tonyp@1718 1100 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
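// Illustration: with a 1024M heap and InitiatingHeapOccupancyPercent == 45,
// min_used_targ is about 460M; a marking cycle is only requested once
// occupancy is above that and still growing (see the checks below).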
ysr@777 1101
tonyp@1794 1102
tonyp@1794 1103 if (!_g1->mark_in_progress() && !_last_full_young_gc) {
tonyp@1794 1104 assert(!last_pause_included_initial_mark, "invariant");
tonyp@1794 1105 if (cur_used_bytes > min_used_targ &&
tonyp@1794 1106 cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
tonyp@1794 1107 assert(!during_initial_mark_pause(), "we should not see this here");
tonyp@1794 1108
tonyp@1794 1109 // Note: this might have already been set, if during the last
tonyp@1794 1110 // pause we decided to start a cycle but at the beginning of
tonyp@1794 1111 // this pause we decided to postpone it. That's OK.
tonyp@1794 1112 set_initiate_conc_mark_if_possible();
ysr@777 1113 }
ysr@777 1114 }
ysr@777 1115
ysr@777 1116 _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
ysr@777 1117 }
ysr@777 1118
ysr@777 1119 _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
ysr@777 1120 end_time_sec, false);
ysr@777 1121
ysr@777 1122 guarantee(_cur_collection_pause_used_regions_at_start >=
ysr@777 1123 collection_set_size(),
ysr@777 1124 "Negative RS size?");
ysr@777 1125
ysr@777 1126 // This assert is exempted when we're doing parallel collection pauses,
ysr@777 1127 // because the fragmentation caused by the parallel GC allocation buffers
ysr@777 1128 // can lead to more memory being used during collection than was used
ysr@777 1129 // before. Best leave this out until the fragmentation problem is fixed.
ysr@777 1130 // Pauses in which evacuation failed can also lead to negative
ysr@777 1131 // collections, since no space is reclaimed from a region containing an
ysr@777 1132 // object whose evacuation failed.
ysr@777 1133 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1134 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1135 // (DLD, 10/05.)
ysr@777 1136 assert((true || parallel) // Always using GC LABs now.
ysr@777 1137 || _g1->evacuation_failed()
ysr@777 1138 || _cur_collection_pause_used_at_start_bytes >= cur_used_bytes,
ysr@777 1139 "Negative collection");
ysr@777 1140
ysr@777 1141 size_t freed_bytes =
ysr@777 1142 _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
ysr@777 1143 size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
johnc@1829 1144
ysr@777 1145 double survival_fraction =
ysr@777 1146 (double)surviving_bytes/
ysr@777 1147 (double)_collection_set_bytes_used_before;
ysr@777 1148
ysr@777 1149 _n_pauses++;
ysr@777 1150
tonyp@1030 1151 if (update_stats) {
ysr@777 1152 _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms);
ysr@777 1153 _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms);
ysr@777 1154 _recent_evac_times_ms->add(evac_ms);
ysr@777 1155 _recent_pause_times_ms->add(elapsed_ms);
ysr@777 1156
ysr@777 1157 _recent_rs_sizes->add(rs_size);
ysr@777 1158
ysr@777 1159 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1160 // fragmentation can produce negative collections. Same with evac
ysr@777 1161 // failure.
ysr@777 1162 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1163 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1164 // (DLD, 10/05.)
ysr@777 1165 assert((true || parallel)
ysr@777 1166 || _g1->evacuation_failed()
ysr@777 1167 || surviving_bytes <= _collection_set_bytes_used_before,
ysr@777 1168 "Or else negative collection!");
ysr@777 1169 _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
ysr@777 1170 _recent_CS_bytes_surviving->add(surviving_bytes);
ysr@777 1171
ysr@777 1172 // this is where we update the allocation rate of the application
ysr@777 1173 double app_time_ms =
ysr@777 1174 (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
ysr@777 1175 if (app_time_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1176 // This usually happens due to the timer not having the required
ysr@777 1177 // granularity. Some Linuxes are the usual culprits.
ysr@777 1178 // We'll just set it to something (arbitrarily) small.
ysr@777 1179 app_time_ms = 1.0;
ysr@777 1180 }
ysr@777 1181 size_t regions_allocated =
ysr@777 1182 (_region_num_young - _prev_region_num_young) +
ysr@777 1183 (_region_num_tenured - _prev_region_num_tenured);
ysr@777 1184 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
ysr@777 1185 _alloc_rate_ms_seq->add(alloc_rate_ms);
ysr@777 1186 _prev_region_num_young = _region_num_young;
ysr@777 1187 _prev_region_num_tenured = _region_num_tenured;
ysr@777 1188
ysr@777 1189 double interval_ms =
ysr@777 1190 (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
ysr@777 1191 update_recent_gc_times(end_time_sec, elapsed_ms);
ysr@777 1192 _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
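// Illustration: if the recent GCs in the window sum to 400ms and the window
// spans 10 seconds, the ratio is 0.04, i.e. about 4% of recent time was
// spent in GC pauses.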
ysr@1521 1193 if (recent_avg_pause_time_ratio() < 0.0 ||
ysr@1521 1194 (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
ysr@1521 1195 #ifndef PRODUCT
ysr@1521 1196 // Dump info to allow post-facto debugging
ysr@1521 1197 gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
ysr@1521 1198 gclog_or_tty->print_cr("-------------------------------------------");
ysr@1521 1199 gclog_or_tty->print_cr("Recent GC Times (ms):");
ysr@1521 1200 _recent_gc_times_ms->dump();
ysr@1521 1201 gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
ysr@1521 1202 _recent_prev_end_times_for_all_gcs_sec->dump();
ysr@1521 1203 gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
ysr@1521 1204 _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
ysr@1522 1205 // In debug mode, terminate the JVM if the user wants to debug at this point.
ysr@1522 1206 assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
ysr@1522 1207 #endif // !PRODUCT
ysr@1522 1208 // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
ysr@1522 1209 // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
ysr@1521 1210 if (_recent_avg_pause_time_ratio < 0.0) {
ysr@1521 1211 _recent_avg_pause_time_ratio = 0.0;
ysr@1521 1212 } else {
ysr@1521 1213 assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
ysr@1521 1214 _recent_avg_pause_time_ratio = 1.0;
ysr@1521 1215 }
ysr@1521 1216 }
ysr@777 1217 }
ysr@777 1218
ysr@777 1219 if (G1PolicyVerbose > 1) {
ysr@777 1220 gclog_or_tty->print_cr(" Recording collection pause(%d)", _n_pauses);
ysr@777 1221 }
ysr@777 1222
ysr@777 1223 PauseSummary* summary;
apetrusenko@1112 1224 if (abandoned) {
apetrusenko@1112 1225 summary = _abandoned_summary;
apetrusenko@1112 1226 } else {
apetrusenko@1112 1227 summary = _summary;
ysr@777 1228 }
ysr@777 1229
ysr@777 1230 double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
ysr@777 1231 double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
ysr@777 1232 double update_rs_time = avg_value(_par_last_update_rs_times_ms);
ysr@777 1233 double update_rs_processed_buffers =
ysr@777 1234 sum_of_values(_par_last_update_rs_processed_buffers);
ysr@777 1235 double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
ysr@777 1236 double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
ysr@777 1237 double termination_time = avg_value(_par_last_termination_times_ms);
ysr@777 1238
tonyp@1083 1239 double parallel_other_time = _cur_collection_par_time_ms -
tonyp@1083 1240 (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
johnc@1829 1241 scan_rs_time + obj_copy_time + termination_time);
tonyp@1030 1242 if (update_stats) {
ysr@777 1243 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 1244 guarantee(body_summary != NULL, "should not be null!");
ysr@777 1245
ysr@777 1246 if (_satb_drain_time_set)
ysr@777 1247 body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
ysr@777 1248 else
ysr@777 1249 body_summary->record_satb_drain_time_ms(0.0);
ysr@777 1250 body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
ysr@777 1251 body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
ysr@777 1252 body_summary->record_update_rs_time_ms(update_rs_time);
ysr@777 1253 body_summary->record_scan_rs_time_ms(scan_rs_time);
ysr@777 1254 body_summary->record_obj_copy_time_ms(obj_copy_time);
ysr@777 1255 if (parallel) {
ysr@777 1256 body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
ysr@777 1257 body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
ysr@777 1258 body_summary->record_termination_time_ms(termination_time);
ysr@777 1259 body_summary->record_parallel_other_time_ms(parallel_other_time);
ysr@777 1260 }
ysr@777 1261 body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
ysr@777 1262 }
ysr@777 1263
ysr@777 1264 if (G1PolicyVerbose > 1) {
ysr@777 1265 gclog_or_tty->print_cr(" ET: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1266 " CH Strong: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1267 " G1 Strong: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1268 " Evac: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1269 " ET-RS: %10.6f ms (avg: %10.6f ms)\n"
ysr@777 1270 " |RS|: " SIZE_FORMAT,
ysr@777 1271 elapsed_ms, recent_avg_time_for_pauses_ms(),
ysr@777 1272 _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(),
ysr@777 1273 _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(),
ysr@777 1274 evac_ms, recent_avg_time_for_evac_ms(),
ysr@777 1275 scan_rs_time,
ysr@777 1276 recent_avg_time_for_pauses_ms() -
ysr@777 1277 recent_avg_time_for_G1_strong_ms(),
ysr@777 1278 rs_size);
ysr@777 1279
ysr@777 1280 gclog_or_tty->print_cr(" Used at start: " SIZE_FORMAT"K"
ysr@777 1281 " At end " SIZE_FORMAT "K\n"
ysr@777 1282 " garbage : " SIZE_FORMAT "K"
ysr@777 1283 " of " SIZE_FORMAT "K\n"
ysr@777 1284 " survival : %6.2f%% (%6.2f%% avg)",
ysr@777 1285 _cur_collection_pause_used_at_start_bytes/K,
ysr@777 1286 _g1->used()/K, freed_bytes/K,
ysr@777 1287 _collection_set_bytes_used_before/K,
ysr@777 1288 survival_fraction*100.0,
ysr@777 1289 recent_avg_survival_fraction()*100.0);
ysr@777 1290 gclog_or_tty->print_cr(" Recent %% gc pause time: %6.2f",
ysr@777 1291 recent_avg_pause_time_ratio() * 100.0);
ysr@777 1292 }
ysr@777 1293
ysr@777 1294 double other_time_ms = elapsed_ms;
ysr@777 1295
ysr@777 1296 if (!abandoned) {
ysr@777 1297 if (_satb_drain_time_set)
ysr@777 1298 other_time_ms -= _cur_satb_drain_time_ms;
ysr@777 1299
ysr@777 1300 if (parallel)
ysr@777 1301 other_time_ms -= _cur_collection_par_time_ms + _cur_clear_ct_time_ms;
ysr@777 1302 else
ysr@777 1303 other_time_ms -=
ysr@777 1304 update_rs_time +
johnc@1829 1305 ext_root_scan_time + mark_stack_scan_time +
ysr@777 1306 scan_rs_time + obj_copy_time;
ysr@777 1307 }
ysr@777 1308
ysr@777 1309 if (PrintGCDetails) {
ysr@777 1310 gclog_or_tty->print_cr("%s%s, %1.8lf secs]",
apetrusenko@1112 1311 abandoned ? " (abandoned)" : "",
ysr@777 1312 (last_pause_included_initial_mark) ? " (initial-mark)" : "",
ysr@777 1313 elapsed_ms / 1000.0);
ysr@777 1314
ysr@777 1315 if (!abandoned) {
apetrusenko@1112 1316 if (_satb_drain_time_set) {
ysr@777 1317 print_stats(1, "SATB Drain Time", _cur_satb_drain_time_ms);
apetrusenko@1112 1318 }
apetrusenko@1112 1319 if (_last_satb_drain_processed_buffers >= 0) {
ysr@777 1320 print_stats(2, "Processed Buffers", _last_satb_drain_processed_buffers);
apetrusenko@1112 1321 }
apetrusenko@1112 1322 if (parallel) {
apetrusenko@1112 1323 print_stats(1, "Parallel Time", _cur_collection_par_time_ms);
apetrusenko@1112 1324 print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false);
apetrusenko@1112 1325 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
iveresov@1229 1326 print_par_buffers(3, "Processed Buffers",
iveresov@1229 1327 _par_last_update_rs_processed_buffers, true);
ysr@777 1328 print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
ysr@777 1329 print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms);
ysr@777 1330 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
ysr@777 1331 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
ysr@777 1332 print_par_stats(2, "Termination", _par_last_termination_times_ms);
ysr@777 1333 print_stats(2, "Other", parallel_other_time);
ysr@777 1334 print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
apetrusenko@1112 1335 } else {
apetrusenko@1112 1336 print_stats(1, "Update RS", update_rs_time);
iveresov@1229 1337 print_stats(2, "Processed Buffers",
iveresov@1229 1338 (int)update_rs_processed_buffers);
ysr@777 1339 print_stats(1, "Ext Root Scanning", ext_root_scan_time);
ysr@777 1340 print_stats(1, "Mark Stack Scanning", mark_stack_scan_time);
ysr@777 1341 print_stats(1, "Scan RS", scan_rs_time);
ysr@777 1342 print_stats(1, "Object Copying", obj_copy_time);
ysr@777 1343 }
ysr@777 1344 }
johnc@1325 1345 #ifndef PRODUCT
johnc@1325 1346 print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
johnc@1325 1347 print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
johnc@1325 1348 print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
johnc@1325 1349 print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
johnc@1325 1350 if (_num_cc_clears > 0) {
johnc@1325 1351 print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
johnc@1325 1352 }
johnc@1325 1353 #endif
ysr@777 1354 print_stats(1, "Other", other_time_ms);
johnc@1829 1355 print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
johnc@1829 1356
ysr@777 1357 for (int i = 0; i < _aux_num; ++i) {
ysr@777 1358 if (_cur_aux_times_set[i]) {
ysr@777 1359 char buffer[96];
ysr@777 1360 sprintf(buffer, "Aux%d", i);
ysr@777 1361 print_stats(1, buffer, _cur_aux_times_ms[i]);
ysr@777 1362 }
ysr@777 1363 }
ysr@777 1364 }
ysr@777 1365 if (PrintGCDetails)
ysr@777 1366 gclog_or_tty->print(" [");
ysr@777 1367 if (PrintGC || PrintGCDetails)
ysr@777 1368 _g1->print_size_transition(gclog_or_tty,
ysr@777 1369 _cur_collection_pause_used_at_start_bytes,
ysr@777 1370 _g1->used(), _g1->capacity());
ysr@777 1371 if (PrintGCDetails)
ysr@777 1372 gclog_or_tty->print_cr("]");
ysr@777 1373
ysr@777 1374 _all_pause_times_ms->add(elapsed_ms);
tonyp@1083 1375 if (update_stats) {
tonyp@1083 1376 summary->record_total_time_ms(elapsed_ms);
tonyp@1083 1377 summary->record_other_time_ms(other_time_ms);
tonyp@1083 1378 }
ysr@777 1379 for (int i = 0; i < _aux_num; ++i)
ysr@777 1380 if (_cur_aux_times_set[i])
ysr@777 1381 _all_aux_times_ms[i].add(_cur_aux_times_ms[i]);
ysr@777 1382
ysr@777 1383 // Reset marks-between-pauses counter.
ysr@777 1384 _n_marks_since_last_pause = 0;
ysr@777 1385
ysr@777 1386 // Update the efficiency-since-mark vars.
ysr@777 1387 double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
ysr@777 1388 if (elapsed_ms < MIN_TIMER_GRANULARITY) {
ysr@777 1389 // This usually happens due to the timer not having the required
ysr@777 1390 // granularity. Some Linuxes are the usual culprits.
ysr@777 1391 // We'll just set it to something (arbitrarily) small.
ysr@777 1392 proc_ms = 1.0;
ysr@777 1393 }
ysr@777 1394 double cur_efficiency = (double) freed_bytes / proc_ms;
ysr@777 1395
ysr@777 1396 bool new_in_marking_window = _in_marking_window;
ysr@777 1397 bool new_in_marking_window_im = false;
tonyp@1794 1398 if (during_initial_mark_pause()) {
ysr@777 1399 new_in_marking_window = true;
ysr@777 1400 new_in_marking_window_im = true;
ysr@777 1401 }
ysr@777 1402
ysr@777 1403 if (in_young_gc_mode()) {
ysr@777 1404 if (_last_full_young_gc) {
ysr@777 1405 set_full_young_gcs(false);
ysr@777 1406 _last_full_young_gc = false;
ysr@777 1407 }
ysr@777 1408
ysr@777 1409 if ( !_last_young_gc_full ) {
ysr@777 1410 if ( _should_revert_to_full_young_gcs ||
ysr@777 1411 _known_garbage_ratio < 0.05 ||
ysr@777 1412 (adaptive_young_list_length() &&
ysr@777 1413 (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
ysr@777 1414 set_full_young_gcs(true);
ysr@777 1415 }
ysr@777 1416 }
ysr@777 1417 _should_revert_to_full_young_gcs = false;
ysr@777 1418
ysr@777 1419 if (_last_young_gc_full && !_during_marking)
ysr@777 1420 _young_gc_eff_seq->add(cur_efficiency);
ysr@777 1421 }
ysr@777 1422
ysr@777 1423 _short_lived_surv_rate_group->start_adding_regions();
ysr@777 1424   // do that for any other surv rate groups
ysr@777 1425
ysr@777 1426 // <NEW PREDICTION>
ysr@777 1427
apetrusenko@1112 1428 if (update_stats) {
ysr@777 1429 double pause_time_ms = elapsed_ms;
ysr@777 1430
ysr@777 1431 size_t diff = 0;
ysr@777 1432 if (_max_pending_cards >= _pending_cards)
ysr@777 1433 diff = _max_pending_cards - _pending_cards;
ysr@777 1434 _pending_card_diff_seq->add((double) diff);
ysr@777 1435
ysr@777 1436 double cost_per_card_ms = 0.0;
ysr@777 1437 if (_pending_cards > 0) {
ysr@777 1438 cost_per_card_ms = update_rs_time / (double) _pending_cards;
ysr@777 1439 _cost_per_card_ms_seq->add(cost_per_card_ms);
ysr@777 1440 }
ysr@777 1441
ysr@777 1442 size_t cards_scanned = _g1->cards_scanned();
ysr@777 1443
ysr@777 1444 double cost_per_entry_ms = 0.0;
ysr@777 1445 if (cards_scanned > 10) {
ysr@777 1446 cost_per_entry_ms = scan_rs_time / (double) cards_scanned;
ysr@777 1447 if (_last_young_gc_full)
ysr@777 1448 _cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1449 else
ysr@777 1450 _partially_young_cost_per_entry_ms_seq->add(cost_per_entry_ms);
ysr@777 1451 }
ysr@777 1452
ysr@777 1453 if (_max_rs_lengths > 0) {
ysr@777 1454 double cards_per_entry_ratio =
ysr@777 1455 (double) cards_scanned / (double) _max_rs_lengths;
ysr@777 1456 if (_last_young_gc_full)
ysr@777 1457 _fully_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1458 else
ysr@777 1459 _partially_young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
ysr@777 1460 }
ysr@777 1461
ysr@777 1462 size_t rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
ysr@777 1463     if (_max_rs_lengths >= _recorded_rs_lengths) // guard against underflow of the unsigned difference
ysr@777 1464 _rs_length_diff_seq->add((double) rs_length_diff);
ysr@777 1465
ysr@777 1466 size_t copied_bytes = surviving_bytes;
ysr@777 1467 double cost_per_byte_ms = 0.0;
ysr@777 1468 if (copied_bytes > 0) {
ysr@777 1469 cost_per_byte_ms = obj_copy_time / (double) copied_bytes;
ysr@777 1470 if (_in_marking_window)
ysr@777 1471 _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
ysr@777 1472 else
ysr@777 1473 _cost_per_byte_ms_seq->add(cost_per_byte_ms);
ysr@777 1474 }
ysr@777 1475
ysr@777 1476 double all_other_time_ms = pause_time_ms -
johnc@1829 1477 (update_rs_time + scan_rs_time + obj_copy_time +
ysr@777 1478 _mark_closure_time_ms + termination_time);
ysr@777 1479
ysr@777 1480 double young_other_time_ms = 0.0;
ysr@777 1481 if (_recorded_young_regions > 0) {
ysr@777 1482 young_other_time_ms =
ysr@777 1483 _recorded_young_cset_choice_time_ms +
ysr@777 1484 _recorded_young_free_cset_time_ms;
ysr@777 1485 _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
ysr@777 1486 (double) _recorded_young_regions);
ysr@777 1487 }
ysr@777 1488 double non_young_other_time_ms = 0.0;
ysr@777 1489 if (_recorded_non_young_regions > 0) {
ysr@777 1490 non_young_other_time_ms =
ysr@777 1491 _recorded_non_young_cset_choice_time_ms +
ysr@777 1492 _recorded_non_young_free_cset_time_ms;
ysr@777 1493
ysr@777 1494 _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
ysr@777 1495 (double) _recorded_non_young_regions);
ysr@777 1496 }
ysr@777 1497
ysr@777 1498 double constant_other_time_ms = all_other_time_ms -
ysr@777 1499 (young_other_time_ms + non_young_other_time_ms);
ysr@777 1500 _constant_other_time_ms_seq->add(constant_other_time_ms);
ysr@777 1501
ysr@777 1502 double survival_ratio = 0.0;
ysr@777 1503 if (_bytes_in_collection_set_before_gc > 0) {
ysr@777 1504 survival_ratio = (double) bytes_in_to_space_during_gc() /
ysr@777 1505 (double) _bytes_in_collection_set_before_gc;
ysr@777 1506 }
ysr@777 1507
ysr@777 1508 _pending_cards_seq->add((double) _pending_cards);
ysr@777 1509 _scanned_cards_seq->add((double) cards_scanned);
ysr@777 1510 _rs_lengths_seq->add((double) _max_rs_lengths);
ysr@777 1511
ysr@777 1512 double expensive_region_limit_ms =
johnc@1186 1513 (double) MaxGCPauseMillis - predict_constant_other_time_ms();
ysr@777 1514 if (expensive_region_limit_ms < 0.0) {
ysr@777 1515       // this means that the other time was predicted to be longer
ysr@777 1516       // than the max pause time
johnc@1186 1517 expensive_region_limit_ms = (double) MaxGCPauseMillis;
ysr@777 1518 }
ysr@777 1519 _expensive_region_limit_ms = expensive_region_limit_ms;
ysr@777 1520
ysr@777 1521 if (PREDICTIONS_VERBOSE) {
ysr@777 1522 gclog_or_tty->print_cr("");
ysr@777 1523 gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
johnc@1829 1524 "REGIONS %d %d %d "
ysr@777 1525 "PENDING_CARDS %d %d "
ysr@777 1526 "CARDS_SCANNED %d %d "
ysr@777 1527 "RS_LENGTHS %d %d "
ysr@777 1528 "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
ysr@777 1529 "SURVIVAL_RATIO %1.6lf %1.6lf "
ysr@777 1530 "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
ysr@777 1531 "OTHER_YOUNG %1.6lf %1.6lf "
ysr@777 1532 "OTHER_NON_YOUNG %1.6lf %1.6lf "
ysr@777 1533 "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
ysr@777 1534 "ELAPSED %1.6lf %1.6lf ",
ysr@777 1535 _cur_collection_start_sec,
ysr@777 1536 (!_last_young_gc_full) ? 2 :
ysr@777 1537 (last_pause_included_initial_mark) ? 1 : 0,
ysr@777 1538 _recorded_region_num,
ysr@777 1539 _recorded_young_regions,
ysr@777 1540 _recorded_non_young_regions,
ysr@777 1541 _predicted_pending_cards, _pending_cards,
ysr@777 1542 _predicted_cards_scanned, cards_scanned,
ysr@777 1543 _predicted_rs_lengths, _max_rs_lengths,
ysr@777 1544 _predicted_rs_update_time_ms, update_rs_time,
ysr@777 1545 _predicted_rs_scan_time_ms, scan_rs_time,
ysr@777 1546 _predicted_survival_ratio, survival_ratio,
ysr@777 1547 _predicted_object_copy_time_ms, obj_copy_time,
ysr@777 1548 _predicted_constant_other_time_ms, constant_other_time_ms,
ysr@777 1549 _predicted_young_other_time_ms, young_other_time_ms,
ysr@777 1550 _predicted_non_young_other_time_ms,
ysr@777 1551 non_young_other_time_ms,
ysr@777 1552 _vtime_diff_ms, termination_time,
ysr@777 1553 _predicted_pause_time_ms, elapsed_ms);
ysr@777 1554 }
ysr@777 1555
ysr@777 1556 if (G1PolicyVerbose > 0) {
ysr@777 1557 gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
ysr@777 1558 _predicted_pause_time_ms,
ysr@777 1559 (_within_target) ? "within" : "outside",
ysr@777 1560 elapsed_ms);
ysr@777 1561 }
ysr@777 1562
ysr@777 1563 }
ysr@777 1564
ysr@777 1565 _in_marking_window = new_in_marking_window;
ysr@777 1566 _in_marking_window_im = new_in_marking_window_im;
ysr@777 1567 _free_regions_at_end_of_collection = _g1->free_regions();
ysr@777 1568 calculate_young_list_min_length();
johnc@1829 1569 calculate_young_list_target_length();
ysr@777 1570
iveresov@1546 1571 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
tonyp@1717 1572 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
iveresov@1546 1573 adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
ysr@777 1574 // </NEW PREDICTION>
ysr@777 1575
ysr@777 1576 _target_pause_time_ms = -1.0;
ysr@777 1577 }
ysr@777 1578
ysr@777 1579 // <NEW PREDICTION>
ysr@777 1580
iveresov@1546 1581 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 1582 double update_rs_processed_buffers,
iveresov@1546 1583 double goal_ms) {
iveresov@1546 1584 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
iveresov@1546 1585 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
iveresov@1546 1586
tonyp@1717 1587 if (G1UseAdaptiveConcRefinement) {
iveresov@1546 1588 const int k_gy = 3, k_gr = 6;
iveresov@1546 1589 const double inc_k = 1.1, dec_k = 0.9;
iveresov@1546 1590
iveresov@1546 1591 int g = cg1r->green_zone();
iveresov@1546 1592 if (update_rs_time > goal_ms) {
iveresov@1546 1593       g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
iveresov@1546 1594 } else {
iveresov@1546 1595 if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
iveresov@1546 1596 g = (int)MAX2(g * inc_k, g + 1.0);
iveresov@1546 1597 }
iveresov@1546 1598 }
iveresov@1546 1599 // Change the refinement threads params
iveresov@1546 1600 cg1r->set_green_zone(g);
iveresov@1546 1601 cg1r->set_yellow_zone(g * k_gy);
iveresov@1546 1602 cg1r->set_red_zone(g * k_gr);
iveresov@1546 1603 cg1r->reinitialize_threads();
iveresov@1546 1604
iveresov@1546 1605 int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
iveresov@1546 1606 int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
iveresov@1546 1607 cg1r->yellow_zone());
iveresov@1546 1608 // Change the barrier params
iveresov@1546 1609 dcqs.set_process_completed_threshold(processing_threshold);
iveresov@1546 1610 dcqs.set_max_completed_queue(cg1r->red_zone());
iveresov@1546 1611 }
iveresov@1546 1612
iveresov@1546 1613 int curr_queue_size = dcqs.completed_buffers_num();
iveresov@1546 1614 if (curr_queue_size >= cg1r->yellow_zone()) {
iveresov@1546 1615 dcqs.set_completed_queue_padding(curr_queue_size);
iveresov@1546 1616 } else {
iveresov@1546 1617 dcqs.set_completed_queue_padding(0);
iveresov@1546 1618 }
iveresov@1546 1619 dcqs.notify_if_necessary();
iveresov@1546 1620 }
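// Illustrative trace of the adaptive refinement above (hypothetical numbers,
// not from a real run): with green_zone() == 100 and goal_ms == 10.0,
//   update_rs_time == 12.0 ms                        -> g = (int)(100 * 0.9) = 90
//   update_rs_time ==  8.0 ms, processed buffers 120 -> g = (int)MAX2(100 * 1.1, 101.0) = 110
// The yellow and red zones then track 3*g and 6*g, and the dirty-card queue
// set's processing threshold and maximum queue length are re-derived from
// them, so the refinement pipeline tightens when remembered-set updating
// overruns its share of the pause goal and relaxes otherwise.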
iveresov@1546 1621
ysr@777 1622 double
ysr@777 1623 G1CollectorPolicy::
ysr@777 1624 predict_young_collection_elapsed_time_ms(size_t adjustment) {
ysr@777 1625 guarantee( adjustment == 0 || adjustment == 1, "invariant" );
ysr@777 1626
ysr@777 1627 G1CollectedHeap* g1h = G1CollectedHeap::heap();
johnc@1829 1628 size_t young_num = g1h->young_list()->length();
ysr@777 1629 if (young_num == 0)
ysr@777 1630 return 0.0;
ysr@777 1631
ysr@777 1632 young_num += adjustment;
ysr@777 1633 size_t pending_cards = predict_pending_cards();
johnc@1829 1634 size_t rs_lengths = g1h->young_list()->sampled_rs_lengths() +
ysr@777 1635 predict_rs_length_diff();
ysr@777 1636 size_t card_num;
ysr@777 1637 if (full_young_gcs())
ysr@777 1638 card_num = predict_young_card_num(rs_lengths);
ysr@777 1639 else
ysr@777 1640 card_num = predict_non_young_card_num(rs_lengths);
ysr@777 1641 size_t young_byte_size = young_num * HeapRegion::GrainBytes;
ysr@777 1642 double accum_yg_surv_rate =
ysr@777 1643 _short_lived_surv_rate_group->accum_surv_rate(adjustment);
ysr@777 1644
ysr@777 1645 size_t bytes_to_copy =
ysr@777 1646 (size_t) (accum_yg_surv_rate * (double) HeapRegion::GrainBytes);
ysr@777 1647
ysr@777 1648 return
ysr@777 1649 predict_rs_update_time_ms(pending_cards) +
ysr@777 1650 predict_rs_scan_time_ms(card_num) +
ysr@777 1651 predict_object_copy_time_ms(bytes_to_copy) +
ysr@777 1652 predict_young_other_time_ms(young_num) +
ysr@777 1653 predict_constant_other_time_ms();
ysr@777 1654 }
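// In effect the prediction above is (illustrative restatement, no new policy):
//   T_young = T_update_rs(predicted pending cards)
//           + T_scan_rs(cards predicted from sampled RS lengths + diff)
//           + T_copy(accumulated young survival rate * GrainBytes)
//           + T_young_other(young_num + adjustment)
//           + T_constant_other
// where each T_* term is read off the running averages that the pause-end
// bookkeeping above feeds into the corresponding TruncatedSeq.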
ysr@777 1655
ysr@777 1656 double
ysr@777 1657 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
ysr@777 1658 size_t rs_length = predict_rs_length_diff();
ysr@777 1659 size_t card_num;
ysr@777 1660 if (full_young_gcs())
ysr@777 1661 card_num = predict_young_card_num(rs_length);
ysr@777 1662 else
ysr@777 1663 card_num = predict_non_young_card_num(rs_length);
ysr@777 1664 return predict_base_elapsed_time_ms(pending_cards, card_num);
ysr@777 1665 }
ysr@777 1666
ysr@777 1667 double
ysr@777 1668 G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 1669 size_t scanned_cards) {
ysr@777 1670 return
ysr@777 1671 predict_rs_update_time_ms(pending_cards) +
ysr@777 1672 predict_rs_scan_time_ms(scanned_cards) +
ysr@777 1673 predict_constant_other_time_ms();
ysr@777 1674 }
ysr@777 1675
ysr@777 1676 double
ysr@777 1677 G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
ysr@777 1678 bool young) {
ysr@777 1679 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1680 size_t card_num;
ysr@777 1681 if (full_young_gcs())
ysr@777 1682 card_num = predict_young_card_num(rs_length);
ysr@777 1683 else
ysr@777 1684 card_num = predict_non_young_card_num(rs_length);
ysr@777 1685 size_t bytes_to_copy = predict_bytes_to_copy(hr);
ysr@777 1686
ysr@777 1687 double region_elapsed_time_ms =
ysr@777 1688 predict_rs_scan_time_ms(card_num) +
ysr@777 1689 predict_object_copy_time_ms(bytes_to_copy);
ysr@777 1690
ysr@777 1691 if (young)
ysr@777 1692 region_elapsed_time_ms += predict_young_other_time_ms(1);
ysr@777 1693 else
ysr@777 1694 region_elapsed_time_ms += predict_non_young_other_time_ms(1);
ysr@777 1695
ysr@777 1696 return region_elapsed_time_ms;
ysr@777 1697 }
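// Example (hypothetical values, for intuition only): a young region with a
// remembered-set occupancy of 1000 entries, a predicted 1.0 cards/entry, a
// scan cost of 0.01 ms/card, 200 KB predicted to be copied at 0.00003 ms/byte
// and a per-region young "other" cost of 0.2 ms comes out at roughly
//   1000 * 0.01 + 204800 * 0.00003 + 0.2  ~=  10 + 6.1 + 0.2  ~=  16.3 ms,
// the kind of figure the collection-set selection compares against the
// remaining pause-time budget.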
ysr@777 1698
ysr@777 1699 size_t
ysr@777 1700 G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
ysr@777 1701 size_t bytes_to_copy;
ysr@777 1702 if (hr->is_marked())
ysr@777 1703 bytes_to_copy = hr->max_live_bytes();
ysr@777 1704 else {
ysr@777 1705 guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
ysr@777 1706 "invariant" );
ysr@777 1707 int age = hr->age_in_surv_rate_group();
apetrusenko@980 1708 double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
ysr@777 1709 bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
ysr@777 1710 }
ysr@777 1711
ysr@777 1712 return bytes_to_copy;
ysr@777 1713 }
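// Example (hypothetical numbers): a marked non-young region contributes its
// max_live_bytes(), say 600 KB, directly; a young region with 1 MB used and
// a predicted survival rate of 0.25 for its age contributes
// 1 MB * 0.25 = 256 KB.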
ysr@777 1714
ysr@777 1715 void
ysr@777 1716 G1CollectorPolicy::start_recording_regions() {
ysr@777 1717 _recorded_rs_lengths = 0;
ysr@777 1718 _recorded_young_regions = 0;
ysr@777 1719 _recorded_non_young_regions = 0;
ysr@777 1720
ysr@777 1721 #if PREDICTIONS_VERBOSE
ysr@777 1722 _recorded_marked_bytes = 0;
ysr@777 1723 _recorded_young_bytes = 0;
ysr@777 1724 _predicted_bytes_to_copy = 0;
johnc@1829 1725 _predicted_rs_lengths = 0;
johnc@1829 1726 _predicted_cards_scanned = 0;
ysr@777 1727 #endif // PREDICTIONS_VERBOSE
ysr@777 1728 }
ysr@777 1729
ysr@777 1730 void
johnc@1829 1731 G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
ysr@777 1732 #if PREDICTIONS_VERBOSE
johnc@1829 1733 if (!young) {
ysr@777 1734 _recorded_marked_bytes += hr->max_live_bytes();
ysr@777 1735 }
ysr@777 1736 _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
ysr@777 1737 #endif // PREDICTIONS_VERBOSE
ysr@777 1738
ysr@777 1739 size_t rs_length = hr->rem_set()->occupied();
ysr@777 1740 _recorded_rs_lengths += rs_length;
ysr@777 1741 }
ysr@777 1742
ysr@777 1743 void
johnc@1829 1744 G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
johnc@1829 1745 assert(!hr->is_young(), "should not call this");
johnc@1829 1746 ++_recorded_non_young_regions;
johnc@1829 1747 record_cset_region_info(hr, false);
johnc@1829 1748 }
johnc@1829 1749
johnc@1829 1750 void
johnc@1829 1751 G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
johnc@1829 1752 _recorded_young_regions = n_regions;
johnc@1829 1753 }
johnc@1829 1754
johnc@1829 1755 void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
johnc@1829 1756 #if PREDICTIONS_VERBOSE
johnc@1829 1757 _recorded_young_bytes = bytes;
johnc@1829 1758 #endif // PREDICTIONS_VERBOSE
johnc@1829 1759 }
johnc@1829 1760
johnc@1829 1761 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
johnc@1829 1762 _recorded_rs_lengths = rs_lengths;
johnc@1829 1763 }
johnc@1829 1764
johnc@1829 1765 void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 1766 _predicted_bytes_to_copy = bytes;
ysr@777 1767 }
ysr@777 1768
ysr@777 1769 void
ysr@777 1770 G1CollectorPolicy::end_recording_regions() {
johnc@1829 1771 // The _predicted_pause_time_ms field is referenced in code
johnc@1829 1772 // not under PREDICTIONS_VERBOSE. Let's initialize it.
johnc@1829 1773 _predicted_pause_time_ms = -1.0;
johnc@1829 1774
ysr@777 1775 #if PREDICTIONS_VERBOSE
ysr@777 1776 _predicted_pending_cards = predict_pending_cards();
ysr@777 1777 _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
ysr@777 1778 if (full_young_gcs())
ysr@777 1779 _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
ysr@777 1780 else
ysr@777 1781 _predicted_cards_scanned +=
ysr@777 1782 predict_non_young_card_num(_predicted_rs_lengths);
ysr@777 1783 _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
ysr@777 1784
ysr@777 1785 _predicted_rs_update_time_ms =
ysr@777 1786 predict_rs_update_time_ms(_g1->pending_card_num());
ysr@777 1787 _predicted_rs_scan_time_ms =
ysr@777 1788 predict_rs_scan_time_ms(_predicted_cards_scanned);
ysr@777 1789 _predicted_object_copy_time_ms =
ysr@777 1790 predict_object_copy_time_ms(_predicted_bytes_to_copy);
ysr@777 1791 _predicted_constant_other_time_ms =
ysr@777 1792 predict_constant_other_time_ms();
ysr@777 1793 _predicted_young_other_time_ms =
ysr@777 1794 predict_young_other_time_ms(_recorded_young_regions);
ysr@777 1795 _predicted_non_young_other_time_ms =
ysr@777 1796 predict_non_young_other_time_ms(_recorded_non_young_regions);
ysr@777 1797
ysr@777 1798 _predicted_pause_time_ms =
ysr@777 1799 _predicted_rs_update_time_ms +
ysr@777 1800 _predicted_rs_scan_time_ms +
ysr@777 1801 _predicted_object_copy_time_ms +
ysr@777 1802 _predicted_constant_other_time_ms +
ysr@777 1803 _predicted_young_other_time_ms +
ysr@777 1804 _predicted_non_young_other_time_ms;
ysr@777 1805 #endif // PREDICTIONS_VERBOSE
ysr@777 1806 }
ysr@777 1807
ysr@777 1808 void G1CollectorPolicy::check_if_region_is_too_expensive(double
ysr@777 1809 predicted_time_ms) {
ysr@777 1810 // I don't think we need to do this when in young GC mode since
ysr@777 1811 // marking will be initiated next time we hit the soft limit anyway...
ysr@777 1812 if (predicted_time_ms > _expensive_region_limit_ms) {
ysr@777 1813 if (!in_young_gc_mode()) {
ysr@777 1814 set_full_young_gcs(true);
tonyp@1794 1815 // We might want to do something different here. However,
tonyp@1794 1816 // right now we don't support the non-generational G1 mode
tonyp@1794 1817 // (and in fact we are planning to remove the associated code,
tonyp@1794 1818 // see CR 6814390). So, let's leave it as is and this will be
tonyp@1794 1819 // removed some time in the future
tonyp@1794 1820 ShouldNotReachHere();
tonyp@1794 1821 set_during_initial_mark_pause();
ysr@777 1822 } else
ysr@777 1823 // no point in doing another partial one
ysr@777 1824 _should_revert_to_full_young_gcs = true;
ysr@777 1825 }
ysr@777 1826 }
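// Example (hypothetical values): with MaxGCPauseMillis == 200 and a predicted
// constant "other" time of 5 ms, _expensive_region_limit_ms is 195 ms; any
// region predicted to cost more than that triggers a revert to full-young
// collections, since a partial collection could not fit such a region in the
// pause budget anyway.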
ysr@777 1827
ysr@777 1828 // </NEW PREDICTION>
ysr@777 1829
ysr@777 1830
ysr@777 1831 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
ysr@777 1832 double elapsed_ms) {
ysr@777 1833 _recent_gc_times_ms->add(elapsed_ms);
ysr@777 1834 _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
ysr@777 1835 _prev_collection_pause_end_ms = end_time_sec * 1000.0;
ysr@777 1836 }
ysr@777 1837
ysr@777 1838 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
johnc@1186 1839 if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis;
ysr@777 1840 else return _recent_pause_times_ms->avg();
ysr@777 1841 }
ysr@777 1842
ysr@777 1843 double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
ysr@777 1844 if (_recent_CH_strong_roots_times_ms->num() == 0)
johnc@1186 1845 return (double)MaxGCPauseMillis/3.0;
ysr@777 1846 else return _recent_CH_strong_roots_times_ms->avg();
ysr@777 1847 }
ysr@777 1848
ysr@777 1849 double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
ysr@777 1850 if (_recent_G1_strong_roots_times_ms->num() == 0)
johnc@1186 1851 return (double)MaxGCPauseMillis/3.0;
ysr@777 1852 else return _recent_G1_strong_roots_times_ms->avg();
ysr@777 1853 }
ysr@777 1854
ysr@777 1855 double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
johnc@1186 1856 if (_recent_evac_times_ms->num() == 0) return (double)MaxGCPauseMillis/3.0;
ysr@777 1857 else return _recent_evac_times_ms->avg();
ysr@777 1858 }
ysr@777 1859
ysr@777 1860 int G1CollectorPolicy::number_of_recent_gcs() {
ysr@777 1861 assert(_recent_CH_strong_roots_times_ms->num() ==
ysr@777 1862 _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync");
ysr@777 1863 assert(_recent_G1_strong_roots_times_ms->num() ==
ysr@777 1864 _recent_evac_times_ms->num(), "Sequence out of sync");
ysr@777 1865 assert(_recent_evac_times_ms->num() ==
ysr@777 1866 _recent_pause_times_ms->num(), "Sequence out of sync");
ysr@777 1867 assert(_recent_pause_times_ms->num() ==
ysr@777 1868 _recent_CS_bytes_used_before->num(), "Sequence out of sync");
ysr@777 1869 assert(_recent_CS_bytes_used_before->num() ==
ysr@777 1870 _recent_CS_bytes_surviving->num(), "Sequence out of sync");
ysr@777 1871 return _recent_pause_times_ms->num();
ysr@777 1872 }
ysr@777 1873
ysr@777 1874 double G1CollectorPolicy::recent_avg_survival_fraction() {
ysr@777 1875 return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
ysr@777 1876 _recent_CS_bytes_used_before);
ysr@777 1877 }
ysr@777 1878
ysr@777 1879 double G1CollectorPolicy::last_survival_fraction() {
ysr@777 1880 return last_survival_fraction_work(_recent_CS_bytes_surviving,
ysr@777 1881 _recent_CS_bytes_used_before);
ysr@777 1882 }
ysr@777 1883
ysr@777 1884 double
ysr@777 1885 G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 1886 TruncatedSeq* before) {
ysr@777 1887 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 1888 if (before->sum() > 0.0) {
ysr@777 1889 double recent_survival_rate = surviving->sum() / before->sum();
ysr@777 1890 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1891 // fragmentation can produce negative collections.
ysr@777 1892 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1893 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1894 // (DLD, 10/05.)
ysr@777 1895 assert((true || ParallelGCThreads > 0) ||
ysr@777 1896 _g1->evacuation_failed() ||
ysr@777 1897 recent_survival_rate <= 1.0, "Or bad frac");
ysr@777 1898 return recent_survival_rate;
ysr@777 1899 } else {
ysr@777 1900 return 1.0; // Be conservative.
ysr@777 1901 }
ysr@777 1902 }
ysr@777 1903
ysr@777 1904 double
ysr@777 1905 G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 1906 TruncatedSeq* before) {
ysr@777 1907 assert(surviving->num() == before->num(), "Sequence out of sync");
ysr@777 1908 if (surviving->num() > 0 && before->last() > 0.0) {
ysr@777 1909 double last_survival_rate = surviving->last() / before->last();
ysr@777 1910 // We exempt parallel collection from this check because Alloc Buffer
ysr@777 1911 // fragmentation can produce negative collections.
ysr@777 1912 // Further, we're now always doing parallel collection. But I'm still
ysr@777 1913 // leaving this here as a placeholder for a more precise assertion later.
ysr@777 1914 // (DLD, 10/05.)
ysr@777 1915 assert((true || ParallelGCThreads > 0) ||
ysr@777 1916 last_survival_rate <= 1.0, "Or bad frac");
ysr@777 1917 return last_survival_rate;
ysr@777 1918 } else {
ysr@777 1919 return 1.0;
ysr@777 1920 }
ysr@777 1921 }
ysr@777 1922
ysr@777 1923 static const int survival_min_obs = 5;
ysr@777 1924 static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
ysr@777 1925 static const double min_survival_rate = 0.1;
ysr@777 1926
ysr@777 1927 double
ysr@777 1928 G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
ysr@777 1929 double latest) {
ysr@777 1930 double res = avg;
ysr@777 1931 if (number_of_recent_gcs() < survival_min_obs) {
ysr@777 1932 res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
ysr@777 1933 }
ysr@777 1934 res = MAX2(res, latest);
ysr@777 1935 res = MAX2(res, min_survival_rate);
ysr@777 1936 // In the parallel case, LAB fragmentation can produce "negative
ysr@777 1937 // collections"; so can evac failure. Cap at 1.0
ysr@777 1938 res = MIN2(res, 1.0);
ysr@777 1939 return res;
ysr@777 1940 }
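// Example (hypothetical numbers): with only 2 recent GCs recorded, an average
// survival fraction of 0.2 and a latest fraction of 0.35, the result is
//   MIN2(MAX2(MAX2(MAX2(0.2, survival_min_obs_limits[2] = 0.5), 0.35), 0.1), 1.0) = 0.5;
// with few observations the estimate is deliberately pessimistic (high), and
// it can never exceed 1.0 even when LAB fragmentation or evacuation failure
// produces a "negative collection".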
ysr@777 1941
ysr@777 1942 size_t G1CollectorPolicy::expansion_amount() {
tonyp@1791 1943 if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) {
johnc@1186 1944 // We will double the existing space, or take
johnc@1186 1945 // G1ExpandByPercentOfAvailable % of the available expansion
johnc@1186 1946 // space, whichever is smaller, bounded below by a minimum
johnc@1186 1947 // expansion (unless that's all that's left.)
ysr@777 1948 const size_t min_expand_bytes = 1*M;
ysr@777 1949 size_t reserved_bytes = _g1->g1_reserved_obj_bytes();
ysr@777 1950 size_t committed_bytes = _g1->capacity();
ysr@777 1951 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
ysr@777 1952 size_t expand_bytes;
ysr@777 1953 size_t expand_bytes_via_pct =
johnc@1186 1954 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
ysr@777 1955 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
ysr@777 1956 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
ysr@777 1957 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
ysr@777 1958 if (G1PolicyVerbose > 1) {
ysr@777 1959 gclog_or_tty->print("Decided to expand: ratio = %5.2f, "
ysr@777 1960                   "committed = %d%s, uncommitted = %d%s, via pct = %d%s.\n"
ysr@777 1961 " Answer = %d.\n",
ysr@777 1962 recent_avg_pause_time_ratio(),
ysr@777 1963 byte_size_in_proper_unit(committed_bytes),
ysr@777 1964 proper_unit_for_byte_size(committed_bytes),
ysr@777 1965 byte_size_in_proper_unit(uncommitted_bytes),
ysr@777 1966 proper_unit_for_byte_size(uncommitted_bytes),
ysr@777 1967 byte_size_in_proper_unit(expand_bytes_via_pct),
ysr@777 1968 proper_unit_for_byte_size(expand_bytes_via_pct),
ysr@777 1969 byte_size_in_proper_unit(expand_bytes),
ysr@777 1970 proper_unit_for_byte_size(expand_bytes));
ysr@777 1971 }
ysr@777 1972 return expand_bytes;
ysr@777 1973 } else {
ysr@777 1974 return 0;
ysr@777 1975 }
ysr@777 1976 }
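// Illustrative sizing (hypothetical values): with 1024 MB reserved, 256 MB
// committed and G1ExpandByPercentOfAvailable at 20, the candidate expansion is
//   via_pct = (1024 - 256) MB * 20 / 100 = 153.6 MB
//   expand  = MIN2(153.6 MB, 256 MB committed)   = 153.6 MB
//   expand  = MAX2(expand, 1 MB minimum)         = 153.6 MB
//   expand  = MIN2(expand, 768 MB uncommitted)   = 153.6 MB
// i.e. the heap grows by the smaller of "double the committed space" and the
// configured percentage of the space still uncommitted, never by less than
// 1 MB unless that is all that is left.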
ysr@777 1977
ysr@777 1978 void G1CollectorPolicy::note_start_of_mark_thread() {
ysr@777 1979 _mark_thread_startup_sec = os::elapsedTime();
ysr@777 1980 }
ysr@777 1981
ysr@777 1982 class CountCSClosure: public HeapRegionClosure {
ysr@777 1983 G1CollectorPolicy* _g1_policy;
ysr@777 1984 public:
ysr@777 1985 CountCSClosure(G1CollectorPolicy* g1_policy) :
ysr@777 1986 _g1_policy(g1_policy) {}
ysr@777 1987 bool doHeapRegion(HeapRegion* r) {
ysr@777 1988 _g1_policy->_bytes_in_collection_set_before_gc += r->used();
ysr@777 1989 return false;
ysr@777 1990 }
ysr@777 1991 };
ysr@777 1992
ysr@777 1993 void G1CollectorPolicy::count_CS_bytes_used() {
ysr@777 1994 CountCSClosure cs_closure(this);
ysr@777 1995 _g1->collection_set_iterate(&cs_closure);
ysr@777 1996 }
ysr@777 1997
ysr@777 1998 static void print_indent(int level) {
ysr@777 1999 for (int j = 0; j < level+1; ++j)
ysr@777 2000 gclog_or_tty->print(" ");
ysr@777 2001 }
ysr@777 2002
ysr@777 2003 void G1CollectorPolicy::print_summary (int level,
ysr@777 2004 const char* str,
ysr@777 2005 NumberSeq* seq) const {
ysr@777 2006 double sum = seq->sum();
ysr@777 2007 print_indent(level);
ysr@777 2008 gclog_or_tty->print_cr("%-24s = %8.2lf s (avg = %8.2lf ms)",
ysr@777 2009 str, sum / 1000.0, seq->avg());
ysr@777 2010 }
ysr@777 2011
ysr@777 2012 void G1CollectorPolicy::print_summary_sd (int level,
ysr@777 2013 const char* str,
ysr@777 2014 NumberSeq* seq) const {
ysr@777 2015 print_summary(level, str, seq);
ysr@777 2016 print_indent(level + 5);
ysr@777 2017 gclog_or_tty->print_cr("(num = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
ysr@777 2018 seq->num(), seq->sd(), seq->maximum());
ysr@777 2019 }
ysr@777 2020
ysr@777 2021 void G1CollectorPolicy::check_other_times(int level,
ysr@777 2022 NumberSeq* other_times_ms,
ysr@777 2023 NumberSeq* calc_other_times_ms) const {
ysr@777 2024 bool should_print = false;
ysr@777 2025
ysr@777 2026 double max_sum = MAX2(fabs(other_times_ms->sum()),
ysr@777 2027 fabs(calc_other_times_ms->sum()));
ysr@777 2028 double min_sum = MIN2(fabs(other_times_ms->sum()),
ysr@777 2029 fabs(calc_other_times_ms->sum()));
ysr@777 2030 double sum_ratio = max_sum / min_sum;
ysr@777 2031 if (sum_ratio > 1.1) {
ysr@777 2032 should_print = true;
ysr@777 2033 print_indent(level + 1);
ysr@777 2034 gclog_or_tty->print_cr("## CALCULATED OTHER SUM DOESN'T MATCH RECORDED ###");
ysr@777 2035 }
ysr@777 2036
ysr@777 2037 double max_avg = MAX2(fabs(other_times_ms->avg()),
ysr@777 2038 fabs(calc_other_times_ms->avg()));
ysr@777 2039 double min_avg = MIN2(fabs(other_times_ms->avg()),
ysr@777 2040 fabs(calc_other_times_ms->avg()));
ysr@777 2041 double avg_ratio = max_avg / min_avg;
ysr@777 2042 if (avg_ratio > 1.1) {
ysr@777 2043 should_print = true;
ysr@777 2044 print_indent(level + 1);
ysr@777 2045 gclog_or_tty->print_cr("## CALCULATED OTHER AVG DOESN'T MATCH RECORDED ###");
ysr@777 2046 }
ysr@777 2047
ysr@777 2048 if (other_times_ms->sum() < -0.01) {
ysr@777 2049 print_indent(level + 1);
ysr@777 2050 gclog_or_tty->print_cr("## RECORDED OTHER SUM IS NEGATIVE ###");
ysr@777 2051 }
ysr@777 2052
ysr@777 2053 if (other_times_ms->avg() < -0.01) {
ysr@777 2054 print_indent(level + 1);
ysr@777 2055 gclog_or_tty->print_cr("## RECORDED OTHER AVG IS NEGATIVE ###");
ysr@777 2056 }
ysr@777 2057
ysr@777 2058 if (calc_other_times_ms->sum() < -0.01) {
ysr@777 2059 should_print = true;
ysr@777 2060 print_indent(level + 1);
ysr@777 2061 gclog_or_tty->print_cr("## CALCULATED OTHER SUM IS NEGATIVE ###");
ysr@777 2062 }
ysr@777 2063
ysr@777 2064 if (calc_other_times_ms->avg() < -0.01) {
ysr@777 2065 should_print = true;
ysr@777 2066 print_indent(level + 1);
ysr@777 2067 gclog_or_tty->print_cr("## CALCULATED OTHER AVG IS NEGATIVE ###");
ysr@777 2068 }
ysr@777 2069
ysr@777 2070 if (should_print)
ysr@777 2071 print_summary(level, "Other(Calc)", calc_other_times_ms);
ysr@777 2072 }
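// Example (hypothetical numbers): if the recorded "Other" sum is 40 ms but the
// sum derived from the component sequences is 46 ms, the ratio 46 / 40 = 1.15
// exceeds the 1.1 tolerance, so the mismatch is reported and the calculated
// "Other(Calc)" summary is printed alongside it; recorded or calculated sums
// and averages more negative than -0.01 ms are also flagged.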
ysr@777 2073
ysr@777 2074 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
ysr@777 2075 bool parallel = ParallelGCThreads > 0;
ysr@777 2076 MainBodySummary* body_summary = summary->main_body_summary();
ysr@777 2077 if (summary->get_total_seq()->num() > 0) {
apetrusenko@1112 2078 print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
ysr@777 2079 if (body_summary != NULL) {
ysr@777 2080 print_summary(1, "SATB Drain", body_summary->get_satb_drain_seq());
ysr@777 2081 if (parallel) {
ysr@777 2082 print_summary(1, "Parallel Time", body_summary->get_parallel_seq());
ysr@777 2083 print_summary(2, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2084 print_summary(2, "Ext Root Scanning",
ysr@777 2085 body_summary->get_ext_root_scan_seq());
ysr@777 2086 print_summary(2, "Mark Stack Scanning",
ysr@777 2087 body_summary->get_mark_stack_scan_seq());
ysr@777 2088 print_summary(2, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2089 print_summary(2, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2090 print_summary(2, "Termination", body_summary->get_termination_seq());
ysr@777 2091 print_summary(2, "Other", body_summary->get_parallel_other_seq());
ysr@777 2092 {
ysr@777 2093 NumberSeq* other_parts[] = {
ysr@777 2094 body_summary->get_update_rs_seq(),
ysr@777 2095 body_summary->get_ext_root_scan_seq(),
ysr@777 2096 body_summary->get_mark_stack_scan_seq(),
ysr@777 2097 body_summary->get_scan_rs_seq(),
ysr@777 2098 body_summary->get_obj_copy_seq(),
ysr@777 2099 body_summary->get_termination_seq()
ysr@777 2100 };
ysr@777 2101 NumberSeq calc_other_times_ms(body_summary->get_parallel_seq(),
ysr@777 2102                                         6, other_parts);  // six sequences in other_parts
ysr@777 2103 check_other_times(2, body_summary->get_parallel_other_seq(),
ysr@777 2104 &calc_other_times_ms);
ysr@777 2105 }
ysr@777 2106 print_summary(1, "Mark Closure", body_summary->get_mark_closure_seq());
ysr@777 2107 print_summary(1, "Clear CT", body_summary->get_clear_ct_seq());
ysr@777 2108 } else {
ysr@777 2109 print_summary(1, "Update RS", body_summary->get_update_rs_seq());
ysr@777 2110 print_summary(1, "Ext Root Scanning",
ysr@777 2111 body_summary->get_ext_root_scan_seq());
ysr@777 2112 print_summary(1, "Mark Stack Scanning",
ysr@777 2113 body_summary->get_mark_stack_scan_seq());
ysr@777 2114 print_summary(1, "Scan RS", body_summary->get_scan_rs_seq());
ysr@777 2115 print_summary(1, "Object Copy", body_summary->get_obj_copy_seq());
ysr@777 2116 }
ysr@777 2117 }
ysr@777 2118 print_summary(1, "Other", summary->get_other_seq());
ysr@777 2119 {
ysr@777 2120 NumberSeq calc_other_times_ms;
ysr@777 2121 if (body_summary != NULL) {
ysr@777 2122 // not abandoned
ysr@777 2123 if (parallel) {
ysr@777 2124 // parallel
ysr@777 2125 NumberSeq* other_parts[] = {
ysr@777 2126 body_summary->get_satb_drain_seq(),
ysr@777 2127 body_summary->get_parallel_seq(),
ysr@777 2128 body_summary->get_clear_ct_seq()
ysr@777 2129 };
apetrusenko@1112 2130 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
apetrusenko@1112 2131 3, other_parts);
ysr@777 2132 } else {
ysr@777 2133 // serial
ysr@777 2134 NumberSeq* other_parts[] = {
ysr@777 2135 body_summary->get_satb_drain_seq(),
ysr@777 2136 body_summary->get_update_rs_seq(),
ysr@777 2137 body_summary->get_ext_root_scan_seq(),
ysr@777 2138 body_summary->get_mark_stack_scan_seq(),
ysr@777 2139 body_summary->get_scan_rs_seq(),
ysr@777 2140 body_summary->get_obj_copy_seq()
ysr@777 2141 };
ysr@777 2142 calc_other_times_ms = NumberSeq(summary->get_total_seq(),
apetrusenko@1112 2143                                         6, other_parts);  // six sequences in other_parts
ysr@777 2144 }
ysr@777 2145 } else {
ysr@777 2146 // abandoned
apetrusenko@1112 2147 calc_other_times_ms = NumberSeq();
ysr@777 2148 }
ysr@777 2149 check_other_times(1, summary->get_other_seq(), &calc_other_times_ms);
ysr@777 2150 }
ysr@777 2151 } else {
ysr@777 2152 print_indent(0);
ysr@777 2153 gclog_or_tty->print_cr("none");
ysr@777 2154 }
ysr@777 2155 gclog_or_tty->print_cr("");
ysr@777 2156 }
ysr@777 2157
ysr@777 2158 void
apetrusenko@1112 2159 G1CollectorPolicy::print_abandoned_summary(PauseSummary* summary) const {
ysr@777 2160 bool printed = false;
apetrusenko@1112 2161 if (summary->get_total_seq()->num() > 0) {
ysr@777 2162 printed = true;
apetrusenko@1112 2163 print_summary(summary);
ysr@777 2164 }
ysr@777 2165 if (!printed) {
ysr@777 2166 print_indent(0);
ysr@777 2167 gclog_or_tty->print_cr("none");
ysr@777 2168 gclog_or_tty->print_cr("");
ysr@777 2169 }
ysr@777 2170 }
ysr@777 2171
ysr@777 2172 void G1CollectorPolicy::print_tracing_info() const {
ysr@777 2173 if (TraceGen0Time) {
ysr@777 2174 gclog_or_tty->print_cr("ALL PAUSES");
ysr@777 2175 print_summary_sd(0, "Total", _all_pause_times_ms);
ysr@777 2176 gclog_or_tty->print_cr("");
ysr@777 2177 gclog_or_tty->print_cr("");
ysr@777 2178 gclog_or_tty->print_cr(" Full Young GC Pauses: %8d", _full_young_pause_num);
ysr@777 2179 gclog_or_tty->print_cr(" Partial Young GC Pauses: %8d", _partial_young_pause_num);
ysr@777 2180 gclog_or_tty->print_cr("");
ysr@777 2181
apetrusenko@1112 2182 gclog_or_tty->print_cr("EVACUATION PAUSES");
apetrusenko@1112 2183 print_summary(_summary);
ysr@777 2184
ysr@777 2185 gclog_or_tty->print_cr("ABANDONED PAUSES");
apetrusenko@1112 2186 print_abandoned_summary(_abandoned_summary);
ysr@777 2187
ysr@777 2188 gclog_or_tty->print_cr("MISC");
ysr@777 2189 print_summary_sd(0, "Stop World", _all_stop_world_times_ms);
ysr@777 2190 print_summary_sd(0, "Yields", _all_yield_times_ms);
ysr@777 2191 for (int i = 0; i < _aux_num; ++i) {
ysr@777 2192 if (_all_aux_times_ms[i].num() > 0) {
ysr@777 2193 char buffer[96];
ysr@777 2194 sprintf(buffer, "Aux%d", i);
ysr@777 2195 print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
ysr@777 2196 }
ysr@777 2197 }
ysr@777 2198
ysr@777 2199 size_t all_region_num = _region_num_young + _region_num_tenured;
ysr@777 2200 gclog_or_tty->print_cr(" New Regions %8d, Young %8d (%6.2lf%%), "
ysr@777 2201 "Tenured %8d (%6.2lf%%)",
ysr@777 2202 all_region_num,
ysr@777 2203 _region_num_young,
ysr@777 2204 (double) _region_num_young / (double) all_region_num * 100.0,
ysr@777 2205 _region_num_tenured,
ysr@777 2206 (double) _region_num_tenured / (double) all_region_num * 100.0);
ysr@777 2207 }
ysr@777 2208 if (TraceGen1Time) {
ysr@777 2209 if (_all_full_gc_times_ms->num() > 0) {
ysr@777 2210 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
ysr@777 2211 _all_full_gc_times_ms->num(),
ysr@777 2212 _all_full_gc_times_ms->sum() / 1000.0);
ysr@777 2213 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times_ms->avg());
ysr@777 2214 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
ysr@777 2215 _all_full_gc_times_ms->sd(),
ysr@777 2216 _all_full_gc_times_ms->maximum());
ysr@777 2217 }
ysr@777 2218 }
ysr@777 2219 }
ysr@777 2220
ysr@777 2221 void G1CollectorPolicy::print_yg_surv_rate_info() const {
ysr@777 2222 #ifndef PRODUCT
ysr@777 2223 _short_lived_surv_rate_group->print_surv_rate_summary();
ysr@777 2224 // add this call for any other surv rate groups
ysr@777 2225 #endif // PRODUCT
ysr@777 2226 }
ysr@777 2227
ysr@777 2228 bool
ysr@777 2229 G1CollectorPolicy::should_add_next_region_to_young_list() {
ysr@777 2230 assert(in_young_gc_mode(), "should be in young GC mode");
ysr@777 2231 bool ret;
johnc@1829 2232 size_t young_list_length = _g1->young_list()->length();
apetrusenko@980 2233 size_t young_list_max_length = _young_list_target_length;
apetrusenko@980 2234 if (G1FixedEdenSize) {
apetrusenko@980 2235 young_list_max_length -= _max_survivor_regions;
apetrusenko@980 2236 }
apetrusenko@980 2237 if (young_list_length < young_list_max_length) {
ysr@777 2238 ret = true;
ysr@777 2239 ++_region_num_young;
ysr@777 2240 } else {
ysr@777 2241 ret = false;
ysr@777 2242 ++_region_num_tenured;
ysr@777 2243 }
ysr@777 2244
ysr@777 2245 return ret;
ysr@777 2246 }
ysr@777 2247
ysr@777 2248 #ifndef PRODUCT
ysr@777 2249 // for debugging, bit of a hack...
ysr@777 2250 static char*
ysr@777 2251 region_num_to_mbs(int length) {
ysr@777 2252 static char buffer[64];
ysr@777 2253 double bytes = (double) (length * HeapRegion::GrainBytes);
ysr@777 2254 double mbs = bytes / (double) (1024 * 1024);
ysr@777 2255 sprintf(buffer, "%7.2lfMB", mbs);
ysr@777 2256 return buffer;
ysr@777 2257 }
ysr@777 2258 #endif // PRODUCT
ysr@777 2259
apetrusenko@980 2260 size_t G1CollectorPolicy::max_regions(int purpose) {
ysr@777 2261 switch (purpose) {
ysr@777 2262 case GCAllocForSurvived:
apetrusenko@980 2263 return _max_survivor_regions;
ysr@777 2264 case GCAllocForTenured:
apetrusenko@980 2265 return REGIONS_UNLIMITED;
ysr@777 2266 default:
apetrusenko@980 2267 ShouldNotReachHere();
apetrusenko@980 2268 return REGIONS_UNLIMITED;
ysr@777 2269 };
ysr@777 2270 }
ysr@777 2271
apetrusenko@980 2272 // Calculates survivor space parameters.
apetrusenko@980 2273 void G1CollectorPolicy::calculate_survivors_policy()
apetrusenko@980 2274 {
apetrusenko@980 2275 if (G1FixedSurvivorSpaceSize == 0) {
apetrusenko@980 2276 _max_survivor_regions = _young_list_target_length / SurvivorRatio;
apetrusenko@980 2277 } else {
apetrusenko@982 2278 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
apetrusenko@980 2279 }
apetrusenko@980 2280
apetrusenko@980 2281 if (G1FixedTenuringThreshold) {
apetrusenko@980 2282 _tenuring_threshold = MaxTenuringThreshold;
apetrusenko@980 2283 } else {
apetrusenko@980 2284 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
apetrusenko@980 2285 HeapRegion::GrainWords * _max_survivor_regions);
apetrusenko@980 2286 }
apetrusenko@980 2287 }
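// Example (hypothetical values): with a young-list target of 60 regions and
// SurvivorRatio at 8, _max_survivor_regions = 60 / 8 = 7; the tenuring
// threshold is then chosen so that the survivors predicted by the age table
// fit in 7 * HeapRegion::GrainWords words, unless G1FixedTenuringThreshold
// pins it at MaxTenuringThreshold.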
apetrusenko@980 2288
ysr@777 2289 bool
ysr@777 2290 G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
ysr@777 2291 word_size) {
ysr@777 2292 assert(_g1->regions_accounted_for(), "Region leakage!");
ysr@777 2293 double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
ysr@777 2294
johnc@1829 2295 size_t young_list_length = _g1->young_list()->length();
apetrusenko@980 2296 size_t young_list_max_length = _young_list_target_length;
apetrusenko@980 2297 if (G1FixedEdenSize) {
apetrusenko@980 2298 young_list_max_length -= _max_survivor_regions;
apetrusenko@980 2299 }
apetrusenko@980 2300 bool reached_target_length = young_list_length >= young_list_max_length;
ysr@777 2301
ysr@777 2302 if (in_young_gc_mode()) {
ysr@777 2303 if (reached_target_length) {
johnc@1829 2304 assert( young_list_length > 0 && _g1->young_list()->length() > 0,
ysr@777 2305 "invariant" );
ysr@777 2306 _target_pause_time_ms = max_pause_time_ms;
ysr@777 2307 return true;
ysr@777 2308 }
ysr@777 2309 } else {
ysr@777 2310 guarantee( false, "should not reach here" );
ysr@777 2311 }
ysr@777 2312
ysr@777 2313 return false;
ysr@777 2314 }
ysr@777 2315
ysr@777 2316 #ifndef PRODUCT
ysr@777 2317 class HRSortIndexIsOKClosure: public HeapRegionClosure {
ysr@777 2318 CollectionSetChooser* _chooser;
ysr@777 2319 public:
ysr@777 2320 HRSortIndexIsOKClosure(CollectionSetChooser* chooser) :
ysr@777 2321 _chooser(chooser) {}
ysr@777 2322
ysr@777 2323 bool doHeapRegion(HeapRegion* r) {
ysr@777 2324 if (!r->continuesHumongous()) {
ysr@777 2325 assert(_chooser->regionProperlyOrdered(r), "Ought to be.");
ysr@777 2326 }
ysr@777 2327 return false;
ysr@777 2328 }
ysr@777 2329 };
ysr@777 2330
ysr@777 2331 bool G1CollectorPolicy_BestRegionsFirst::assertMarkedBytesDataOK() {
ysr@777 2332 HRSortIndexIsOKClosure cl(_collectionSetChooser);
ysr@777 2333 _g1->heap_region_iterate(&cl);
ysr@777 2334 return true;
ysr@777 2335 }
ysr@777 2336 #endif
ysr@777 2337
ysr@777 2338 void
tonyp@1794 2339 G1CollectorPolicy::decide_on_conc_mark_initiation() {
tonyp@1794 2340 // We are about to decide on whether this pause will be an
tonyp@1794 2341 // initial-mark pause.
tonyp@1794 2342
tonyp@1794 2343 // First, during_initial_mark_pause() should not be already set. We
tonyp@1794 2344 // will set it here if we have to. However, it should be cleared by
tonyp@1794 2345 // the end of the pause (it's only set for the duration of an
tonyp@1794 2346 // initial-mark pause).
tonyp@1794 2347 assert(!during_initial_mark_pause(), "pre-condition");
tonyp@1794 2348
tonyp@1794 2349 if (initiate_conc_mark_if_possible()) {
tonyp@1794 2350 // We had noticed on a previous pause that the heap occupancy has
tonyp@1794 2351 // gone over the initiating threshold and we should start a
tonyp@1794 2352 // concurrent marking cycle. So we might initiate one.
tonyp@1794 2353
tonyp@1794 2354 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
tonyp@1794 2355 if (!during_cycle) {
tonyp@1794 2356 // The concurrent marking thread is not "during a cycle", i.e.,
tonyp@1794 2357 // it has completed the last one. So we can go ahead and
tonyp@1794 2358 // initiate a new cycle.
tonyp@1794 2359
tonyp@1794 2360 set_during_initial_mark_pause();
tonyp@1794 2361
tonyp@1794 2362 // And we can now clear initiate_conc_mark_if_possible() as
tonyp@1794 2363 // we've already acted on it.
tonyp@1794 2364 clear_initiate_conc_mark_if_possible();
tonyp@1794 2365 } else {
tonyp@1794 2366 // The concurrent marking thread is still finishing up the
tonyp@1794 2367 // previous cycle. If we start one right now the two cycles
tonyp@1794 2368 // overlap. In particular, the concurrent marking thread might
tonyp@1794 2369 // be in the process of clearing the next marking bitmap (which
tonyp@1794 2370 // we will use for the next cycle if we start one). Starting a
tonyp@1794 2371 // cycle now will be bad given that parts of the marking
tonyp@1794 2372 // information might get cleared by the marking thread. And we
tonyp@1794 2373 // cannot wait for the marking thread to finish the cycle as it
tonyp@1794 2374 // periodically yields while clearing the next marking bitmap
tonyp@1794 2375 // and, if it's in a yield point, it's waiting for us to
tonyp@1794 2376 // finish. So, at this point we will not start a cycle and we'll
tonyp@1794 2377 // let the concurrent marking thread complete the last one.
tonyp@1794 2378 }
tonyp@1794 2379 }
tonyp@1794 2380 }
tonyp@1794 2381
tonyp@1794 2382 void
ysr@777 2383 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2384 record_collection_pause_start(double start_time_sec, size_t start_used) {
ysr@777 2385 G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
ysr@777 2386 }
ysr@777 2387
ysr@777 2388 class NextNonCSElemFinder: public HeapRegionClosure {
ysr@777 2389 HeapRegion* _res;
ysr@777 2390 public:
ysr@777 2391 NextNonCSElemFinder(): _res(NULL) {}
ysr@777 2392 bool doHeapRegion(HeapRegion* r) {
ysr@777 2393 if (!r->in_collection_set()) {
ysr@777 2394 _res = r;
ysr@777 2395 return true;
ysr@777 2396 } else {
ysr@777 2397 return false;
ysr@777 2398 }
ysr@777 2399 }
ysr@777 2400 HeapRegion* res() { return _res; }
ysr@777 2401 };
ysr@777 2402
ysr@777 2403 class KnownGarbageClosure: public HeapRegionClosure {
ysr@777 2404 CollectionSetChooser* _hrSorted;
ysr@777 2405
ysr@777 2406 public:
ysr@777 2407 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
ysr@777 2408 _hrSorted(hrSorted)
ysr@777 2409 {}
ysr@777 2410
ysr@777 2411 bool doHeapRegion(HeapRegion* r) {
ysr@777 2412 // We only include humongous regions in collection
ysr@777 2413 // sets when concurrent mark shows that their contained object is
ysr@777 2414 // unreachable.
ysr@777 2415
ysr@777 2416 // Do we have any marking information for this region?
ysr@777 2417 if (r->is_marked()) {
ysr@777 2418 // We don't include humongous regions in collection
ysr@777 2419 // sets because we collect them immediately at the end of a marking
ysr@777 2420 // cycle. We also don't include young regions because we *must*
ysr@777 2421 // include them in the next collection pause.
ysr@777 2422 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2423 _hrSorted->addMarkedHeapRegion(r);
ysr@777 2424 }
ysr@777 2425 }
ysr@777 2426 return false;
ysr@777 2427 }
ysr@777 2428 };
ysr@777 2429
ysr@777 2430 class ParKnownGarbageHRClosure: public HeapRegionClosure {
ysr@777 2431 CollectionSetChooser* _hrSorted;
ysr@777 2432 jint _marked_regions_added;
ysr@777 2433 jint _chunk_size;
ysr@777 2434 jint _cur_chunk_idx;
ysr@777 2435 jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
ysr@777 2436 int _worker;
ysr@777 2437 int _invokes;
ysr@777 2438
ysr@777 2439 void get_new_chunk() {
ysr@777 2440 _cur_chunk_idx = _hrSorted->getParMarkedHeapRegionChunk(_chunk_size);
ysr@777 2441 _cur_chunk_end = _cur_chunk_idx + _chunk_size;
ysr@777 2442 }
ysr@777 2443 void add_region(HeapRegion* r) {
ysr@777 2444 if (_cur_chunk_idx == _cur_chunk_end) {
ysr@777 2445 get_new_chunk();
ysr@777 2446 }
ysr@777 2447 assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
ysr@777 2448 _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
ysr@777 2449 _marked_regions_added++;
ysr@777 2450 _cur_chunk_idx++;
ysr@777 2451 }
ysr@777 2452
ysr@777 2453 public:
ysr@777 2454 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
ysr@777 2455 jint chunk_size,
ysr@777 2456 int worker) :
ysr@777 2457 _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
ysr@777 2458 _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
ysr@777 2459 _invokes(0)
ysr@777 2460 {}
ysr@777 2461
ysr@777 2462 bool doHeapRegion(HeapRegion* r) {
ysr@777 2463 // We only include humongous regions in collection
ysr@777 2464 // sets when concurrent mark shows that their contained object is
ysr@777 2465 // unreachable.
ysr@777 2466 _invokes++;
ysr@777 2467
ysr@777 2468 // Do we have any marking information for this region?
ysr@777 2469 if (r->is_marked()) {
ysr@777 2470 // We don't include humongous regions in collection
ysr@777 2471 // sets because we collect them immediately at the end of a marking
ysr@777 2472 // cycle.
ysr@777 2473 // We also do not include young regions in collection sets
ysr@777 2474 if (!r->isHumongous() && !r->is_young()) {
ysr@777 2475 add_region(r);
ysr@777 2476 }
ysr@777 2477 }
ysr@777 2478 return false;
ysr@777 2479 }
ysr@777 2480 jint marked_regions_added() { return _marked_regions_added; }
ysr@777 2481 int invokes() { return _invokes; }
ysr@777 2482 };
ysr@777 2483
ysr@777 2484 class ParKnownGarbageTask: public AbstractGangTask {
ysr@777 2485 CollectionSetChooser* _hrSorted;
ysr@777 2486 jint _chunk_size;
ysr@777 2487 G1CollectedHeap* _g1;
ysr@777 2488 public:
ysr@777 2489 ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
ysr@777 2490 AbstractGangTask("ParKnownGarbageTask"),
ysr@777 2491 _hrSorted(hrSorted), _chunk_size(chunk_size),
ysr@777 2492 _g1(G1CollectedHeap::heap())
ysr@777 2493 {}
ysr@777 2494
ysr@777 2495 void work(int i) {
ysr@777 2496 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
ysr@777 2497 // Back to zero for the claim value.
tonyp@790 2498 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
tonyp@790 2499 HeapRegion::InitialClaimValue);
ysr@777 2500 jint regions_added = parKnownGarbageCl.marked_regions_added();
ysr@777 2501 _hrSorted->incNumMarkedHeapRegions(regions_added);
ysr@777 2502 if (G1PrintParCleanupStats) {
ysr@777 2503 gclog_or_tty->print(" Thread %d called %d times, added %d regions to list.\n",
ysr@777 2504 i, parKnownGarbageCl.invokes(), regions_added);
ysr@777 2505 }
ysr@777 2506 }
ysr@777 2507 };
ysr@777 2508
ysr@777 2509 void
ysr@777 2510 G1CollectorPolicy_BestRegionsFirst::
ysr@777 2511 record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 2512 size_t max_live_bytes) {
ysr@777 2513 double start;
ysr@777 2514 if (G1PrintParCleanupStats) start = os::elapsedTime();
ysr@777 2515 record_concurrent_mark_cleanup_end_work1(freed_bytes, max_live_bytes);
ysr@777 2516
ysr@777 2517 _collectionSetChooser->clearMarkedHeapRegions();
ysr@777 2518 double clear_marked_end;
ysr@777 2519 if (G1PrintParCleanupStats) {
ysr@777 2520 clear_marked_end = os::elapsedTime();
ysr@777 2521 gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
ysr@777 2522 (clear_marked_end - start)*1000.0);
ysr@777 2523 }
ysr@777 2524 if (ParallelGCThreads > 0) {
ysr@777 2525 const size_t OverpartitionFactor = 4;
ysr@777 2526 const size_t MinChunkSize = 8;
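    // Over-partition the work so that each worker claims several smaller
    // chunks rather than one large one, which evens out load imbalance.
    // For example, with 2048 regions and 8 GC threads the chunk size is
    // MAX2(2048 / (8 * 4), 8) = 64, i.e. roughly four chunks per thread.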
ysr@777 2527 const size_t ChunkSize =
ysr@777 2528 MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
ysr@777 2529 MinChunkSize);
ysr@777 2530 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
ysr@777 2531 ChunkSize);
ysr@777 2532 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
ysr@777 2533 (int) ChunkSize);
ysr@777 2534 _g1->workers()->run_task(&parKnownGarbageTask);
tonyp@790 2535
tonyp@790 2536 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
tonyp@790 2537 "sanity check");
ysr@777 2538 } else {
ysr@777 2539 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
ysr@777 2540 _g1->heap_region_iterate(&knownGarbagecl);
ysr@777 2541 }
ysr@777 2542 double known_garbage_end;
ysr@777 2543 if (G1PrintParCleanupStats) {
ysr@777 2544 known_garbage_end = os::elapsedTime();
ysr@777 2545 gclog_or_tty->print_cr(" compute known garbage: %8.3f ms.",
ysr@777 2546 (known_garbage_end - clear_marked_end)*1000.0);
ysr@777 2547 }
ysr@777 2548 _collectionSetChooser->sortMarkedHeapRegions();
ysr@777 2549 double sort_end;
ysr@777 2550 if (G1PrintParCleanupStats) {
ysr@777 2551 sort_end = os::elapsedTime();
ysr@777 2552 gclog_or_tty->print_cr(" sorting: %8.3f ms.",
ysr@777 2553 (sort_end - known_garbage_end)*1000.0);
ysr@777 2554 }
ysr@777 2555
ysr@777 2556 record_concurrent_mark_cleanup_end_work2();
ysr@777 2557 double work2_end;
ysr@777 2558 if (G1PrintParCleanupStats) {
ysr@777 2559 work2_end = os::elapsedTime();
ysr@777 2560 gclog_or_tty->print_cr(" work2: %8.3f ms.",
ysr@777 2561 (work2_end - sort_end)*1000.0);
ysr@777 2562 }
ysr@777 2563 }
ysr@777 2564
johnc@1829 2565 // Add the heap region at the head of the non-incremental collection set
ysr@777 2566 void G1CollectorPolicy::
ysr@777 2567 add_to_collection_set(HeapRegion* hr) {
johnc@1829 2568 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2569 assert(!hr->is_young(), "non-incremental add of young region");
johnc@1829 2570
tonyp@1717 2571 if (G1PrintHeapRegions) {
tonyp@1823 2572 gclog_or_tty->print_cr("added region to cset "
tonyp@1823 2573 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
tonyp@1823 2574 "top "PTR_FORMAT", %s",
tonyp@1823 2575 hr->hrs_index(), hr->bottom(), hr->end(),
tonyp@1823 2576 hr->top(), hr->is_young() ? "YOUNG" : "NOT_YOUNG");
ysr@777 2577 }
ysr@777 2578
ysr@777 2579 if (_g1->mark_in_progress())
ysr@777 2580 _g1->concurrent_mark()->registerCSetRegion(hr);
ysr@777 2581
johnc@1829 2582 assert(!hr->in_collection_set(), "should not already be in the CSet");
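  // Prepend the region to the singly-linked (non-incremental) collection
  // set list: _collection_set always points at the most recently added
  // region.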
ysr@777 2583 hr->set_in_collection_set(true);
ysr@777 2584 hr->set_next_in_collection_set(_collection_set);
ysr@777 2585 _collection_set = hr;
ysr@777 2586 _collection_set_size++;
ysr@777 2587 _collection_set_bytes_used_before += hr->used();
tonyp@961 2588 _g1->register_region_with_in_cset_fast_test(hr);
ysr@777 2589 }
ysr@777 2590
johnc@1829 2591 // Initialize the per-collection-set information
johnc@1829 2592 void G1CollectorPolicy::start_incremental_cset_building() {
johnc@1829 2593 assert(_inc_cset_build_state == Inactive, "Precondition");
johnc@1829 2594
johnc@1829 2595 _inc_cset_head = NULL;
johnc@1829 2596 _inc_cset_tail = NULL;
johnc@1829 2597 _inc_cset_size = 0;
johnc@1829 2598 _inc_cset_bytes_used_before = 0;
johnc@1829 2599
johnc@1829 2600 if (in_young_gc_mode()) {
johnc@1829 2601 _inc_cset_young_index = 0;
johnc@1829 2602 }
johnc@1829 2603
johnc@1829 2604 _inc_cset_max_finger = 0;
johnc@1829 2605 _inc_cset_recorded_young_bytes = 0;
johnc@1829 2606 _inc_cset_recorded_rs_lengths = 0;
johnc@1829 2607 _inc_cset_predicted_elapsed_time_ms = 0;
johnc@1829 2608 _inc_cset_predicted_bytes_to_copy = 0;
johnc@1829 2609 _inc_cset_build_state = Active;
johnc@1829 2610 }
johnc@1829 2611
johnc@1829 2612 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
johnc@1829 2613 // This routine is used when:
johnc@1829 2614 // * adding survivor regions to the incremental cset at the end of an
johnc@1829 2615 // evacuation pause,
johnc@1829 2616 // * adding the current allocation region to the incremental cset
johnc@1829 2617 // when it is retired, and
johnc@1829 2618 // * updating existing policy information for a region in the
johnc@1829 2619 // incremental cset via young list RSet sampling.
johnc@1829 2620 // Therefore this routine may be called at a safepoint by the
johnc@1829 2621 // VM thread, or in-between safepoints by mutator threads (when
johnc@1829 2622 // retiring the current allocation region) or a concurrent
johnc@1829 2623 // refine thread (RSet sampling).
johnc@1829 2624
johnc@1829 2625 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, true);
johnc@1829 2626 size_t used_bytes = hr->used();
johnc@1829 2627
johnc@1829 2628 _inc_cset_recorded_rs_lengths += rs_length;
johnc@1829 2629 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
johnc@1829 2630
johnc@1829 2631 _inc_cset_bytes_used_before += used_bytes;
johnc@1829 2632
johnc@1829 2633   // Cache the values we have added to the aggregated information
johnc@1829 2634 // in the heap region in case we have to remove this region from
johnc@1829 2635 // the incremental collection set, or it is updated by the
johnc@1829 2636 // rset sampling code
johnc@1829 2637 hr->set_recorded_rs_length(rs_length);
johnc@1829 2638 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
johnc@1829 2639
johnc@1829 2640 #if PREDICTIONS_VERBOSE
johnc@1829 2641 size_t bytes_to_copy = predict_bytes_to_copy(hr);
johnc@1829 2642 _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
johnc@1829 2643
johnc@1829 2644 // Record the number of bytes used in this region
johnc@1829 2645 _inc_cset_recorded_young_bytes += used_bytes;
johnc@1829 2646
johnc@1829 2647   // Cache the values we have added to the aggregated information
johnc@1829 2648 // in the heap region in case we have to remove this region from
johnc@1829 2649 // the incremental collection set, or it is updated by the
johnc@1829 2650 // rset sampling code
johnc@1829 2651 hr->set_predicted_bytes_to_copy(bytes_to_copy);
johnc@1829 2652 #endif // PREDICTIONS_VERBOSE
johnc@1829 2653 }
johnc@1829 2654
johnc@1829 2655 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
johnc@1829 2656 // This routine is currently only called as part of the updating of
johnc@1829 2657 // existing policy information for regions in the incremental cset that
johnc@1829 2658 // is performed by the concurrent refine thread(s) as part of young list
johnc@1829 2659 // RSet sampling. Therefore we should not be at a safepoint.
johnc@1829 2660
johnc@1829 2661 assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
johnc@1829 2662 assert(hr->is_young(), "it should be");
johnc@1829 2663
johnc@1829 2664 size_t used_bytes = hr->used();
johnc@1829 2665 size_t old_rs_length = hr->recorded_rs_length();
johnc@1829 2666 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
johnc@1829 2667
johnc@1829 2668 // Subtract the old recorded/predicted policy information for
johnc@1829 2669 // the given heap region from the collection set info.
johnc@1829 2670 _inc_cset_recorded_rs_lengths -= old_rs_length;
johnc@1829 2671 _inc_cset_predicted_elapsed_time_ms -= old_elapsed_time_ms;
johnc@1829 2672
johnc@1829 2673 _inc_cset_bytes_used_before -= used_bytes;
johnc@1829 2674
johnc@1829 2675 // Clear the values cached in the heap region
johnc@1829 2676 hr->set_recorded_rs_length(0);
johnc@1829 2677 hr->set_predicted_elapsed_time_ms(0);
johnc@1829 2678
johnc@1829 2679 #if PREDICTIONS_VERBOSE
johnc@1829 2680 size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
johnc@1829 2681 _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
johnc@1829 2682
johnc@1829 2683 // Subtract the number of bytes used in this region
johnc@1829 2684 _inc_cset_recorded_young_bytes -= used_bytes;
johnc@1829 2685
johnc@1829 2686 // Clear the values cached in the heap region
johnc@1829 2687 hr->set_predicted_bytes_to_copy(0);
johnc@1829 2688 #endif // PREDICTIONS_VERBOSE
johnc@1829 2689 }
johnc@1829 2690
johnc@1829 2691 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
johnc@1829 2692 // Update the collection set information that is dependent on the new RS length
johnc@1829 2693 assert(hr->is_young(), "Precondition");
johnc@1829 2694
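  // This is implemented as remove-then-add so that the values cached in
  // the region and the aggregate incremental cset totals stay consistent:
  // the old contribution is subtracted before the new rs_length is added.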
johnc@1829 2695 remove_from_incremental_cset_info(hr);
johnc@1829 2696 add_to_incremental_cset_info(hr, new_rs_length);
johnc@1829 2697 }
johnc@1829 2698
johnc@1829 2699 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
johnc@1829 2700 assert( hr->is_young(), "invariant");
johnc@1829 2701 assert( hr->young_index_in_cset() == -1, "invariant" );
johnc@1829 2702 assert(_inc_cset_build_state == Active, "Precondition");
johnc@1829 2703
johnc@1829 2704   // We need to clear and then set the cached recorded collection set
johnc@1829 2705 // information in the heap region here (before the region gets added
johnc@1829 2706 // to the collection set). An individual heap region's cached values
johnc@1829 2707 // are calculated, aggregated with the policy collection set info,
johnc@1829 2708 // and cached in the heap region here (initially) and (subsequently)
johnc@1829 2709 // by the Young List sampling code.
johnc@1829 2710
johnc@1829 2711 size_t rs_length = hr->rem_set()->occupied();
johnc@1829 2712 add_to_incremental_cset_info(hr, rs_length);
johnc@1829 2713
johnc@1829 2714 HeapWord* hr_end = hr->end();
johnc@1829 2715 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
johnc@1829 2716
johnc@1829 2717 assert(!hr->in_collection_set(), "invariant");
johnc@1829 2718 hr->set_in_collection_set(true);
johnc@1829 2719 assert( hr->next_in_collection_set() == NULL, "invariant");
johnc@1829 2720
johnc@1829 2721 _inc_cset_size++;
johnc@1829 2722 _g1->register_region_with_in_cset_fast_test(hr);
johnc@1829 2723
johnc@1829 2724 hr->set_young_index_in_cset((int) _inc_cset_young_index);
johnc@1829 2725 ++_inc_cset_young_index;
johnc@1829 2726 }
johnc@1829 2727
johnc@1829 2728 // Add the region at the RHS of the incremental cset
johnc@1829 2729 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
johnc@1829 2730 // We should only ever be appending survivors at the end of a pause
johnc@1829 2731 assert( hr->is_survivor(), "Logic");
johnc@1829 2732
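  // Survivors are appended at the tail here, while newly allocated
  // (mutator) regions are prepended at the head by the _lhs variant
  // below, keeping the incremental cset ordered as
  // [Newly Young Regions ++ Survivors from last pause].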
johnc@1829 2733 // Do the 'common' stuff
johnc@1829 2734 add_region_to_incremental_cset_common(hr);
johnc@1829 2735
johnc@1829 2736 // Now add the region at the right hand side
johnc@1829 2737 if (_inc_cset_tail == NULL) {
johnc@1829 2738 assert(_inc_cset_head == NULL, "invariant");
johnc@1829 2739 _inc_cset_head = hr;
johnc@1829 2740 } else {
johnc@1829 2741 _inc_cset_tail->set_next_in_collection_set(hr);
johnc@1829 2742 }
johnc@1829 2743 _inc_cset_tail = hr;
johnc@1829 2744
johnc@1829 2745 if (G1PrintHeapRegions) {
johnc@1829 2746 gclog_or_tty->print_cr(" added region to incremental cset (RHS) "
johnc@1829 2747 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
johnc@1829 2748 "top "PTR_FORMAT", young %s",
johnc@1829 2749 hr->hrs_index(), hr->bottom(), hr->end(),
johnc@1829 2750 hr->top(), (hr->is_young()) ? "YES" : "NO");
johnc@1829 2751 }
johnc@1829 2752 }
johnc@1829 2753
johnc@1829 2754 // Add the region to the LHS of the incremental cset
johnc@1829 2755 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
johnc@1829 2756 // Survivors should be added to the RHS at the end of a pause
johnc@1829 2757 assert(!hr->is_survivor(), "Logic");
johnc@1829 2758
johnc@1829 2759 // Do the 'common' stuff
johnc@1829 2760 add_region_to_incremental_cset_common(hr);
johnc@1829 2761
johnc@1829 2762 // Add the region at the left hand side
johnc@1829 2763 hr->set_next_in_collection_set(_inc_cset_head);
johnc@1829 2764 if (_inc_cset_head == NULL) {
johnc@1829 2765 assert(_inc_cset_tail == NULL, "Invariant");
johnc@1829 2766 _inc_cset_tail = hr;
johnc@1829 2767 }
johnc@1829 2768 _inc_cset_head = hr;
johnc@1829 2769
johnc@1829 2770 if (G1PrintHeapRegions) {
johnc@1829 2771 gclog_or_tty->print_cr(" added region to incremental cset (LHS) "
johnc@1829 2772 "%d:["PTR_FORMAT", "PTR_FORMAT"], "
johnc@1829 2773 "top "PTR_FORMAT", young %s",
johnc@1829 2774 hr->hrs_index(), hr->bottom(), hr->end(),
johnc@1829 2775 hr->top(), (hr->is_young()) ? "YES" : "NO");
johnc@1829 2776 }
johnc@1829 2777 }
johnc@1829 2778
johnc@1829 2779 #ifndef PRODUCT
johnc@1829 2780 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
johnc@1829 2781 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
johnc@1829 2782
johnc@1829 2783 st->print_cr("\nCollection_set:");
johnc@1829 2784 HeapRegion* csr = list_head;
johnc@1829 2785 while (csr != NULL) {
johnc@1829 2786 HeapRegion* next = csr->next_in_collection_set();
johnc@1829 2787 assert(csr->in_collection_set(), "bad CS");
johnc@1829 2788     st->print_cr("  ["PTR_FORMAT"-"PTR_FORMAT"], t: "PTR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", C: "PTR_FORMAT", "
johnc@1829 2789 "age: %4d, y: %d, surv: %d",
johnc@1829 2790 csr->bottom(), csr->end(),
johnc@1829 2791 csr->top(),
johnc@1829 2792 csr->prev_top_at_mark_start(),
johnc@1829 2793 csr->next_top_at_mark_start(),
johnc@1829 2794 csr->top_at_conc_mark_count(),
johnc@1829 2795 csr->age_in_surv_rate_group_cond(),
johnc@1829 2796 csr->is_young(),
johnc@1829 2797 csr->is_survivor());
johnc@1829 2798 csr = next;
johnc@1829 2799 }
johnc@1829 2800 }
johnc@1829 2801 #endif // !PRODUCT
johnc@1829 2802
johnc@1829 2803 bool
johnc@1829 2804 G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
johnc@1829 2805 // Set this here - in case we're not doing young collections.
johnc@1829 2806 double non_young_start_time_sec = os::elapsedTime();
johnc@1829 2807
johnc@1829 2808 // The result that this routine will return. This will be set to
johnc@1829 2809 // false if:
johnc@1829 2810 // * we're doing a young or partially young collection and we
johnc@1829 2811   //     have added the young regions to the collection set, or
johnc@1829 2812 // * we add old regions to the collection set.
johnc@1829 2813 bool abandon_collection = true;
johnc@1829 2814
ysr@777 2815 start_recording_regions();
ysr@777 2816
ysr@1523 2817 guarantee(_target_pause_time_ms > -1.0
ysr@1523 2818 NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
apetrusenko@1112 2819 "_target_pause_time_ms should have been set!");
ysr@1523 2820 #ifndef PRODUCT
ysr@1523 2821 if (_target_pause_time_ms <= -1.0) {
ysr@1523 2822 assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
ysr@1523 2823 _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
ysr@1523 2824 }
ysr@1523 2825 #endif
apetrusenko@1112 2826 assert(_collection_set == NULL, "Precondition");
ysr@777 2827
ysr@777 2828 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
ysr@777 2829 double predicted_pause_time_ms = base_time_ms;
ysr@777 2830
ysr@777 2831 double target_time_ms = _target_pause_time_ms;
ysr@777 2832 double time_remaining_ms = target_time_ms - base_time_ms;
ysr@777 2833
ysr@777 2834 // the 10% and 50% values are arbitrary...
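  // If less than 10% of the target remains once the base (fixed) costs
  // are accounted for, record that we are over budget and fall back to
  // allowing 50% of the target for choosing regions.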
ysr@777 2835 if (time_remaining_ms < 0.10*target_time_ms) {
ysr@777 2836 time_remaining_ms = 0.50 * target_time_ms;
ysr@777 2837 _within_target = false;
ysr@777 2838 } else {
ysr@777 2839 _within_target = true;
ysr@777 2840 }
ysr@777 2841
ysr@777 2842 // We figure out the number of bytes available for future to-space.
ysr@777 2843 // For new regions without marking information, we must assume the
ysr@777 2844 // worst-case of complete survival. If we have marking information for a
ysr@777 2845 // region, we can bound the amount of live data. We can add a number of
ysr@777 2846 // such regions, as long as the sum of the live data bounds does not
ysr@777 2847 // exceed the available evacuation space.
ysr@777 2848 size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
ysr@777 2849
ysr@777 2850 size_t expansion_bytes =
ysr@777 2851 _g1->expansion_regions() * HeapRegion::GrainBytes;
ysr@777 2852
apetrusenko@1112 2853 _collection_set_bytes_used_before = 0;
apetrusenko@1112 2854 _collection_set_size = 0;
ysr@777 2855
ysr@777 2856 // Adjust for expansion and slop.
ysr@777 2857 max_live_bytes = max_live_bytes + expansion_bytes;
ysr@777 2858
apetrusenko@1112 2859 assert(_g1->regions_accounted_for(), "Region leakage!");
ysr@777 2860
ysr@777 2861 HeapRegion* hr;
ysr@777 2862 if (in_young_gc_mode()) {
ysr@777 2863 double young_start_time_sec = os::elapsedTime();
ysr@777 2864
ysr@777 2865 if (G1PolicyVerbose > 0) {
ysr@777 2866 gclog_or_tty->print_cr("Adding %d young regions to the CSet",
johnc@1829 2867 _g1->young_list()->length());
ysr@777 2868 }
johnc@1829 2869
ysr@777 2870 _young_cset_length = 0;
ysr@777 2871     _last_young_gc_full = full_young_gcs();
johnc@1829 2872
ysr@777 2873 if (_last_young_gc_full)
ysr@777 2874 ++_full_young_pause_num;
ysr@777 2875 else
ysr@777 2876 ++_partial_young_pause_num;
johnc@1829 2877
johnc@1829 2878     // The young list is laid out with the survivor regions from the
johnc@1829 2879     // previous pause appended to its RHS, i.e.
johnc@1829 2880     //   [Newly Young Regions ++ Survivors from last pause].
johnc@1829 2881
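    // Convert the survivors from the previous pause: tag each one as
    // young so that it is treated like the rest of this pause's young
    // collection set from here on.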
johnc@1829 2882 hr = _g1->young_list()->first_survivor_region();
ysr@777 2883 while (hr != NULL) {
johnc@1829 2884 assert(hr->is_survivor(), "badly formed young list");
johnc@1829 2885 hr->set_young();
johnc@1829 2886 hr = hr->get_next_young_region();
ysr@777 2887 }
ysr@777 2888
johnc@1829 2889 // Clear the fields that point to the survivor list - they are
johnc@1829 2890 // all young now.
johnc@1829 2891 _g1->young_list()->clear_survivors();
johnc@1829 2892
johnc@1829 2893 if (_g1->mark_in_progress())
johnc@1829 2894 _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
johnc@1829 2895
johnc@1829 2896 _young_cset_length = _inc_cset_young_index;
johnc@1829 2897 _collection_set = _inc_cset_head;
johnc@1829 2898 _collection_set_size = _inc_cset_size;
johnc@1829 2899 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
johnc@1829 2900
johnc@1829 2901 // For young regions in the collection set, we assume the worst
johnc@1829 2902 // case of complete survival
johnc@1829 2903 max_live_bytes -= _inc_cset_size * HeapRegion::GrainBytes;
johnc@1829 2904
johnc@1829 2905 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
johnc@1829 2906 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
johnc@1829 2907
johnc@1829 2908 // The number of recorded young regions is the incremental
johnc@1829 2909 // collection set's current size
johnc@1829 2910 set_recorded_young_regions(_inc_cset_size);
johnc@1829 2911 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
johnc@1829 2912 set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
johnc@1829 2913 #if PREDICTIONS_VERBOSE
johnc@1829 2914 set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
johnc@1829 2915 #endif // PREDICTIONS_VERBOSE
johnc@1829 2916
johnc@1829 2917 if (G1PolicyVerbose > 0) {
johnc@1829 2918       gclog_or_tty->print_cr("  Added " SIZE_FORMAT " Young Regions to CS.",
johnc@1829 2919 _inc_cset_size);
johnc@1829 2920 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
johnc@1829 2921 max_live_bytes/K);
johnc@1829 2922 }
johnc@1829 2923
johnc@1829 2924 assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
johnc@1829 2925 if (_inc_cset_size > 0) {
johnc@1829 2926 assert(_collection_set != NULL, "Invariant");
johnc@1829 2927 abandon_collection = false;
johnc@1829 2928 }
ysr@777 2929
ysr@777 2930 double young_end_time_sec = os::elapsedTime();
ysr@777 2931 _recorded_young_cset_choice_time_ms =
ysr@777 2932 (young_end_time_sec - young_start_time_sec) * 1000.0;
ysr@777 2933
johnc@1829 2934 // We are doing young collections so reset this.
johnc@1829 2935 non_young_start_time_sec = young_end_time_sec;
johnc@1829 2936
johnc@1829 2937 // Note we can use either _collection_set_size or
johnc@1829 2938 // _young_cset_length here
johnc@1829 2939 if (_collection_set_size > 0 && _last_young_gc_full) {
ysr@777 2940 // don't bother adding more regions...
ysr@777 2941 goto choose_collection_set_end;
ysr@777 2942 }
ysr@777 2943 }
ysr@777 2944
ysr@777 2945 if (!in_young_gc_mode() || !full_young_gcs()) {
ysr@777 2946 bool should_continue = true;
ysr@777 2947 NumberSeq seq;
ysr@777 2948 double avg_prediction = 100000000000000000.0; // something very large
johnc@1829 2949
johnc@1829 2950 // Save the current size of the collection set to detect
johnc@1829 2951 // if we actually added any old regions.
johnc@1829 2952 size_t n_young_regions = _collection_set_size;
johnc@1829 2953
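    // Pull regions from the sorted chooser (most reclaimable garbage
    // first), deducting each accepted region's predicted cost from the
    // remaining time budget. The running average + standard deviation
    // of the predictions so far is passed back to the chooser as a hint
    // for when to stop handing out regions.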
ysr@777 2954 do {
ysr@777 2955 hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
ysr@777 2956 avg_prediction);
apetrusenko@1112 2957 if (hr != NULL) {
ysr@777 2958 double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
ysr@777 2959 time_remaining_ms -= predicted_time_ms;
ysr@777 2960 predicted_pause_time_ms += predicted_time_ms;
ysr@777 2961 add_to_collection_set(hr);
johnc@1829 2962 record_non_young_cset_region(hr);
ysr@777 2963 max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
ysr@777 2964 if (G1PolicyVerbose > 0) {
ysr@777 2965 gclog_or_tty->print_cr(" (" SIZE_FORMAT " KB left in heap.)",
ysr@777 2966 max_live_bytes/K);
ysr@777 2967 }
ysr@777 2968 seq.add(predicted_time_ms);
ysr@777 2969 avg_prediction = seq.avg() + seq.sd();
ysr@777 2970 }
ysr@777 2971 should_continue =
ysr@777 2972 ( hr != NULL) &&
ysr@777 2973 ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
ysr@777 2974 : _collection_set_size < _young_list_fixed_length );
ysr@777 2975 } while (should_continue);
ysr@777 2976
ysr@777 2977 if (!adaptive_young_list_length() &&
ysr@777 2978 _collection_set_size < _young_list_fixed_length)
ysr@777 2979 _should_revert_to_full_young_gcs = true;
johnc@1829 2980
johnc@1829 2981 if (_collection_set_size > n_young_regions) {
johnc@1829 2982 // We actually added old regions to the collection set
johnc@1829 2983 // so we are not abandoning this collection.
johnc@1829 2984 abandon_collection = false;
johnc@1829 2985 }
ysr@777 2986 }
ysr@777 2987
ysr@777 2988 choose_collection_set_end:
johnc@1829 2989 stop_incremental_cset_building();
johnc@1829 2990
ysr@777 2991 count_CS_bytes_used();
ysr@777 2992
ysr@777 2993 end_recording_regions();
ysr@777 2994
ysr@777 2995 double non_young_end_time_sec = os::elapsedTime();
ysr@777 2996 _recorded_non_young_cset_choice_time_ms =
ysr@777 2997 (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
johnc@1829 2998
johnc@1829 2999 return abandon_collection;
ysr@777 3000 }
ysr@777 3001
ysr@777 3002 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
ysr@777 3003 G1CollectorPolicy::record_full_collection_end();
ysr@777 3004 _collectionSetChooser->updateAfterFullCollection();
ysr@777 3005 }
ysr@777 3006
ysr@777 3007 void G1CollectorPolicy_BestRegionsFirst::
ysr@777 3008 expand_if_possible(size_t numRegions) {
ysr@777 3009 size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
ysr@777 3010 _g1->expand(expansion_bytes);
ysr@777 3011 }
ysr@777 3012
ysr@777 3013 void G1CollectorPolicy_BestRegionsFirst::
apetrusenko@1112 3014 record_collection_pause_end(bool abandoned) {
apetrusenko@1112 3015 G1CollectorPolicy::record_collection_pause_end(abandoned);
ysr@777 3016 assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
ysr@777 3017 }
