src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author:      jmasa
date:        Thu, 10 Jun 2010 08:27:35 -0700
changeset:   1949:b9bc732be7c0
parent:      1907:c18cbe5936b8
child:       1966:215576b54709
permissions: -rw-r--r--
summary:     Merge

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 // A G1CollectorPolicy makes policy decisions that determine the
ysr@777 26 // characteristics of the collector. Examples include:
ysr@777 27 // * choice of collection set.
ysr@777 28 // * when to collect.
ysr@777 29
ysr@777 30 class HeapRegion;
ysr@777 31 class CollectionSetChooser;
ysr@777 32
ysr@777 33 // Yes, this is a bit unpleasant... but it saves replicating the same thing
ysr@777 34 // over and over again and introducing subtle problems through small typos and
ysr@777 35 // cutting and pasting mistakes. The macro below introduces a number
ysr@777 36 // sequence into the following two classes and the methods that access it.
ysr@777 37
ysr@777 38 #define define_num_seq(name) \
ysr@777 39 private: \
ysr@777 40 NumberSeq _all_##name##_times_ms; \
ysr@777 41 public: \
ysr@777 42 void record_##name##_time_ms(double ms) { \
ysr@777 43 _all_##name##_times_ms.add(ms); \
ysr@777 44 } \
ysr@777 45 NumberSeq* get_##name##_seq() { \
ysr@777 46 return &_all_##name##_times_ms; \
ysr@777 47 }
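//
// For illustration, applying the macro above as "define_num_seq(total)"
// expands (modulo whitespace) to the following field and accessors:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) { _all_total_times_ms.add(ms); }
//     NumberSeq* get_total_seq()           { return &_all_total_times_ms; }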
ysr@777 48
ysr@777 49 class MainBodySummary;
ysr@777 50
apetrusenko@984 51 class PauseSummary: public CHeapObj {
ysr@777 52 define_num_seq(total)
ysr@777 53 define_num_seq(other)
ysr@777 54
ysr@777 55 public:
ysr@777 56 virtual MainBodySummary* main_body_summary() { return NULL; }
ysr@777 57 };
ysr@777 58
apetrusenko@984 59 class MainBodySummary: public CHeapObj {
ysr@777 60 define_num_seq(satb_drain) // optional
ysr@777 61 define_num_seq(parallel) // parallel only
ysr@777 62 define_num_seq(ext_root_scan)
ysr@777 63 define_num_seq(mark_stack_scan)
ysr@777 64 define_num_seq(update_rs)
ysr@777 65 define_num_seq(scan_rs)
ysr@777 66 define_num_seq(scan_new_refs) // Only for temp use; added to
ysr@777 67 // in parallel case.
ysr@777 68 define_num_seq(obj_copy)
ysr@777 69 define_num_seq(termination) // parallel only
ysr@777 70 define_num_seq(parallel_other) // parallel only
ysr@777 71 define_num_seq(mark_closure)
ysr@777 72 define_num_seq(clear_ct) // parallel only
ysr@777 73 };
ysr@777 74
apetrusenko@1112 75 class Summary: public PauseSummary,
apetrusenko@1112 76 public MainBodySummary {
ysr@777 77 public:
ysr@777 78 virtual MainBodySummary* main_body_summary() { return this; }
ysr@777 79 };
ysr@777 80
apetrusenko@1112 81 class AbandonedSummary: public PauseSummary {
ysr@777 82 };
ysr@777 83
ysr@777 84 class G1CollectorPolicy: public CollectorPolicy {
ysr@777 85 protected:
ysr@777 86 // The number of pauses during the execution.
ysr@777 87 long _n_pauses;
ysr@777 88
ysr@777 89 // either equal to the number of parallel threads, if ParallelGCThreads
ysr@777 90 // has been set, or 1 otherwise
ysr@777 91 int _parallel_gc_threads;
ysr@777 92
ysr@777 93 enum SomePrivateConstants {
tonyp@1377 94 NumPrevPausesForHeuristics = 10
ysr@777 95 };
ysr@777 96
ysr@777 97 G1MMUTracker* _mmu_tracker;
ysr@777 98
ysr@777 99 void initialize_flags();
ysr@777 100
ysr@777 101 void initialize_all() {
ysr@777 102 initialize_flags();
ysr@777 103 initialize_size_info();
ysr@777 104 initialize_perm_generation(PermGen::MarkSweepCompact);
ysr@777 105 }
ysr@777 106
ysr@777 107 virtual size_t default_init_heap_size() {
ysr@777 108 // Pick some reasonable default.
ysr@777 109 return 8*M;
ysr@777 110 }
ysr@777 111
ysr@777 112 double _cur_collection_start_sec;
ysr@777 113 size_t _cur_collection_pause_used_at_start_bytes;
ysr@777 114 size_t _cur_collection_pause_used_regions_at_start;
ysr@777 115 size_t _prev_collection_pause_used_at_end_bytes;
ysr@777 116 double _cur_collection_par_time_ms;
ysr@777 117 double _cur_satb_drain_time_ms;
ysr@777 118 double _cur_clear_ct_time_ms;
ysr@777 119 bool _satb_drain_time_set;
ysr@777 120
johnc@1325 121 #ifndef PRODUCT
johnc@1325 122 // Card Table Count Cache stats
johnc@1325 123 double _min_clear_cc_time_ms; // min
johnc@1325 124 double _max_clear_cc_time_ms; // max
johnc@1325 125 double _cur_clear_cc_time_ms; // clearing time during current pause
johnc@1325 126 double _cum_clear_cc_time_ms; // cumulative clearing time
johnc@1325 127 jlong _num_cc_clears; // number of times the card count cache has been cleared
johnc@1325 128 #endif
johnc@1325 129
ysr@777 130 double _cur_CH_strong_roots_end_sec;
ysr@777 131 double _cur_CH_strong_roots_dur_ms;
ysr@777 132 double _cur_G1_strong_roots_end_sec;
ysr@777 133 double _cur_G1_strong_roots_dur_ms;
ysr@777 134
ysr@777 135 // Statistics for recent GC pauses. See below for how indexed.
ysr@777 136 TruncatedSeq* _recent_CH_strong_roots_times_ms;
ysr@777 137 TruncatedSeq* _recent_G1_strong_roots_times_ms;
ysr@777 138 TruncatedSeq* _recent_evac_times_ms;
ysr@777 139 // These exclude marking times.
ysr@777 140 TruncatedSeq* _recent_pause_times_ms;
ysr@777 141 TruncatedSeq* _recent_gc_times_ms;
ysr@777 142
ysr@777 143 TruncatedSeq* _recent_CS_bytes_used_before;
ysr@777 144 TruncatedSeq* _recent_CS_bytes_surviving;
ysr@777 145
ysr@777 146 TruncatedSeq* _recent_rs_sizes;
ysr@777 147
ysr@777 148 TruncatedSeq* _concurrent_mark_init_times_ms;
ysr@777 149 TruncatedSeq* _concurrent_mark_remark_times_ms;
ysr@777 150 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
ysr@777 151
apetrusenko@1112 152 Summary* _summary;
apetrusenko@1112 153 AbandonedSummary* _abandoned_summary;
ysr@777 154
ysr@777 155 NumberSeq* _all_pause_times_ms;
ysr@777 156 NumberSeq* _all_full_gc_times_ms;
ysr@777 157 double _stop_world_start;
ysr@777 158 NumberSeq* _all_stop_world_times_ms;
ysr@777 159 NumberSeq* _all_yield_times_ms;
ysr@777 160
ysr@777 161 size_t _region_num_young;
ysr@777 162 size_t _region_num_tenured;
ysr@777 163 size_t _prev_region_num_young;
ysr@777 164 size_t _prev_region_num_tenured;
ysr@777 165
ysr@777 166 NumberSeq* _all_mod_union_times_ms;
ysr@777 167
ysr@777 168 int _aux_num;
ysr@777 169 NumberSeq* _all_aux_times_ms;
ysr@777 170 double* _cur_aux_start_times_ms;
ysr@777 171 double* _cur_aux_times_ms;
ysr@777 172 bool* _cur_aux_times_set;
ysr@777 173
ysr@777 174 double* _par_last_ext_root_scan_times_ms;
ysr@777 175 double* _par_last_mark_stack_scan_times_ms;
ysr@777 176 double* _par_last_update_rs_start_times_ms;
ysr@777 177 double* _par_last_update_rs_times_ms;
ysr@777 178 double* _par_last_update_rs_processed_buffers;
ysr@777 179 double* _par_last_scan_rs_start_times_ms;
ysr@777 180 double* _par_last_scan_rs_times_ms;
ysr@777 181 double* _par_last_scan_new_refs_times_ms;
ysr@777 182 double* _par_last_obj_copy_times_ms;
ysr@777 183 double* _par_last_termination_times_ms;
ysr@777 184
ysr@777 185 // indicates that we are in young GC mode
ysr@777 186 bool _in_young_gc_mode;
ysr@777 187
ysr@777 188 // indicates whether we are in full young or partially young GC mode
ysr@777 189 bool _full_young_gcs;
ysr@777 190
ysr@777 191 // if true, then it tries to dynamically adjust the length of the
ysr@777 192 // young list
ysr@777 193 bool _adaptive_young_list_length;
ysr@777 194 size_t _young_list_min_length;
ysr@777 195 size_t _young_list_target_length;
ysr@777 196 size_t _young_list_fixed_length;
ysr@777 197
ysr@777 198 size_t _young_cset_length;
ysr@777 199 bool _last_young_gc_full;
ysr@777 200
ysr@777 201 double _target_pause_time_ms;
ysr@777 202
ysr@777 203 unsigned _full_young_pause_num;
ysr@777 204 unsigned _partial_young_pause_num;
ysr@777 205
ysr@777 206 bool _during_marking;
ysr@777 207 bool _in_marking_window;
ysr@777 208 bool _in_marking_window_im;
ysr@777 209
ysr@777 210 SurvRateGroup* _short_lived_surv_rate_group;
ysr@777 211 SurvRateGroup* _survivor_surv_rate_group;
ysr@777 212 // add here any more surv rate groups
ysr@777 213
tonyp@1791 214 double _gc_overhead_perc;
tonyp@1791 215
ysr@777 216 bool during_marking() {
ysr@777 217 return _during_marking;
ysr@777 218 }
ysr@777 219
ysr@777 220 // <NEW PREDICTION>
ysr@777 221
ysr@777 222 private:
ysr@777 223 enum PredictionConstants {
ysr@777 224 TruncatedSeqLength = 10
ysr@777 225 };
ysr@777 226
ysr@777 227 TruncatedSeq* _alloc_rate_ms_seq;
ysr@777 228 double _prev_collection_pause_end_ms;
ysr@777 229
ysr@777 230 TruncatedSeq* _pending_card_diff_seq;
ysr@777 231 TruncatedSeq* _rs_length_diff_seq;
ysr@777 232 TruncatedSeq* _cost_per_card_ms_seq;
ysr@777 233 TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
ysr@777 234 TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
ysr@777 235 TruncatedSeq* _cost_per_entry_ms_seq;
ysr@777 236 TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
ysr@777 237 TruncatedSeq* _cost_per_byte_ms_seq;
ysr@777 238 TruncatedSeq* _constant_other_time_ms_seq;
ysr@777 239 TruncatedSeq* _young_other_cost_per_region_ms_seq;
ysr@777 240 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
ysr@777 241
ysr@777 242 TruncatedSeq* _pending_cards_seq;
ysr@777 243 TruncatedSeq* _scanned_cards_seq;
ysr@777 244 TruncatedSeq* _rs_lengths_seq;
ysr@777 245
ysr@777 246 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
ysr@777 247
ysr@777 248 TruncatedSeq* _young_gc_eff_seq;
ysr@777 249
ysr@777 250 TruncatedSeq* _max_conc_overhead_seq;
ysr@777 251
ysr@777 252 size_t _recorded_young_regions;
ysr@777 253 size_t _recorded_non_young_regions;
ysr@777 254 size_t _recorded_region_num;
ysr@777 255
ysr@777 256 size_t _free_regions_at_end_of_collection;
ysr@777 257
ysr@777 258 size_t _recorded_rs_lengths;
ysr@777 259 size_t _max_rs_lengths;
ysr@777 260
ysr@777 261 size_t _recorded_marked_bytes;
ysr@777 262 size_t _recorded_young_bytes;
ysr@777 263
ysr@777 264 size_t _predicted_pending_cards;
ysr@777 265 size_t _predicted_cards_scanned;
ysr@777 266 size_t _predicted_rs_lengths;
ysr@777 267 size_t _predicted_bytes_to_copy;
ysr@777 268
ysr@777 269 double _predicted_survival_ratio;
ysr@777 270 double _predicted_rs_update_time_ms;
ysr@777 271 double _predicted_rs_scan_time_ms;
ysr@777 272 double _predicted_object_copy_time_ms;
ysr@777 273 double _predicted_constant_other_time_ms;
ysr@777 274 double _predicted_young_other_time_ms;
ysr@777 275 double _predicted_non_young_other_time_ms;
ysr@777 276 double _predicted_pause_time_ms;
ysr@777 277
ysr@777 278 double _vtime_diff_ms;
ysr@777 279
ysr@777 280 double _recorded_young_free_cset_time_ms;
ysr@777 281 double _recorded_non_young_free_cset_time_ms;
ysr@777 282
ysr@777 283 double _sigma;
ysr@777 284 double _expensive_region_limit_ms;
ysr@777 285
ysr@777 286 size_t _rs_lengths_prediction;
ysr@777 287
ysr@777 288 size_t _known_garbage_bytes;
ysr@777 289 double _known_garbage_ratio;
ysr@777 290
ysr@777 291 double sigma() {
ysr@777 292 return _sigma;
ysr@777 293 }
ysr@777 294
ysr@777 295 // A function that prevents us from putting too much stock in small sample
ysr@777 296 // sets. Returns a number between 2.0 and 1.0, depending on the number
ysr@777 297 // of samples. 5 or more samples yields 1.0; fewer scales linearly from
ysr@777 298 // 2.0 at 1 sample to 1.0 at 5.
ysr@777 299 double confidence_factor(int samples) {
ysr@777 300 if (samples > 4) return 1.0;
ysr@777 301 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
ysr@777 302 }
ysr@777 303
ysr@777 304 double get_new_neg_prediction(TruncatedSeq* seq) {
ysr@777 305 return seq->davg() - sigma() * seq->dsd();
ysr@777 306 }
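// (get_new_neg_prediction() is the low-side counterpart of
// get_new_prediction() below: the decaying average minus sigma() standard
// deviations. It is used by predict_pending_card_diff() and
// predict_young_gc_eff(), which want a low-side estimate.)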
ysr@777 307
ysr@777 308 #ifndef PRODUCT
ysr@777 309 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
ysr@777 310 #endif // PRODUCT
ysr@777 311
iveresov@1546 312 void adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 313 double update_rs_processed_buffers,
iveresov@1546 314 double goal_ms);
iveresov@1546 315
ysr@777 316 protected:
ysr@777 317 double _pause_time_target_ms;
ysr@777 318 double _recorded_young_cset_choice_time_ms;
ysr@777 319 double _recorded_non_young_cset_choice_time_ms;
ysr@777 320 bool _within_target;
ysr@777 321 size_t _pending_cards;
ysr@777 322 size_t _max_pending_cards;
ysr@777 323
ysr@777 324 public:
ysr@777 325
ysr@777 326 void set_region_short_lived(HeapRegion* hr) {
ysr@777 327 hr->install_surv_rate_group(_short_lived_surv_rate_group);
ysr@777 328 }
ysr@777 329
ysr@777 330 void set_region_survivors(HeapRegion* hr) {
ysr@777 331 hr->install_surv_rate_group(_survivor_surv_rate_group);
ysr@777 332 }
ysr@777 333
ysr@777 334 #ifndef PRODUCT
ysr@777 335 bool verify_young_ages();
ysr@777 336 #endif // PRODUCT
ysr@777 337
ysr@777 338 double get_new_prediction(TruncatedSeq* seq) {
ysr@777 339 return MAX2(seq->davg() + sigma() * seq->dsd(),
ysr@777 340 seq->davg() * confidence_factor(seq->num()));
ysr@777 341 }
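//
// Worked example (the numbers and the value of sigma() are purely
// illustrative): for a sequence with davg() == 10.0 ms, dsd() == 2.0 ms and
// num() == 3 samples, and with sigma() == 0.5,
//   davg + sigma * dsd          == 10.0 + 0.5 * 2.0                    == 11.0 ms
//   davg * confidence_factor(3) == 10.0 * (1.0 + 0.5 * (5 - 3) / 2.0)  == 15.0 ms
// so the prediction is the larger value, 15.0 ms; small sample counts push
// the estimate up to keep it conservative.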
ysr@777 342
ysr@777 343 size_t young_cset_length() {
ysr@777 344 return _young_cset_length;
ysr@777 345 }
ysr@777 346
ysr@777 347 void record_max_rs_lengths(size_t rs_lengths) {
ysr@777 348 _max_rs_lengths = rs_lengths;
ysr@777 349 }
ysr@777 350
ysr@777 351 size_t predict_pending_card_diff() {
ysr@777 352 double prediction = get_new_neg_prediction(_pending_card_diff_seq);
ysr@777 353 if (prediction < 0.00001)
ysr@777 354 return 0;
ysr@777 355 else
ysr@777 356 return (size_t) prediction;
ysr@777 357 }
ysr@777 358
ysr@777 359 size_t predict_pending_cards() {
ysr@777 360 size_t max_pending_card_num = _g1->max_pending_card_num();
ysr@777 361 size_t diff = predict_pending_card_diff();
ysr@777 362 size_t prediction;
ysr@777 363 if (diff > max_pending_card_num)
ysr@777 364 prediction = max_pending_card_num;
ysr@777 365 else
ysr@777 366 prediction = max_pending_card_num - diff;
ysr@777 367
ysr@777 368 return prediction;
ysr@777 369 }
ysr@777 370
ysr@777 371 size_t predict_rs_length_diff() {
ysr@777 372 return (size_t) get_new_prediction(_rs_length_diff_seq);
ysr@777 373 }
ysr@777 374
ysr@777 375 double predict_alloc_rate_ms() {
ysr@777 376 return get_new_prediction(_alloc_rate_ms_seq);
ysr@777 377 }
ysr@777 378
ysr@777 379 double predict_cost_per_card_ms() {
ysr@777 380 return get_new_prediction(_cost_per_card_ms_seq);
ysr@777 381 }
ysr@777 382
ysr@777 383 double predict_rs_update_time_ms(size_t pending_cards) {
ysr@777 384 return (double) pending_cards * predict_cost_per_card_ms();
ysr@777 385 }
ysr@777 386
ysr@777 387 double predict_fully_young_cards_per_entry_ratio() {
ysr@777 388 return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
ysr@777 389 }
ysr@777 390
ysr@777 391 double predict_partially_young_cards_per_entry_ratio() {
ysr@777 392 if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
ysr@777 393 return predict_fully_young_cards_per_entry_ratio();
ysr@777 394 else
ysr@777 395 return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
ysr@777 396 }
ysr@777 397
ysr@777 398 size_t predict_young_card_num(size_t rs_length) {
ysr@777 399 return (size_t) ((double) rs_length *
ysr@777 400 predict_fully_young_cards_per_entry_ratio());
ysr@777 401 }
ysr@777 402
ysr@777 403 size_t predict_non_young_card_num(size_t rs_length) {
ysr@777 404 return (size_t) ((double) rs_length *
ysr@777 405 predict_partially_young_cards_per_entry_ratio());
ysr@777 406 }
ysr@777 407
ysr@777 408 double predict_rs_scan_time_ms(size_t card_num) {
ysr@777 409 if (full_young_gcs())
ysr@777 410 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 411 else
ysr@777 412 return predict_partially_young_rs_scan_time_ms(card_num);
ysr@777 413 }
ysr@777 414
ysr@777 415 double predict_partially_young_rs_scan_time_ms(size_t card_num) {
ysr@777 416 if (_partially_young_cost_per_entry_ms_seq->num() < 3)
ysr@777 417 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 418 else
ysr@777 419 return (double) card_num *
ysr@777 420 get_new_prediction(_partially_young_cost_per_entry_ms_seq);
ysr@777 421 }
ysr@777 422
ysr@777 423 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
ysr@777 424 if (_cost_per_byte_ms_during_cm_seq->num() < 3)
ysr@777 425 return 1.1 * (double) bytes_to_copy *
ysr@777 426 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 427 else
ysr@777 428 return (double) bytes_to_copy *
ysr@777 429 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
ysr@777 430 }
ysr@777 431
ysr@777 432 double predict_object_copy_time_ms(size_t bytes_to_copy) {
ysr@777 433 if (_in_marking_window && !_in_marking_window_im)
ysr@777 434 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
ysr@777 435 else
ysr@777 436 return (double) bytes_to_copy *
ysr@777 437 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 438 }
ysr@777 439
ysr@777 440 double predict_constant_other_time_ms() {
ysr@777 441 return get_new_prediction(_constant_other_time_ms_seq);
ysr@777 442 }
ysr@777 443
ysr@777 444 double predict_young_other_time_ms(size_t young_num) {
ysr@777 445 return
ysr@777 446 (double) young_num *
ysr@777 447 get_new_prediction(_young_other_cost_per_region_ms_seq);
ysr@777 448 }
ysr@777 449
ysr@777 450 double predict_non_young_other_time_ms(size_t non_young_num) {
ysr@777 451 return
ysr@777 452 (double) non_young_num *
ysr@777 453 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
ysr@777 454 }
ysr@777 455
ysr@777 456 void check_if_region_is_too_expensive(double predicted_time_ms);
ysr@777 457
ysr@777 458 double predict_young_collection_elapsed_time_ms(size_t adjustment);
ysr@777 459 double predict_base_elapsed_time_ms(size_t pending_cards);
ysr@777 460 double predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 461 size_t scanned_cards);
ysr@777 462 size_t predict_bytes_to_copy(HeapRegion* hr);
ysr@777 463 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
ysr@777 464
johnc@1829 465 // for use by: calculate_young_list_target_length(rs_length)
johnc@1829 466 bool predict_will_fit(size_t young_region_num,
johnc@1829 467 double base_time_ms,
johnc@1829 468 size_t init_free_regions,
johnc@1829 469 double target_pause_time_ms);
ysr@777 470
ysr@777 471 void start_recording_regions();
johnc@1829 472 void record_cset_region_info(HeapRegion* hr, bool young);
johnc@1829 473 void record_non_young_cset_region(HeapRegion* hr);
johnc@1829 474
johnc@1829 475 void set_recorded_young_regions(size_t n_regions);
johnc@1829 476 void set_recorded_young_bytes(size_t bytes);
johnc@1829 477 void set_recorded_rs_lengths(size_t rs_lengths);
johnc@1829 478 void set_predicted_bytes_to_copy(size_t bytes);
johnc@1829 479
ysr@777 480 void end_recording_regions();
ysr@777 481
ysr@777 482 void record_vtime_diff_ms(double vtime_diff_ms) {
ysr@777 483 _vtime_diff_ms = vtime_diff_ms;
ysr@777 484 }
ysr@777 485
ysr@777 486 void record_young_free_cset_time_ms(double time_ms) {
ysr@777 487 _recorded_young_free_cset_time_ms = time_ms;
ysr@777 488 }
ysr@777 489
ysr@777 490 void record_non_young_free_cset_time_ms(double time_ms) {
ysr@777 491 _recorded_non_young_free_cset_time_ms = time_ms;
ysr@777 492 }
ysr@777 493
ysr@777 494 double predict_young_gc_eff() {
ysr@777 495 return get_new_neg_prediction(_young_gc_eff_seq);
ysr@777 496 }
ysr@777 497
apetrusenko@980 498 double predict_survivor_regions_evac_time();
apetrusenko@980 499
ysr@777 500 // </NEW PREDICTION>
ysr@777 501
ysr@777 502 public:
ysr@777 503 void cset_regions_freed() {
ysr@777 504 bool propagate = _last_young_gc_full && !_in_marking_window;
ysr@777 505 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 506 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 507 // also call it on any more surv rate groups
ysr@777 508 }
ysr@777 509
ysr@777 510 void set_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 511 _known_garbage_bytes = known_garbage_bytes;
ysr@777 512 size_t heap_bytes = _g1->capacity();
ysr@777 513 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 514 }
ysr@777 515
ysr@777 516 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 517 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
ysr@777 518
ysr@777 519 _known_garbage_bytes -= known_garbage_bytes;
ysr@777 520 size_t heap_bytes = _g1->capacity();
ysr@777 521 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 522 }
ysr@777 523
ysr@777 524 G1MMUTracker* mmu_tracker() {
ysr@777 525 return _mmu_tracker;
ysr@777 526 }
ysr@777 527
ysr@777 528 double predict_init_time_ms() {
ysr@777 529 return get_new_prediction(_concurrent_mark_init_times_ms);
ysr@777 530 }
ysr@777 531
ysr@777 532 double predict_remark_time_ms() {
ysr@777 533 return get_new_prediction(_concurrent_mark_remark_times_ms);
ysr@777 534 }
ysr@777 535
ysr@777 536 double predict_cleanup_time_ms() {
ysr@777 537 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
ysr@777 538 }
ysr@777 539
ysr@777 540 // Returns an estimate of the survival rate of a region in the given
ysr@777 541 // survivor rate group at young-generation age "age".
apetrusenko@980 542 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
apetrusenko@980 543 TruncatedSeq* seq = surv_rate_group->get_seq(age);
ysr@777 544 if (seq->num() == 0)
ysr@777 545 gclog_or_tty->print("BARF! age is %d", age);
ysr@777 546 guarantee( seq->num() > 0, "invariant" );
ysr@777 547 double pred = get_new_prediction(seq);
ysr@777 548 if (pred > 1.0)
ysr@777 549 pred = 1.0;
ysr@777 550 return pred;
ysr@777 551 }
ysr@777 552
apetrusenko@980 553 double predict_yg_surv_rate(int age) {
apetrusenko@980 554 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
apetrusenko@980 555 }
apetrusenko@980 556
ysr@777 557 double accum_yg_surv_rate_pred(int age) {
ysr@777 558 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
ysr@777 559 }
ysr@777 560
ysr@777 561 protected:
ysr@777 562 void print_stats (int level, const char* str, double value);
ysr@777 563 void print_stats (int level, const char* str, int value);
ysr@777 564 void print_par_stats (int level, const char* str, double* data) {
ysr@777 565 print_par_stats(level, str, data, true);
ysr@777 566 }
ysr@777 567 void print_par_stats (int level, const char* str, double* data, bool summary);
ysr@777 568 void print_par_buffers (int level, const char* str, double* data, bool summary);
ysr@777 569
ysr@777 570 void check_other_times(int level,
ysr@777 571 NumberSeq* other_times_ms,
ysr@777 572 NumberSeq* calc_other_times_ms) const;
ysr@777 573
ysr@777 574 void print_summary (PauseSummary* stats) const;
apetrusenko@1112 575 void print_abandoned_summary(PauseSummary* summary) const;
ysr@777 576
ysr@777 577 void print_summary (int level, const char* str, NumberSeq* seq) const;
ysr@777 578 void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
ysr@777 579
ysr@777 580 double avg_value (double* data);
ysr@777 581 double max_value (double* data);
ysr@777 582 double sum_of_values (double* data);
ysr@777 583 double max_sum (double* data1, double* data2);
ysr@777 584
ysr@777 585 int _last_satb_drain_processed_buffers;
ysr@777 586 int _last_update_rs_processed_buffers;
ysr@777 587 double _last_pause_time_ms;
ysr@777 588
ysr@777 589 size_t _bytes_in_to_space_before_gc;
ysr@777 590 size_t _bytes_in_to_space_after_gc;
ysr@777 591 size_t bytes_in_to_space_during_gc() {
ysr@777 592 return
ysr@777 593 _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
ysr@777 594 }
ysr@777 595 size_t _bytes_in_collection_set_before_gc;
ysr@777 596 // Used to count used bytes in CS.
ysr@777 597 friend class CountCSClosure;
ysr@777 598
ysr@777 599 // Statistics kept per GC stoppage, whether a pause or a full collection.
ysr@777 600 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
ysr@777 601
ysr@777 602 // We track markings.
ysr@777 603 int _num_markings;
ysr@777 604 double _mark_thread_startup_sec; // Time at startup of marking thread
ysr@777 605
ysr@777 606 // Add a new GC of the given duration and end time to the record.
ysr@777 607 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
ysr@777 608
ysr@777 609 // The head of the list (via "next_in_collection_set()") representing the
johnc@1829 610 // current collection set. Set from the incrementally built collection
johnc@1829 611 // set at the start of the pause.
ysr@777 612 HeapRegion* _collection_set;
johnc@1829 613
johnc@1829 614 // The number of regions in the collection set. Set from the incrementally
johnc@1829 615 // built collection set at the start of an evacuation pause.
ysr@777 616 size_t _collection_set_size;
johnc@1829 617
johnc@1829 618 // The number of bytes in the collection set before the pause. Set from
johnc@1829 619 // the incrementally built collection set at the start of an evacuation
johnc@1829 620 // pause.
ysr@777 621 size_t _collection_set_bytes_used_before;
ysr@777 622
johnc@1829 623 // The associated information that is maintained while the incremental
johnc@1829 624 // collection set is being built with young regions. Used to populate
johnc@1829 625 // the recorded info for the evacuation pause.
johnc@1829 626
johnc@1829 627 enum CSetBuildType {
johnc@1829 628 Active, // We are actively building the collection set
johnc@1829 629 Inactive // We are not actively building the collection set
johnc@1829 630 };
johnc@1829 631
johnc@1829 632 CSetBuildType _inc_cset_build_state;
johnc@1829 633
johnc@1829 634 // The head of the incrementally built collection set.
johnc@1829 635 HeapRegion* _inc_cset_head;
johnc@1829 636
johnc@1829 637 // The tail of the incrementally built collection set.
johnc@1829 638 HeapRegion* _inc_cset_tail;
johnc@1829 639
johnc@1829 640 // The number of regions in the incrementally built collection set.
johnc@1829 641 // Used to set _collection_set_size at the start of an evacuation
johnc@1829 642 // pause.
johnc@1829 643 size_t _inc_cset_size;
johnc@1829 644
johnc@1829 645 // Used as the index in the surviving young words structure
johnc@1829 646 // which tracks the amount of space, for each young region,
johnc@1829 647 // that survives the pause.
johnc@1829 648 size_t _inc_cset_young_index;
johnc@1829 649
johnc@1829 650 // The number of bytes in the incrementally built collection set.
johnc@1829 651 // Used to set _collection_set_bytes_used_before at the start of
johnc@1829 652 // an evacuation pause.
johnc@1829 653 size_t _inc_cset_bytes_used_before;
johnc@1829 654
johnc@1829 655 // Used to record the highest end of any heap region in the collection set
johnc@1829 656 HeapWord* _inc_cset_max_finger;
johnc@1829 657
johnc@1829 658 // The number of recorded used bytes in the young regions
johnc@1829 659 // of the collection set. This is the sum of the used() bytes
johnc@1829 660 // of retired young regions in the collection set.
johnc@1829 661 size_t _inc_cset_recorded_young_bytes;
johnc@1829 662
johnc@1829 663 // The RSet lengths recorded for regions in the collection set
johnc@1829 664 // (updated by the periodic sampling of the regions in the
johnc@1829 665 // young list/collection set).
johnc@1829 666 size_t _inc_cset_recorded_rs_lengths;
johnc@1829 667
johnc@1829 668 // The predicted elapsed time it will take to collect the regions
johnc@1829 669 // in the collection set (updated by the periodic sampling of the
johnc@1829 670 // regions in the young list/collection set).
johnc@1829 671 double _inc_cset_predicted_elapsed_time_ms;
johnc@1829 672
johnc@1829 673 // The predicted bytes to copy for the regions in the collection
johnc@1829 674 // set (updated by the periodic sampling of the regions in the
johnc@1829 675 // young list/collection set).
johnc@1829 676 size_t _inc_cset_predicted_bytes_to_copy;
johnc@1829 677
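// A rough sketch of how the incremental collection set fields above are
// expected to be driven (pieced together from the accessor comments later
// in this file; the actual logic lives in the corresponding .cpp and may
// differ in detail):
//
//   start_incremental_cset_building();                // begin a new incremental cset
//   ...
//   add_region_to_incremental_cset_rhs(hr);           // or ..._lhs(hr)
//   update_incremental_cset_info(hr, new_rs_length);  // periodic RSet re-sampling
//   ...
//   stop_incremental_cset_building();                 // _inc_cset_build_state = Inactive
//
// At the start of the next evacuation pause the aggregated values seed
// _collection_set_size and _collection_set_bytes_used_before (see the
// comments on those fields above).
//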
ysr@777 678 // Info about marking.
ysr@777 679 int _n_marks; // Sticky at 2, so we know when we've done at least 2.
ysr@777 680
ysr@777 681 // The number of collection pauses at the end of the last mark.
ysr@777 682 size_t _n_pauses_at_mark_end;
ysr@777 683
ysr@777 684 // Stash a pointer to the g1 heap.
ysr@777 685 G1CollectedHeap* _g1;
ysr@777 686
ysr@777 687 // The average time in ms per collection pause, averaged over recent pauses.
ysr@777 688 double recent_avg_time_for_pauses_ms();
ysr@777 689
ysr@777 690 // The average time in ms for processing CollectedHeap strong roots, per
ysr@777 691 // collection pause, averaged over recent pauses.
ysr@777 692 double recent_avg_time_for_CH_strong_ms();
ysr@777 693
ysr@777 694 // The average time in ms for processing the G1 remembered set, per
ysr@777 695 // pause, averaged over recent pauses.
ysr@777 696 double recent_avg_time_for_G1_strong_ms();
ysr@777 697
ysr@777 698 // The average time in ms for "evacuating followers", per pause, averaged
ysr@777 699 // over recent pauses.
ysr@777 700 double recent_avg_time_for_evac_ms();
ysr@777 701
ysr@777 702 // The number of "recent" GCs recorded in the number sequences
ysr@777 703 int number_of_recent_gcs();
ysr@777 704
ysr@777 705 // The average survival ratio, computed as the total number of bytes
ysr@777 706 // surviving / total number of bytes before collection over the last
ysr@777 707 // several recent pauses.
ysr@777 708 double recent_avg_survival_fraction();
ysr@777 709 // The survival fraction of the most recent pause; if there have been no
ysr@777 710 // pauses, returns 1.0.
ysr@777 711 double last_survival_fraction();
ysr@777 712
ysr@777 713 // Returns a "conservative" estimate of the recent survival rate, i.e.,
ysr@777 714 // one that may be higher than "recent_avg_survival_fraction".
ysr@777 715 // This is conservative in several ways:
ysr@777 716 // If there have been few pauses, it will assume a potential high
ysr@777 717 // variance, and err on the side of caution.
ysr@777 718 // It puts a lower bound (currently 0.1) on the value it will return.
ysr@777 719 // To try to detect phase changes, if the most recent pause ("latest") has a
ysr@777 720 // higher-than average ("avg") survival rate, it returns that rate.
ysr@777 721 // "work" version is a utility function; young is restricted to young regions.
ysr@777 722 double conservative_avg_survival_fraction_work(double avg,
ysr@777 723 double latest);
ysr@777 724
ysr@777 725 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 726 // surviving and the total number of bytes before collection, resp.,
ysr@777 727 // over the last several recent pauses.
ysr@777 728 // Returns the survival rate for the category in the most recent pause.
ysr@777 729 // If there have been no pauses, returns 1.0.
ysr@777 730 double last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 731 TruncatedSeq* before);
ysr@777 732
ysr@777 733 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 734 // surviving and the total number of bytes before collection, resp.,
ysr@777 735 // over the last several recent pauses.
ysr@777 736 // Returns the average survival ratio over the last several recent pauses.
ysr@777 737 // If there have been no pauses, returns 1.0.
ysr@777 738 double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 739 TruncatedSeq* before);
ysr@777 740
ysr@777 741 double conservative_avg_survival_fraction() {
ysr@777 742 double avg = recent_avg_survival_fraction();
ysr@777 743 double latest = last_survival_fraction();
ysr@777 744 return conservative_avg_survival_fraction_work(avg, latest);
ysr@777 745 }
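//
// For example (following the comment on conservative_avg_survival_fraction_work
// above): with avg == 0.30 and latest == 0.50, the conservative estimate is at
// least 0.50, since a higher-than-average latest rate is treated as a possible
// phase change, and the result is never allowed below the 0.10 lower bound.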
ysr@777 746
ysr@777 747 // The ratio of gc time to elapsed time, computed over recent pauses.
ysr@777 748 double _recent_avg_pause_time_ratio;
ysr@777 749
ysr@777 750 double recent_avg_pause_time_ratio() {
ysr@777 751 return _recent_avg_pause_time_ratio;
ysr@777 752 }
ysr@777 753
ysr@777 754 // Number of pauses between concurrent marking.
ysr@777 755 size_t _pauses_btwn_concurrent_mark;
ysr@777 756
ysr@777 757 size_t _n_marks_since_last_pause;
ysr@777 758
tonyp@1794 759 // At the end of a pause we check the heap occupancy and we decide
tonyp@1794 760 // whether we will start a marking cycle during the next pause. If
tonyp@1794 761 // we decide that we want to do that, we will set this parameter to
tonyp@1794 762 // true. So, this parameter will stay true between the end of a
tonyp@1794 763 // pause and the beginning of a subsequent pause (not necessarily
tonyp@1794 764 // the next one, see the comments on the next field) when we decide
tonyp@1794 765 // that we will indeed start a marking cycle and do the initial-mark
tonyp@1794 766 // work.
tonyp@1794 767 volatile bool _initiate_conc_mark_if_possible;
ysr@777 768
tonyp@1794 769 // If initiate_conc_mark_if_possible() is set at the beginning of a
tonyp@1794 770 // pause, it is a suggestion that the pause should start a marking
tonyp@1794 771 // cycle by doing the initial-mark work. However, it is possible
tonyp@1794 772 // that the concurrent marking thread is still finishing up the
tonyp@1794 773 // previous marking cycle (e.g., clearing the next marking
tonyp@1794 774 // bitmap). If that is the case we cannot start a new cycle and
tonyp@1794 775 // we'll have to wait for the concurrent marking thread to finish
tonyp@1794 776 // what it is doing. In this case we will postpone the marking cycle
tonyp@1794 777 // initiation decision for the next pause. When we eventually decide
tonyp@1794 778 // to start a cycle, we will set _during_initial_mark_pause which
tonyp@1794 779 // will stay true until the end of the initial-mark pause and it's
tonyp@1794 780 // the condition that indicates that a pause is doing the
tonyp@1794 781 // initial-mark work.
tonyp@1794 782 volatile bool _during_initial_mark_pause;
tonyp@1794 783
ysr@777 784 bool _should_revert_to_full_young_gcs;
ysr@777 785 bool _last_full_young_gc;
ysr@777 786
ysr@777 787 // This set of variables tracks the collector efficiency, in order to
ysr@777 788 // determine whether we should initiate a new marking.
ysr@777 789 double _cur_mark_stop_world_time_ms;
ysr@777 790 double _mark_init_start_sec;
ysr@777 791 double _mark_remark_start_sec;
ysr@777 792 double _mark_cleanup_start_sec;
ysr@777 793 double _mark_closure_time_ms;
ysr@777 794
ysr@777 795 void calculate_young_list_min_length();
johnc@1829 796 void calculate_young_list_target_length();
johnc@1829 797 void calculate_young_list_target_length(size_t rs_lengths);
ysr@777 798
ysr@777 799 public:
ysr@777 800
ysr@777 801 G1CollectorPolicy();
ysr@777 802
ysr@777 803 virtual G1CollectorPolicy* as_g1_policy() { return this; }
ysr@777 804
ysr@777 805 virtual CollectorPolicy::Name kind() {
ysr@777 806 return CollectorPolicy::G1CollectorPolicyKind;
ysr@777 807 }
ysr@777 808
ysr@777 809 void check_prediction_validity();
ysr@777 810
ysr@777 811 size_t bytes_in_collection_set() {
ysr@777 812 return _bytes_in_collection_set_before_gc;
ysr@777 813 }
ysr@777 814
ysr@777 815 size_t bytes_in_to_space() {
ysr@777 816 return bytes_in_to_space_during_gc();
ysr@777 817 }
ysr@777 818
ysr@777 819 unsigned calc_gc_alloc_time_stamp() {
ysr@777 820 return _all_pause_times_ms->num() + 1;
ysr@777 821 }
ysr@777 822
ysr@777 823 protected:
ysr@777 824
ysr@777 825 // Count the number of bytes used in the CS.
ysr@777 826 void count_CS_bytes_used();
ysr@777 827
ysr@777 828 // Together these do the base cleanup-recording work. Subclasses might
ysr@777 829 // want to put something between them.
ysr@777 830 void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 831 size_t max_live_bytes);
ysr@777 832 void record_concurrent_mark_cleanup_end_work2();
ysr@777 833
ysr@777 834 public:
ysr@777 835
ysr@777 836 virtual void init();
ysr@777 837
apetrusenko@980 838 // Create jstat counters for the policy.
apetrusenko@980 839 virtual void initialize_gc_policy_counters();
apetrusenko@980 840
ysr@777 841 virtual HeapWord* mem_allocate_work(size_t size,
ysr@777 842 bool is_tlab,
ysr@777 843 bool* gc_overhead_limit_was_exceeded);
ysr@777 844
ysr@777 845 // This method controls how a collector handles one or more
ysr@777 846 // of its generations being fully allocated.
ysr@777 847 virtual HeapWord* satisfy_failed_allocation(size_t size,
ysr@777 848 bool is_tlab);
ysr@777 849
ysr@777 850 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
ysr@777 851
ysr@777 852 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
ysr@777 853
ysr@777 854 // The number of collection pauses so far.
ysr@777 855 long n_pauses() const { return _n_pauses; }
ysr@777 856
ysr@777 857 // Update the heuristic info to record a collection pause of the given
ysr@777 858 // start time, where the given number of bytes were used at the start.
ysr@777 859 // This may involve changing the desired size of a collection set.
ysr@777 860
ysr@777 861 virtual void record_stop_world_start();
ysr@777 862
ysr@777 863 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 864 size_t start_used);
ysr@777 865
ysr@777 866 // Must currently be called while the world is stopped.
ysr@777 867 virtual void record_concurrent_mark_init_start();
ysr@777 868 virtual void record_concurrent_mark_init_end();
ysr@777 869 void record_concurrent_mark_init_end_pre(double
ysr@777 870 mark_init_elapsed_time_ms);
ysr@777 871
ysr@777 872 void record_mark_closure_time(double mark_closure_time_ms);
ysr@777 873
ysr@777 874 virtual void record_concurrent_mark_remark_start();
ysr@777 875 virtual void record_concurrent_mark_remark_end();
ysr@777 876
ysr@777 877 virtual void record_concurrent_mark_cleanup_start();
ysr@777 878 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 879 size_t max_live_bytes);
ysr@777 880 virtual void record_concurrent_mark_cleanup_completed();
ysr@777 881
ysr@777 882 virtual void record_concurrent_pause();
ysr@777 883 virtual void record_concurrent_pause_end();
ysr@777 884
ysr@777 885 virtual void record_collection_pause_end_CH_strong_roots();
ysr@777 886 virtual void record_collection_pause_end_G1_strong_roots();
ysr@777 887
apetrusenko@1112 888 virtual void record_collection_pause_end(bool abandoned);
ysr@777 889
ysr@777 890 // Record the fact that a full collection occurred.
ysr@777 891 virtual void record_full_collection_start();
ysr@777 892 virtual void record_full_collection_end();
ysr@777 893
ysr@777 894 void record_ext_root_scan_time(int worker_i, double ms) {
ysr@777 895 _par_last_ext_root_scan_times_ms[worker_i] = ms;
ysr@777 896 }
ysr@777 897
ysr@777 898 void record_mark_stack_scan_time(int worker_i, double ms) {
ysr@777 899 _par_last_mark_stack_scan_times_ms[worker_i] = ms;
ysr@777 900 }
ysr@777 901
ysr@777 902 void record_satb_drain_time(double ms) {
ysr@777 903 _cur_satb_drain_time_ms = ms;
ysr@777 904 _satb_drain_time_set = true;
ysr@777 905 }
ysr@777 906
ysr@777 907 void record_satb_drain_processed_buffers (int processed_buffers) {
ysr@777 908 _last_satb_drain_processed_buffers = processed_buffers;
ysr@777 909 }
ysr@777 910
ysr@777 911 void record_mod_union_time(double ms) {
ysr@777 912 _all_mod_union_times_ms->add(ms);
ysr@777 913 }
ysr@777 914
ysr@777 915 void record_update_rs_start_time(int thread, double ms) {
ysr@777 916 _par_last_update_rs_start_times_ms[thread] = ms;
ysr@777 917 }
ysr@777 918
ysr@777 919 void record_update_rs_time(int thread, double ms) {
ysr@777 920 _par_last_update_rs_times_ms[thread] = ms;
ysr@777 921 }
ysr@777 922
ysr@777 923 void record_update_rs_processed_buffers (int thread,
ysr@777 924 double processed_buffers) {
ysr@777 925 _par_last_update_rs_processed_buffers[thread] = processed_buffers;
ysr@777 926 }
ysr@777 927
ysr@777 928 void record_scan_rs_start_time(int thread, double ms) {
ysr@777 929 _par_last_scan_rs_start_times_ms[thread] = ms;
ysr@777 930 }
ysr@777 931
ysr@777 932 void record_scan_rs_time(int thread, double ms) {
ysr@777 933 _par_last_scan_rs_times_ms[thread] = ms;
ysr@777 934 }
ysr@777 935
ysr@777 936 void record_scan_new_refs_time(int thread, double ms) {
ysr@777 937 _par_last_scan_new_refs_times_ms[thread] = ms;
ysr@777 938 }
ysr@777 939
ysr@777 940 double get_scan_new_refs_time(int thread) {
ysr@777 941 return _par_last_scan_new_refs_times_ms[thread];
ysr@777 942 }
ysr@777 943
ysr@777 944 void reset_obj_copy_time(int thread) {
ysr@777 945 _par_last_obj_copy_times_ms[thread] = 0.0;
ysr@777 946 }
ysr@777 947
ysr@777 948 void reset_obj_copy_time() {
ysr@777 949 reset_obj_copy_time(0);
ysr@777 950 }
ysr@777 951
ysr@777 952 void record_obj_copy_time(int thread, double ms) {
ysr@777 953 _par_last_obj_copy_times_ms[thread] += ms;
ysr@777 954 }
ysr@777 955
ysr@777 956 void record_obj_copy_time(double ms) {
ysr@777 957 record_obj_copy_time(0, ms);
ysr@777 958 }
ysr@777 959
ysr@777 960 void record_termination_time(int thread, double ms) {
ysr@777 961 _par_last_termination_times_ms[thread] = ms;
ysr@777 962 }
ysr@777 963
ysr@777 964 void record_termination_time(double ms) {
ysr@777 965 record_termination_time(0, ms);
ysr@777 966 }
ysr@777 967
tonyp@1030 968 void record_pause_time_ms(double ms) {
ysr@777 969 _last_pause_time_ms = ms;
ysr@777 970 }
ysr@777 971
ysr@777 972 void record_clear_ct_time(double ms) {
ysr@777 973 _cur_clear_ct_time_ms = ms;
ysr@777 974 }
ysr@777 975
ysr@777 976 void record_par_time(double ms) {
ysr@777 977 _cur_collection_par_time_ms = ms;
ysr@777 978 }
ysr@777 979
ysr@777 980 void record_aux_start_time(int i) {
ysr@777 981 guarantee(i < _aux_num, "should be within range");
ysr@777 982 _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
ysr@777 983 }
ysr@777 984
ysr@777 985 void record_aux_end_time(int i) {
ysr@777 986 guarantee(i < _aux_num, "should be within range");
ysr@777 987 double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
ysr@777 988 _cur_aux_times_set[i] = true;
ysr@777 989 _cur_aux_times_ms[i] += ms;
ysr@777 990 }
ysr@777 991
johnc@1325 992 #ifndef PRODUCT
johnc@1325 993 void record_cc_clear_time(double ms) {
johnc@1325 994 if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
johnc@1325 995 _min_clear_cc_time_ms = ms;
johnc@1325 996 if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
johnc@1325 997 _max_clear_cc_time_ms = ms;
johnc@1325 998 _cur_clear_cc_time_ms = ms;
johnc@1325 999 _cum_clear_cc_time_ms += ms;
johnc@1325 1000 _num_cc_clears++;
johnc@1325 1001 }
johnc@1325 1002 #endif
johnc@1325 1003
ysr@777 1004 // Record the fact that "bytes" bytes were allocated in a region.
ysr@777 1005 void record_before_bytes(size_t bytes);
ysr@777 1006 void record_after_bytes(size_t bytes);
ysr@777 1007
ysr@777 1008 // Returns "true" if this is a good time to do a collection pause.
ysr@777 1009 // The "word_size" argument, if non-zero, indicates the size of an
ysr@777 1010 // allocation request that is prompting this query.
ysr@777 1011 virtual bool should_do_collection_pause(size_t word_size) = 0;
ysr@777 1012
ysr@777 1013 // Choose a new collection set. Marks the chosen regions as being
ysr@777 1014 // "in_collection_set", and links them together. The head and number of
ysr@777 1015 // the collection set are available via access methods.
johnc@1829 1016 virtual bool choose_collection_set() = 0;
ysr@777 1017
ysr@777 1018 // The head of the list (via "next_in_collection_set()") representing the
ysr@777 1019 // current collection set.
ysr@777 1020 HeapRegion* collection_set() { return _collection_set; }
ysr@777 1021
johnc@1829 1022 void clear_collection_set() { _collection_set = NULL; }
johnc@1829 1023
ysr@777 1024 // The number of elements in the current collection set.
ysr@777 1025 size_t collection_set_size() { return _collection_set_size; }
ysr@777 1026
ysr@777 1027 // Add "hr" to the CS.
ysr@777 1028 void add_to_collection_set(HeapRegion* hr);
ysr@777 1029
johnc@1829 1030 // Incremental CSet Support
johnc@1829 1031
johnc@1829 1032 // The head of the incrementally built collection set.
johnc@1829 1033 HeapRegion* inc_cset_head() { return _inc_cset_head; }
johnc@1829 1034
johnc@1829 1035 // The tail of the incrementally built collection set.
johnc@1829 1036 HeapRegion* inc_set_tail() { return _inc_cset_tail; }
johnc@1829 1037
johnc@1829 1038 // The number of elements in the incrementally built collection set.
johnc@1829 1039 size_t inc_cset_size() { return _inc_cset_size; }
johnc@1829 1040
johnc@1829 1041 // Initialize incremental collection set info.
johnc@1829 1042 void start_incremental_cset_building();
johnc@1829 1043
johnc@1829 1044 void clear_incremental_cset() {
johnc@1829 1045 _inc_cset_head = NULL;
johnc@1829 1046 _inc_cset_tail = NULL;
johnc@1829 1047 }
johnc@1829 1048
johnc@1829 1049 // Stop adding regions to the incremental collection set
johnc@1829 1050 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
johnc@1829 1051
johnc@1829 1052 // Add/remove information about hr to the aggregated information
johnc@1829 1053 // for the incrementally built collection set.
johnc@1829 1054 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
johnc@1829 1055 void remove_from_incremental_cset_info(HeapRegion* hr);
johnc@1829 1056
johnc@1829 1057 // Update information about hr in the aggregated information for
johnc@1829 1058 // the incrementally built collection set.
johnc@1829 1059 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
johnc@1829 1060
johnc@1829 1061 private:
johnc@1829 1062 // Update the incremental cset information when adding a region
johnc@1829 1063 // (should not be called directly).
johnc@1829 1064 void add_region_to_incremental_cset_common(HeapRegion* hr);
johnc@1829 1065
johnc@1829 1066 public:
johnc@1829 1067 // Add hr to the LHS of the incremental collection set.
johnc@1829 1068 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
johnc@1829 1069
johnc@1829 1070 // Add hr to the RHS of the incremental collection set.
johnc@1829 1071 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
johnc@1829 1072
johnc@1829 1073 #ifndef PRODUCT
johnc@1829 1074 void print_collection_set(HeapRegion* list_head, outputStream* st);
johnc@1829 1075 #endif // !PRODUCT
johnc@1829 1076
tonyp@1794 1077 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
tonyp@1794 1078 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
tonyp@1794 1079 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
tonyp@1794 1080
tonyp@1794 1081 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
tonyp@1794 1082 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
tonyp@1794 1083 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
tonyp@1794 1084
tonyp@1794 1085 // This is called at the very beginning of an evacuation pause (it
tonyp@1794 1086 // has to be the first thing that the pause does). If
tonyp@1794 1087 // initiate_conc_mark_if_possible() is true, and the concurrent
tonyp@1794 1088 // marking thread has completed its work during the previous cycle,
tonyp@1794 1089 // it will set during_initial_mark_pause() so that the pause does
tonyp@1794 1090 // the initial-mark work and start a marking cycle.
tonyp@1794 1091 void decide_on_conc_mark_initiation();
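//
// A minimal sketch (assumed shape only, not the actual implementation) of
// how the two flags are expected to interact inside this call:
//
//   void G1CollectorPolicy::decide_on_conc_mark_initiation() {
//     if (initiate_conc_mark_if_possible()) {
//       if (/* concurrent marking thread finished the previous cycle */) {
//         clear_initiate_conc_mark_if_possible();
//         set_during_initial_mark_pause();  // this pause does initial-mark work
//       }
//       // otherwise leave the request set and reconsider at a later pause
//     }
//   }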
ysr@777 1092
ysr@777 1093 // If an expansion would be appropriate, because recent GC overhead had
ysr@777 1094 // exceeded the desired limit, return an amount to expand by.
ysr@777 1095 virtual size_t expansion_amount();
ysr@777 1096
ysr@777 1097 // note start of mark thread
ysr@777 1098 void note_start_of_mark_thread();
ysr@777 1099
ysr@777 1100 // The marked bytes of region "r" have changed; reclassify its desirability
ysr@777 1101 // for marking. Also asserts that "r" is eligible for a CS.
ysr@777 1102 virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
ysr@777 1103
ysr@777 1104 #ifndef PRODUCT
ysr@777 1105 // Check any appropriate marked bytes info, asserting false if
ysr@777 1106 // something's wrong, else returning "true".
ysr@777 1107 virtual bool assertMarkedBytesDataOK() = 0;
ysr@777 1108 #endif
ysr@777 1109
ysr@777 1110 // Print tracing information.
ysr@777 1111 void print_tracing_info() const;
ysr@777 1112
ysr@777 1113 // Print stats on young survival ratio
ysr@777 1114 void print_yg_surv_rate_info() const;
ysr@777 1115
apetrusenko@980 1116 void finished_recalculating_age_indexes(bool is_survivors) {
apetrusenko@980 1117 if (is_survivors) {
apetrusenko@980 1118 _survivor_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1119 } else {
apetrusenko@980 1120 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1121 }
ysr@777 1122 // do that for any other surv rate groups
ysr@777 1123 }
ysr@777 1124
ysr@777 1125 bool should_add_next_region_to_young_list();
ysr@777 1126
ysr@777 1127 bool in_young_gc_mode() {
ysr@777 1128 return _in_young_gc_mode;
ysr@777 1129 }
ysr@777 1130 void set_in_young_gc_mode(bool in_young_gc_mode) {
ysr@777 1131 _in_young_gc_mode = in_young_gc_mode;
ysr@777 1132 }
ysr@777 1133
ysr@777 1134 bool full_young_gcs() {
ysr@777 1135 return _full_young_gcs;
ysr@777 1136 }
ysr@777 1137 void set_full_young_gcs(bool full_young_gcs) {
ysr@777 1138 _full_young_gcs = full_young_gcs;
ysr@777 1139 }
ysr@777 1140
ysr@777 1141 bool adaptive_young_list_length() {
ysr@777 1142 return _adaptive_young_list_length;
ysr@777 1143 }
ysr@777 1144 void set_adaptive_young_list_length(bool adaptive_young_list_length) {
ysr@777 1145 _adaptive_young_list_length = adaptive_young_list_length;
ysr@777 1146 }
ysr@777 1147
ysr@777 1148 inline double get_gc_eff_factor() {
ysr@777 1149 double ratio = _known_garbage_ratio;
ysr@777 1150
ysr@777 1151 double square = ratio * ratio;
ysr@777 1152 // square = square * square;
ysr@777 1153 double ret = square * 9.0 + 1.0;
ysr@777 1154 #if 0
ysr@777 1155 gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
ysr@777 1156 #endif // 0
ysr@777 1157 guarantee(0.0 <= ret && ret < 10.0, "invariant!");
ysr@777 1158 return ret;
ysr@777 1159 }
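//
// For example (illustrative value only): a known-garbage ratio of 0.5 gives
// 0.5 * 0.5 * 9.0 + 1.0 == 3.25; the factor is 1.0 with no known garbage and
// approaches (but stays below) 10.0 as the ratio approaches 1.0.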
ysr@777 1160
ysr@777 1161 //
ysr@777 1162 // Survivor regions policy.
ysr@777 1163 //
ysr@777 1164 protected:
ysr@777 1165
ysr@777 1166 // Current tenuring threshold, set to 0 if the collector reaches the
ysr@777 1167 // maximum number of survivor regions.
ysr@777 1168 int _tenuring_threshold;
ysr@777 1169
apetrusenko@980 1170 // The limit on the number of regions allocated for survivors.
apetrusenko@980 1171 size_t _max_survivor_regions;
apetrusenko@980 1172
apetrusenko@980 1173 // The number of survivor regions after a collection.
apetrusenko@980 1174 size_t _recorded_survivor_regions;
apetrusenko@980 1175 // List of survivor regions.
apetrusenko@980 1176 HeapRegion* _recorded_survivor_head;
apetrusenko@980 1177 HeapRegion* _recorded_survivor_tail;
apetrusenko@980 1178
apetrusenko@980 1179 ageTable _survivors_age_table;
apetrusenko@980 1180
ysr@777 1181 public:
ysr@777 1182
ysr@777 1183 inline GCAllocPurpose
ysr@777 1184 evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
ysr@777 1185 if (age < _tenuring_threshold && src_region->is_young()) {
ysr@777 1186 return GCAllocForSurvived;
ysr@777 1187 } else {
ysr@777 1188 return GCAllocForTenured;
ysr@777 1189 }
ysr@777 1190 }
ysr@777 1191
ysr@777 1192 inline bool track_object_age(GCAllocPurpose purpose) {
ysr@777 1193 return purpose == GCAllocForSurvived;
ysr@777 1194 }
ysr@777 1195
ysr@777 1196 inline GCAllocPurpose alternative_purpose(int purpose) {
ysr@777 1197 return GCAllocForTenured;
ysr@777 1198 }
ysr@777 1199
apetrusenko@980 1200 static const size_t REGIONS_UNLIMITED = ~(size_t)0;
apetrusenko@980 1201
apetrusenko@980 1202 size_t max_regions(int purpose);
ysr@777 1203
ysr@777 1204 // The limit on regions for a particular purpose is reached.
ysr@777 1205 void note_alloc_region_limit_reached(int purpose) {
ysr@777 1206 if (purpose == GCAllocForSurvived) {
ysr@777 1207 _tenuring_threshold = 0;
ysr@777 1208 }
ysr@777 1209 }
ysr@777 1210
ysr@777 1211 void note_start_adding_survivor_regions() {
ysr@777 1212 _survivor_surv_rate_group->start_adding_regions();
ysr@777 1213 }
ysr@777 1214
ysr@777 1215 void note_stop_adding_survivor_regions() {
ysr@777 1216 _survivor_surv_rate_group->stop_adding_regions();
ysr@777 1217 }
apetrusenko@980 1218
apetrusenko@980 1219 void record_survivor_regions(size_t regions,
apetrusenko@980 1220 HeapRegion* head,
apetrusenko@980 1221 HeapRegion* tail) {
apetrusenko@980 1222 _recorded_survivor_regions = regions;
apetrusenko@980 1223 _recorded_survivor_head = head;
apetrusenko@980 1224 _recorded_survivor_tail = tail;
apetrusenko@980 1225 }
apetrusenko@980 1226
tonyp@1273 1227 size_t recorded_survivor_regions() {
tonyp@1273 1228 return _recorded_survivor_regions;
tonyp@1273 1229 }
tonyp@1273 1230
apetrusenko@980 1231 void record_thread_age_table(ageTable* age_table)
apetrusenko@980 1232 {
apetrusenko@980 1233 _survivors_age_table.merge_par(age_table);
apetrusenko@980 1234 }
apetrusenko@980 1235
apetrusenko@980 1236 // Calculates survivor space parameters.
apetrusenko@980 1237 void calculate_survivors_policy();
apetrusenko@980 1238
ysr@777 1239 };
ysr@777 1240
ysr@777 1241 // This encapsulates a particular strategy for a g1 Collector.
ysr@777 1242 //
ysr@777 1243 // Start a concurrent mark when our heap size is n bytes
ysr@777 1244 // greater than our heap size was at the last concurrent
ysr@777 1245 // mark, where n is a function of the CMSTriggerRatio
ysr@777 1246 // and the MinHeapFreeRatio.
ysr@777 1247 //
ysr@777 1248 // Start a g1 collection pause when we have allocated the
ysr@777 1249 // average number of bytes currently being freed in
ysr@777 1250 // a collection, but only if that amount is at least one
ysr@777 1251 // region's worth.
ysr@777 1252 //
ysr@777 1253 // Resize Heap based on desired
ysr@777 1254 // allocation space, where desired allocation space is
ysr@777 1255 // a function of the survival rate and the desired future to-space size.
ysr@777 1256 //
ysr@777 1257 // Choose collection set by first picking all older regions
ysr@777 1258 // which have a survival rate which beats our projected young
ysr@777 1259 // survival rate. Then fill out the number of needed regions
ysr@777 1260 // with young regions.
ysr@777 1261
ysr@777 1262 class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
ysr@777 1263 CollectionSetChooser* _collectionSetChooser;
ysr@777 1264 // If the estimate is less than desirable, resize if possible.
ysr@777 1265 void expand_if_possible(size_t numRegions);
ysr@777 1266
johnc@1829 1267 virtual bool choose_collection_set();
ysr@777 1268 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 1269 size_t start_used);
ysr@777 1270 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 1271 size_t max_live_bytes);
ysr@777 1272 virtual void record_full_collection_end();
ysr@777 1273
ysr@777 1274 public:
ysr@777 1275 G1CollectorPolicy_BestRegionsFirst() {
ysr@777 1276 _collectionSetChooser = new CollectionSetChooser();
ysr@777 1277 }
apetrusenko@1112 1278 void record_collection_pause_end(bool abandoned);
ysr@777 1279 bool should_do_collection_pause(size_t word_size);
ysr@777 1280 // This is not needed any more, after the CSet choosing code was
ysr@777 1281 // changed to use the pause prediction work. But let's leave the
ysr@777 1282 // hook in just in case.
ysr@777 1283 void note_change_in_marked_bytes(HeapRegion* r) { }
ysr@777 1284 #ifndef PRODUCT
ysr@777 1285 bool assertMarkedBytesDataOK();
ysr@777 1286 #endif
ysr@777 1287 };
ysr@777 1288
ysr@777 1289 // This should move to some place more general...
ysr@777 1290
ysr@777 1291 // If we have "n" measurements, and we've kept track of their "sum" and the
ysr@777 1292 // "sum_of_squares" of the measurements, this returns the variance of the
ysr@777 1293 // sequence.
ysr@777 1294 inline double variance(int n, double sum_of_squares, double sum) {
ysr@777 1295 double n_d = (double)n;
ysr@777 1296 double avg = sum/n_d;
ysr@777 1297 return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
ysr@777 1298 }
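//
// This is the usual identity Var(x) = E[x^2] - E[x]^2 written out:
//   (sum_of_squares - 2*avg*sum + n*avg^2) / n
//     == sum_of_squares/n - 2*avg^2 + avg^2
//     == sum_of_squares/n - avg^2
// For example, the samples {1.0, 2.0, 3.0} have n == 3, sum == 6.0 and
// sum_of_squares == 14.0, so variance(3, 14.0, 6.0) == 14.0/3 - 4.0 == 2.0/3
// (this is the population variance, i.e. the divisor is n, not n - 1).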
ysr@777 1299
ysr@777 1300 // Local Variables: ***
ysr@777 1301 // c-indentation-style: gnu ***
ysr@777 1302 // End: ***
