src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author       johnc
date         Mon, 02 Aug 2010 12:51:43 -0700
changeset    2060:2d160770d2e5
parent       2011:4e5661ba9d98
child        2062:0ce1569c90e5
permissions  -rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp
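The scheme described in the summary can be pictured with a short, self-contained sketch. This is not HotSpot code: CardPtr and CardQueue below are simplified stand-ins for the real card and DirtyCardQueue types, and the two functions only outline where the RSet-update and evacuation-failure paths would enqueue and replay cards.

// Minimal sketch (not HotSpot code) of the scheme described in the summary.
#include <cstddef>
#include <vector>

typedef const void* CardPtr;     // address of a card whose references point into the cset

struct CardQueue {               // hypothetical stand-in for a per-worker DirtyCardQueue
  std::vector<CardPtr> _cards;
  void enqueue(CardPtr c) { _cards.push_back(c); }
};

// During RSet updating: process the reference immediately (no _new_refs array)
// and remember only the card that contained it.
void process_card_during_rset_update(CardPtr card, CardQueue* into_cset_queue) {
  // ... scan the card and handle the references it holds into the cset ...
  into_cset_queue->enqueue(card);  // keep the card for possible replay
}

// On evacuation failure: replay the recorded cards to recreate the RSets of
// the regions that were in the collection set.
void recreate_cset_rsets_after_evac_failure(CardQueue* into_cset_queue) {
  for (size_t i = 0; i < into_cset_queue->_cards.size(); ++i) {
    CardPtr card = into_cset_queue->_cards[i];
    // ... rescan 'card' and re-add its references to the appropriate RSets ...
    (void)card;
  }
}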

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 // A G1CollectorPolicy makes policy decisions that determine the
ysr@777 26 // characteristics of the collector. Examples include:
ysr@777 27 // * choice of collection set.
ysr@777 28 // * when to collect.
ysr@777 29
ysr@777 30 class HeapRegion;
ysr@777 31 class CollectionSetChooser;
ysr@777 32
ysr@777 33 // Yes, this is a bit unpleasant... but it saves replicating the same thing
ysr@777 34 // over and over again and introducing subtle problems through small typos and
ysr@777 35 // cutting and pasting mistakes. The macro below introduces a number
ysr@777 36 // sequence into the following two classes and the methods that access it.
ysr@777 37
ysr@777 38 #define define_num_seq(name) \
ysr@777 39 private: \
ysr@777 40 NumberSeq _all_##name##_times_ms; \
ysr@777 41 public: \
ysr@777 42 void record_##name##_time_ms(double ms) { \
ysr@777 43 _all_##name##_times_ms.add(ms); \
ysr@777 44 } \
ysr@777 45 NumberSeq* get_##name##_seq() { \
ysr@777 46 return &_all_##name##_times_ms; \
ysr@777 47 }
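For reference, here is a minimal standalone sketch of what one instantiation of the macro generates, with SimpleSeq substituted as a simplified stand-in for the real NumberSeq class (which is not shown here).

// Sketch of the expansion of define_num_seq(total), using SimpleSeq in place of NumberSeq.
#include <vector>

struct SimpleSeq {                        // stand-in for NumberSeq
  std::vector<double> _values;
  void add(double v) { _values.push_back(v); }
};

class PauseSummarySketch {
  // define_num_seq(total) generates (for this stand-in type):
 private:
  SimpleSeq _all_total_times_ms;
 public:
  void record_total_time_ms(double ms) { _all_total_times_ms.add(ms); }
  SimpleSeq* get_total_seq()           { return &_all_total_times_ms; }
};

// Usage: summary.record_total_time_ms(12.3); summary.get_total_seq();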
ysr@777 48
ysr@777 49 class MainBodySummary;
ysr@777 50
apetrusenko@984 51 class PauseSummary: public CHeapObj {
ysr@777 52 define_num_seq(total)
ysr@777 53 define_num_seq(other)
ysr@777 54
ysr@777 55 public:
ysr@777 56 virtual MainBodySummary* main_body_summary() { return NULL; }
ysr@777 57 };
ysr@777 58
apetrusenko@984 59 class MainBodySummary: public CHeapObj {
ysr@777 60 define_num_seq(satb_drain) // optional
ysr@777 61 define_num_seq(parallel) // parallel only
ysr@777 62 define_num_seq(ext_root_scan)
ysr@777 63 define_num_seq(mark_stack_scan)
ysr@777 64 define_num_seq(update_rs)
ysr@777 65 define_num_seq(scan_rs)
ysr@777 66 define_num_seq(obj_copy)
ysr@777 67 define_num_seq(termination) // parallel only
ysr@777 68 define_num_seq(parallel_other) // parallel only
ysr@777 69 define_num_seq(mark_closure)
ysr@777 70 define_num_seq(clear_ct) // parallel only
ysr@777 71 };
ysr@777 72
apetrusenko@1112 73 class Summary: public PauseSummary,
apetrusenko@1112 74 public MainBodySummary {
ysr@777 75 public:
ysr@777 76 virtual MainBodySummary* main_body_summary() { return this; }
ysr@777 77 };
ysr@777 78
apetrusenko@1112 79 class AbandonedSummary: public PauseSummary {
ysr@777 80 };
ysr@777 81
ysr@777 82 class G1CollectorPolicy: public CollectorPolicy {
ysr@777 83 protected:
ysr@777 84 // The number of pauses during the execution.
ysr@777 85 long _n_pauses;
ysr@777 86
ysr@777 87 // either equal to the number of parallel threads, if ParallelGCThreads
ysr@777 88 // has been set, or 1 otherwise
ysr@777 89 int _parallel_gc_threads;
ysr@777 90
ysr@777 91 enum SomePrivateConstants {
tonyp@1377 92 NumPrevPausesForHeuristics = 10
ysr@777 93 };
ysr@777 94
ysr@777 95 G1MMUTracker* _mmu_tracker;
ysr@777 96
ysr@777 97 void initialize_flags();
ysr@777 98
ysr@777 99 void initialize_all() {
ysr@777 100 initialize_flags();
ysr@777 101 initialize_size_info();
ysr@777 102 initialize_perm_generation(PermGen::MarkSweepCompact);
ysr@777 103 }
ysr@777 104
ysr@777 105 virtual size_t default_init_heap_size() {
ysr@777 106 // Pick some reasonable default.
ysr@777 107 return 8*M;
ysr@777 108 }
ysr@777 109
ysr@777 110 double _cur_collection_start_sec;
ysr@777 111 size_t _cur_collection_pause_used_at_start_bytes;
ysr@777 112 size_t _cur_collection_pause_used_regions_at_start;
ysr@777 113 size_t _prev_collection_pause_used_at_end_bytes;
ysr@777 114 double _cur_collection_par_time_ms;
ysr@777 115 double _cur_satb_drain_time_ms;
ysr@777 116 double _cur_clear_ct_time_ms;
ysr@777 117 bool _satb_drain_time_set;
ysr@777 118
johnc@1325 119 #ifndef PRODUCT
johnc@1325 120 // Card Table Count Cache stats
johnc@1325 121 double _min_clear_cc_time_ms; // min
johnc@1325 122 double _max_clear_cc_time_ms; // max
johnc@1325 123 double _cur_clear_cc_time_ms; // clearing time during current pause
johnc@1325 124 double _cum_clear_cc_time_ms; // cumulative clearing time
johnc@1325 125 jlong _num_cc_clears; // number of times the card count cache has been cleared
johnc@1325 126 #endif
johnc@1325 127
ysr@777 128 double _cur_CH_strong_roots_end_sec;
ysr@777 129 double _cur_CH_strong_roots_dur_ms;
ysr@777 130 double _cur_G1_strong_roots_end_sec;
ysr@777 131 double _cur_G1_strong_roots_dur_ms;
ysr@777 132
ysr@777 133 // Statistics for recent GC pauses. See below for how indexed.
ysr@777 134 TruncatedSeq* _recent_CH_strong_roots_times_ms;
ysr@777 135 TruncatedSeq* _recent_G1_strong_roots_times_ms;
ysr@777 136 TruncatedSeq* _recent_evac_times_ms;
ysr@777 137 // These exclude marking times.
ysr@777 138 TruncatedSeq* _recent_pause_times_ms;
ysr@777 139 TruncatedSeq* _recent_gc_times_ms;
ysr@777 140
ysr@777 141 TruncatedSeq* _recent_CS_bytes_used_before;
ysr@777 142 TruncatedSeq* _recent_CS_bytes_surviving;
ysr@777 143
ysr@777 144 TruncatedSeq* _recent_rs_sizes;
ysr@777 145
ysr@777 146 TruncatedSeq* _concurrent_mark_init_times_ms;
ysr@777 147 TruncatedSeq* _concurrent_mark_remark_times_ms;
ysr@777 148 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
ysr@777 149
apetrusenko@1112 150 Summary* _summary;
apetrusenko@1112 151 AbandonedSummary* _abandoned_summary;
ysr@777 152
ysr@777 153 NumberSeq* _all_pause_times_ms;
ysr@777 154 NumberSeq* _all_full_gc_times_ms;
ysr@777 155 double _stop_world_start;
ysr@777 156 NumberSeq* _all_stop_world_times_ms;
ysr@777 157 NumberSeq* _all_yield_times_ms;
ysr@777 158
ysr@777 159 size_t _region_num_young;
ysr@777 160 size_t _region_num_tenured;
ysr@777 161 size_t _prev_region_num_young;
ysr@777 162 size_t _prev_region_num_tenured;
ysr@777 163
ysr@777 164 NumberSeq* _all_mod_union_times_ms;
ysr@777 165
ysr@777 166 int _aux_num;
ysr@777 167 NumberSeq* _all_aux_times_ms;
ysr@777 168 double* _cur_aux_start_times_ms;
ysr@777 169 double* _cur_aux_times_ms;
ysr@777 170 bool* _cur_aux_times_set;
ysr@777 171
tonyp@1966 172 double* _par_last_gc_worker_start_times_ms;
ysr@777 173 double* _par_last_ext_root_scan_times_ms;
ysr@777 174 double* _par_last_mark_stack_scan_times_ms;
ysr@777 175 double* _par_last_update_rs_times_ms;
ysr@777 176 double* _par_last_update_rs_processed_buffers;
ysr@777 177 double* _par_last_scan_rs_times_ms;
ysr@777 178 double* _par_last_obj_copy_times_ms;
ysr@777 179 double* _par_last_termination_times_ms;
tonyp@1966 180 double* _par_last_termination_attempts;
tonyp@1966 181 double* _par_last_gc_worker_end_times_ms;
ysr@777 182
ysr@777 183 // indicates that we are in young GC mode
ysr@777 184 bool _in_young_gc_mode;
ysr@777 185
ysr@777 186 // indicates whether we are in full young or partially young GC mode
ysr@777 187 bool _full_young_gcs;
ysr@777 188
ysr@777 189 // if true, then it tries to dynamically adjust the length of the
ysr@777 190 // young list
ysr@777 191 bool _adaptive_young_list_length;
ysr@777 192 size_t _young_list_min_length;
ysr@777 193 size_t _young_list_target_length;
ysr@777 194 size_t _young_list_fixed_length;
ysr@777 195
ysr@777 196 size_t _young_cset_length;
ysr@777 197 bool _last_young_gc_full;
ysr@777 198
ysr@777 199 unsigned _full_young_pause_num;
ysr@777 200 unsigned _partial_young_pause_num;
ysr@777 201
ysr@777 202 bool _during_marking;
ysr@777 203 bool _in_marking_window;
ysr@777 204 bool _in_marking_window_im;
ysr@777 205
ysr@777 206 SurvRateGroup* _short_lived_surv_rate_group;
ysr@777 207 SurvRateGroup* _survivor_surv_rate_group;
ysr@777 208 // add here any more surv rate groups
ysr@777 209
tonyp@1791 210 double _gc_overhead_perc;
tonyp@1791 211
ysr@777 212 bool during_marking() {
ysr@777 213 return _during_marking;
ysr@777 214 }
ysr@777 215
ysr@777 216 // <NEW PREDICTION>
ysr@777 217
ysr@777 218 private:
ysr@777 219 enum PredictionConstants {
ysr@777 220 TruncatedSeqLength = 10
ysr@777 221 };
ysr@777 222
ysr@777 223 TruncatedSeq* _alloc_rate_ms_seq;
ysr@777 224 double _prev_collection_pause_end_ms;
ysr@777 225
ysr@777 226 TruncatedSeq* _pending_card_diff_seq;
ysr@777 227 TruncatedSeq* _rs_length_diff_seq;
ysr@777 228 TruncatedSeq* _cost_per_card_ms_seq;
ysr@777 229 TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
ysr@777 230 TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
ysr@777 231 TruncatedSeq* _cost_per_entry_ms_seq;
ysr@777 232 TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
ysr@777 233 TruncatedSeq* _cost_per_byte_ms_seq;
ysr@777 234 TruncatedSeq* _constant_other_time_ms_seq;
ysr@777 235 TruncatedSeq* _young_other_cost_per_region_ms_seq;
ysr@777 236 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
ysr@777 237
ysr@777 238 TruncatedSeq* _pending_cards_seq;
ysr@777 239 TruncatedSeq* _scanned_cards_seq;
ysr@777 240 TruncatedSeq* _rs_lengths_seq;
ysr@777 241
ysr@777 242 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
ysr@777 243
ysr@777 244 TruncatedSeq* _young_gc_eff_seq;
ysr@777 245
ysr@777 246 TruncatedSeq* _max_conc_overhead_seq;
ysr@777 247
ysr@777 248 size_t _recorded_young_regions;
ysr@777 249 size_t _recorded_non_young_regions;
ysr@777 250 size_t _recorded_region_num;
ysr@777 251
ysr@777 252 size_t _free_regions_at_end_of_collection;
ysr@777 253
ysr@777 254 size_t _recorded_rs_lengths;
ysr@777 255 size_t _max_rs_lengths;
ysr@777 256
ysr@777 257 size_t _recorded_marked_bytes;
ysr@777 258 size_t _recorded_young_bytes;
ysr@777 259
ysr@777 260 size_t _predicted_pending_cards;
ysr@777 261 size_t _predicted_cards_scanned;
ysr@777 262 size_t _predicted_rs_lengths;
ysr@777 263 size_t _predicted_bytes_to_copy;
ysr@777 264
ysr@777 265 double _predicted_survival_ratio;
ysr@777 266 double _predicted_rs_update_time_ms;
ysr@777 267 double _predicted_rs_scan_time_ms;
ysr@777 268 double _predicted_object_copy_time_ms;
ysr@777 269 double _predicted_constant_other_time_ms;
ysr@777 270 double _predicted_young_other_time_ms;
ysr@777 271 double _predicted_non_young_other_time_ms;
ysr@777 272 double _predicted_pause_time_ms;
ysr@777 273
ysr@777 274 double _vtime_diff_ms;
ysr@777 275
ysr@777 276 double _recorded_young_free_cset_time_ms;
ysr@777 277 double _recorded_non_young_free_cset_time_ms;
ysr@777 278
ysr@777 279 double _sigma;
ysr@777 280 double _expensive_region_limit_ms;
ysr@777 281
ysr@777 282 size_t _rs_lengths_prediction;
ysr@777 283
ysr@777 284 size_t _known_garbage_bytes;
ysr@777 285 double _known_garbage_ratio;
ysr@777 286
ysr@777 287 double sigma() {
ysr@777 288 return _sigma;
ysr@777 289 }
ysr@777 290
ysr@777 291 // A function that prevents us putting too much stock in small sample
ysr@777 292 // sets. Returns a number between 2.0 and 1.0, depending on the number
ysr@777 293 // of samples. 5 or more samples yields one; fewer scales linearly from
ysr@777 294 // 2.0 at 1 sample to 1.0 at 5.
ysr@777 295 double confidence_factor(int samples) {
ysr@777 296 if (samples > 4) return 1.0;
ysr@777 297 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
ysr@777 298 }
ysr@777 299
ysr@777 300 double get_new_neg_prediction(TruncatedSeq* seq) {
ysr@777 301 return seq->davg() - sigma() * seq->dsd();
ysr@777 302 }
ysr@777 303
ysr@777 304 #ifndef PRODUCT
ysr@777 305 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
ysr@777 306 #endif // PRODUCT
ysr@777 307
iveresov@1546 308 void adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 309 double update_rs_processed_buffers,
iveresov@1546 310 double goal_ms);
iveresov@1546 311
ysr@777 312 protected:
ysr@777 313 double _pause_time_target_ms;
ysr@777 314 double _recorded_young_cset_choice_time_ms;
ysr@777 315 double _recorded_non_young_cset_choice_time_ms;
ysr@777 316 bool _within_target;
ysr@777 317 size_t _pending_cards;
ysr@777 318 size_t _max_pending_cards;
ysr@777 319
ysr@777 320 public:
ysr@777 321
ysr@777 322 void set_region_short_lived(HeapRegion* hr) {
ysr@777 323 hr->install_surv_rate_group(_short_lived_surv_rate_group);
ysr@777 324 }
ysr@777 325
ysr@777 326 void set_region_survivors(HeapRegion* hr) {
ysr@777 327 hr->install_surv_rate_group(_survivor_surv_rate_group);
ysr@777 328 }
ysr@777 329
ysr@777 330 #ifndef PRODUCT
ysr@777 331 bool verify_young_ages();
ysr@777 332 #endif // PRODUCT
ysr@777 333
ysr@777 334 double get_new_prediction(TruncatedSeq* seq) {
ysr@777 335 return MAX2(seq->davg() + sigma() * seq->dsd(),
ysr@777 336 seq->davg() * confidence_factor(seq->num()));
ysr@777 337 }
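A small standalone sketch of the prediction formula above, using ToySeq as a stand-in for TruncatedSeq (davg/dsd being the decaying average and standard deviation) and a representative sigma of 0.5; the numbers only illustrate how the confidence factor pads predictions drawn from few samples.

// Sketch of get_new_prediction()/confidence_factor() with a toy sequence type.
#include <algorithm>
#include <cstdio>

struct ToySeq {                  // stand-in for TruncatedSeq
  double _avg, _sd; int _n;
  double davg() const { return _avg; }
  double dsd()  const { return _sd;  }
  int    num()  const { return _n;   }
};

static double toy_sigma = 0.5;   // representative value for the policy's _sigma

static double toy_confidence_factor(int samples) {
  if (samples > 4) return 1.0;
  return 1.0 + toy_sigma * (double)(5 - samples) / 2.0;
}

static double toy_get_new_prediction(const ToySeq& seq) {
  return std::max(seq.davg() + toy_sigma * seq.dsd(),
                  seq.davg() * toy_confidence_factor(seq.num()));
}

int main() {
  ToySeq few  = { 10.0, 1.0, 2 };   // few samples: confidence padding dominates
  ToySeq many = { 10.0, 1.0, 8 };   // many samples: avg + sigma * sd dominates
  std::printf("few=%.2f many=%.2f\n",
              toy_get_new_prediction(few),    // 10 * 1.75 = 17.5
              toy_get_new_prediction(many));  // 10 + 0.5  = 10.5
  return 0;
}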
ysr@777 338
ysr@777 339 size_t young_cset_length() {
ysr@777 340 return _young_cset_length;
ysr@777 341 }
ysr@777 342
ysr@777 343 void record_max_rs_lengths(size_t rs_lengths) {
ysr@777 344 _max_rs_lengths = rs_lengths;
ysr@777 345 }
ysr@777 346
ysr@777 347 size_t predict_pending_card_diff() {
ysr@777 348 double prediction = get_new_neg_prediction(_pending_card_diff_seq);
ysr@777 349 if (prediction < 0.00001)
ysr@777 350 return 0;
ysr@777 351 else
ysr@777 352 return (size_t) prediction;
ysr@777 353 }
ysr@777 354
ysr@777 355 size_t predict_pending_cards() {
ysr@777 356 size_t max_pending_card_num = _g1->max_pending_card_num();
ysr@777 357 size_t diff = predict_pending_card_diff();
ysr@777 358 size_t prediction;
ysr@777 359 if (diff > max_pending_card_num)
ysr@777 360 prediction = max_pending_card_num;
ysr@777 361 else
ysr@777 362 prediction = max_pending_card_num - diff;
ysr@777 363
ysr@777 364 return prediction;
ysr@777 365 }
ysr@777 366
ysr@777 367 size_t predict_rs_length_diff() {
ysr@777 368 return (size_t) get_new_prediction(_rs_length_diff_seq);
ysr@777 369 }
ysr@777 370
ysr@777 371 double predict_alloc_rate_ms() {
ysr@777 372 return get_new_prediction(_alloc_rate_ms_seq);
ysr@777 373 }
ysr@777 374
ysr@777 375 double predict_cost_per_card_ms() {
ysr@777 376 return get_new_prediction(_cost_per_card_ms_seq);
ysr@777 377 }
ysr@777 378
ysr@777 379 double predict_rs_update_time_ms(size_t pending_cards) {
ysr@777 380 return (double) pending_cards * predict_cost_per_card_ms();
ysr@777 381 }
ysr@777 382
ysr@777 383 double predict_fully_young_cards_per_entry_ratio() {
ysr@777 384 return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
ysr@777 385 }
ysr@777 386
ysr@777 387 double predict_partially_young_cards_per_entry_ratio() {
ysr@777 388 if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
ysr@777 389 return predict_fully_young_cards_per_entry_ratio();
ysr@777 390 else
ysr@777 391 return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
ysr@777 392 }
ysr@777 393
ysr@777 394 size_t predict_young_card_num(size_t rs_length) {
ysr@777 395 return (size_t) ((double) rs_length *
ysr@777 396 predict_fully_young_cards_per_entry_ratio());
ysr@777 397 }
ysr@777 398
ysr@777 399 size_t predict_non_young_card_num(size_t rs_length) {
ysr@777 400 return (size_t) ((double) rs_length *
ysr@777 401 predict_partially_young_cards_per_entry_ratio());
ysr@777 402 }
ysr@777 403
ysr@777 404 double predict_rs_scan_time_ms(size_t card_num) {
ysr@777 405 if (full_young_gcs())
ysr@777 406 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 407 else
ysr@777 408 return predict_partially_young_rs_scan_time_ms(card_num);
ysr@777 409 }
ysr@777 410
ysr@777 411 double predict_partially_young_rs_scan_time_ms(size_t card_num) {
ysr@777 412 if (_partially_young_cost_per_entry_ms_seq->num() < 3)
ysr@777 413 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 414 else
ysr@777 415 return (double) card_num *
ysr@777 416 get_new_prediction(_partially_young_cost_per_entry_ms_seq);
ysr@777 417 }
ysr@777 418
ysr@777 419 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
ysr@777 420 if (_cost_per_byte_ms_during_cm_seq->num() < 3)
ysr@777 421 return 1.1 * (double) bytes_to_copy *
ysr@777 422 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 423 else
ysr@777 424 return (double) bytes_to_copy *
ysr@777 425 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
ysr@777 426 }
ysr@777 427
ysr@777 428 double predict_object_copy_time_ms(size_t bytes_to_copy) {
ysr@777 429 if (_in_marking_window && !_in_marking_window_im)
ysr@777 430 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
ysr@777 431 else
ysr@777 432 return (double) bytes_to_copy *
ysr@777 433 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 434 }
ysr@777 435
ysr@777 436 double predict_constant_other_time_ms() {
ysr@777 437 return get_new_prediction(_constant_other_time_ms_seq);
ysr@777 438 }
ysr@777 439
ysr@777 440 double predict_young_other_time_ms(size_t young_num) {
ysr@777 441 return
ysr@777 442 (double) young_num *
ysr@777 443 get_new_prediction(_young_other_cost_per_region_ms_seq);
ysr@777 444 }
ysr@777 445
ysr@777 446 double predict_non_young_other_time_ms(size_t non_young_num) {
ysr@777 447 return
ysr@777 448 (double) non_young_num *
ysr@777 449 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
ysr@777 450 }
ysr@777 451
ysr@777 452 void check_if_region_is_too_expensive(double predicted_time_ms);
ysr@777 453
ysr@777 454 double predict_young_collection_elapsed_time_ms(size_t adjustment);
ysr@777 455 double predict_base_elapsed_time_ms(size_t pending_cards);
ysr@777 456 double predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 457 size_t scanned_cards);
ysr@777 458 size_t predict_bytes_to_copy(HeapRegion* hr);
ysr@777 459 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
ysr@777 460
johnc@1829 461 // for use by: calculate_young_list_target_length(rs_length)
johnc@1829 462 bool predict_will_fit(size_t young_region_num,
johnc@1829 463 double base_time_ms,
johnc@1829 464 size_t init_free_regions,
johnc@1829 465 double target_pause_time_ms);
ysr@777 466
ysr@777 467 void start_recording_regions();
johnc@1829 468 void record_cset_region_info(HeapRegion* hr, bool young);
johnc@1829 469 void record_non_young_cset_region(HeapRegion* hr);
johnc@1829 470
johnc@1829 471 void set_recorded_young_regions(size_t n_regions);
johnc@1829 472 void set_recorded_young_bytes(size_t bytes);
johnc@1829 473 void set_recorded_rs_lengths(size_t rs_lengths);
johnc@1829 474 void set_predicted_bytes_to_copy(size_t bytes);
johnc@1829 475
ysr@777 476 void end_recording_regions();
ysr@777 477
ysr@777 478 void record_vtime_diff_ms(double vtime_diff_ms) {
ysr@777 479 _vtime_diff_ms = vtime_diff_ms;
ysr@777 480 }
ysr@777 481
ysr@777 482 void record_young_free_cset_time_ms(double time_ms) {
ysr@777 483 _recorded_young_free_cset_time_ms = time_ms;
ysr@777 484 }
ysr@777 485
ysr@777 486 void record_non_young_free_cset_time_ms(double time_ms) {
ysr@777 487 _recorded_non_young_free_cset_time_ms = time_ms;
ysr@777 488 }
ysr@777 489
ysr@777 490 double predict_young_gc_eff() {
ysr@777 491 return get_new_neg_prediction(_young_gc_eff_seq);
ysr@777 492 }
ysr@777 493
apetrusenko@980 494 double predict_survivor_regions_evac_time();
apetrusenko@980 495
ysr@777 496 // </NEW PREDICTION>
ysr@777 497
ysr@777 498 public:
ysr@777 499 void cset_regions_freed() {
ysr@777 500 bool propagate = _last_young_gc_full && !_in_marking_window;
ysr@777 501 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 502 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 503 // also call it on any more surv rate groups
ysr@777 504 }
ysr@777 505
ysr@777 506 void set_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 507 _known_garbage_bytes = known_garbage_bytes;
ysr@777 508 size_t heap_bytes = _g1->capacity();
ysr@777 509 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 510 }
ysr@777 511
ysr@777 512 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 513 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
ysr@777 514
ysr@777 515 _known_garbage_bytes -= known_garbage_bytes;
ysr@777 516 size_t heap_bytes = _g1->capacity();
ysr@777 517 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 518 }
ysr@777 519
ysr@777 520 G1MMUTracker* mmu_tracker() {
ysr@777 521 return _mmu_tracker;
ysr@777 522 }
ysr@777 523
tonyp@2011 524 double max_pause_time_ms() {
tonyp@2011 525 return _mmu_tracker->max_gc_time() * 1000.0;
tonyp@2011 526 }
tonyp@2011 527
ysr@777 528 double predict_init_time_ms() {
ysr@777 529 return get_new_prediction(_concurrent_mark_init_times_ms);
ysr@777 530 }
ysr@777 531
ysr@777 532 double predict_remark_time_ms() {
ysr@777 533 return get_new_prediction(_concurrent_mark_remark_times_ms);
ysr@777 534 }
ysr@777 535
ysr@777 536 double predict_cleanup_time_ms() {
ysr@777 537 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
ysr@777 538 }
ysr@777 539
ysr@777 540 // Returns an estimate of the survival rate of regions at the given
ysr@777 541 // young-generation age "age".
apetrusenko@980 542 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
apetrusenko@980 543 TruncatedSeq* seq = surv_rate_group->get_seq(age);
ysr@777 544 if (seq->num() == 0)
ysr@777 545 gclog_or_tty->print("BARF! age is %d", age);
ysr@777 546 guarantee( seq->num() > 0, "invariant" );
ysr@777 547 double pred = get_new_prediction(seq);
ysr@777 548 if (pred > 1.0)
ysr@777 549 pred = 1.0;
ysr@777 550 return pred;
ysr@777 551 }
ysr@777 552
apetrusenko@980 553 double predict_yg_surv_rate(int age) {
apetrusenko@980 554 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
apetrusenko@980 555 }
apetrusenko@980 556
ysr@777 557 double accum_yg_surv_rate_pred(int age) {
ysr@777 558 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
ysr@777 559 }
ysr@777 560
ysr@777 561 protected:
tonyp@1966 562 void print_stats(int level, const char* str, double value);
tonyp@1966 563 void print_stats(int level, const char* str, int value);
tonyp@1966 564
tonyp@1966 565 void print_par_stats(int level, const char* str, double* data) {
ysr@777 566 print_par_stats(level, str, data, true);
ysr@777 567 }
tonyp@1966 568 void print_par_stats(int level, const char* str, double* data, bool summary);
tonyp@1966 569 void print_par_sizes(int level, const char* str, double* data, bool summary);
ysr@777 570
ysr@777 571 void check_other_times(int level,
ysr@777 572 NumberSeq* other_times_ms,
ysr@777 573 NumberSeq* calc_other_times_ms) const;
ysr@777 574
ysr@777 575 void print_summary (PauseSummary* stats) const;
apetrusenko@1112 576 void print_abandoned_summary(PauseSummary* summary) const;
ysr@777 577
ysr@777 578 void print_summary (int level, const char* str, NumberSeq* seq) const;
ysr@777 579 void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
ysr@777 580
ysr@777 581 double avg_value (double* data);
ysr@777 582 double max_value (double* data);
ysr@777 583 double sum_of_values (double* data);
ysr@777 584 double max_sum (double* data1, double* data2);
ysr@777 585
ysr@777 586 int _last_satb_drain_processed_buffers;
ysr@777 587 int _last_update_rs_processed_buffers;
ysr@777 588 double _last_pause_time_ms;
ysr@777 589
ysr@777 590 size_t _bytes_in_to_space_before_gc;
ysr@777 591 size_t _bytes_in_to_space_after_gc;
ysr@777 592 size_t bytes_in_to_space_during_gc() {
ysr@777 593 return
ysr@777 594 _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
ysr@777 595 }
ysr@777 596 size_t _bytes_in_collection_set_before_gc;
ysr@777 597 // Used to count used bytes in CS.
ysr@777 598 friend class CountCSClosure;
ysr@777 599
ysr@777 600 // Statistics kept per GC stoppage, whether an evacuation pause or a full GC.
ysr@777 601 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
ysr@777 602
ysr@777 603 // We track markings.
ysr@777 604 int _num_markings;
ysr@777 605 double _mark_thread_startup_sec; // Time at startup of marking thread
ysr@777 606
ysr@777 607 // Add a new GC of the given duration and end time to the record.
ysr@777 608 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
ysr@777 609
ysr@777 610 // The head of the list (via "next_in_collection_set()") representing the
johnc@1829 611 // current collection set. Set from the incrementally built collection
johnc@1829 612 // set at the start of the pause.
ysr@777 613 HeapRegion* _collection_set;
johnc@1829 614
johnc@1829 615 // The number of regions in the collection set. Set from the incrementally
johnc@1829 616 // built collection set at the start of an evacuation pause.
ysr@777 617 size_t _collection_set_size;
johnc@1829 618
johnc@1829 619 // The number of bytes in the collection set before the pause. Set from
johnc@1829 620 // the incrementally built collection set at the start of an evacuation
johnc@1829 621 // pause.
ysr@777 622 size_t _collection_set_bytes_used_before;
ysr@777 623
johnc@1829 624 // The associated information that is maintained while the incremental
johnc@1829 625 // collection set is being built with young regions. Used to populate
johnc@1829 626 // the recorded info for the evacuation pause.
johnc@1829 627
johnc@1829 628 enum CSetBuildType {
johnc@1829 629 Active, // We are actively building the collection set
johnc@1829 630 Inactive // We are not actively building the collection set
johnc@1829 631 };
johnc@1829 632
johnc@1829 633 CSetBuildType _inc_cset_build_state;
johnc@1829 634
johnc@1829 635 // The head of the incrementally built collection set.
johnc@1829 636 HeapRegion* _inc_cset_head;
johnc@1829 637
johnc@1829 638 // The tail of the incrementally built collection set.
johnc@1829 639 HeapRegion* _inc_cset_tail;
johnc@1829 640
johnc@1829 641 // The number of regions in the incrementally built collection set.
johnc@1829 642 // Used to set _collection_set_size at the start of an evacuation
johnc@1829 643 // pause.
johnc@1829 644 size_t _inc_cset_size;
johnc@1829 645
johnc@1829 646 // Used as the index in the surviving young words structure
johnc@1829 647 // which tracks the amount of space, for each young region,
johnc@1829 648 // that survives the pause.
johnc@1829 649 size_t _inc_cset_young_index;
johnc@1829 650
johnc@1829 651 // The number of bytes in the incrementally built collection set.
johnc@1829 652 // Used to set _collection_set_bytes_used_before at the start of
johnc@1829 653 // an evacuation pause.
johnc@1829 654 size_t _inc_cset_bytes_used_before;
johnc@1829 655
johnc@1829 656 // Used to record the highest end of any heap region in the collection set
johnc@1829 657 HeapWord* _inc_cset_max_finger;
johnc@1829 658
johnc@1829 659 // The number of recorded used bytes in the young regions
johnc@1829 660 // of the collection set. This is the sum of the used() bytes
johnc@1829 661 // of retired young regions in the collection set.
johnc@1829 662 size_t _inc_cset_recorded_young_bytes;
johnc@1829 663
johnc@1829 664 // The RSet lengths recorded for regions in the collection set
johnc@1829 665 // (updated by the periodic sampling of the regions in the
johnc@1829 666 // young list/collection set).
johnc@1829 667 size_t _inc_cset_recorded_rs_lengths;
johnc@1829 668
johnc@1829 669 // The predicted elapsed time it will take to collect the regions
johnc@1829 670 // in the collection set (updated by the periodic sampling of the
johnc@1829 671 // regions in the young list/collection set).
johnc@1829 672 double _inc_cset_predicted_elapsed_time_ms;
johnc@1829 673
johnc@1829 674 // The predicted bytes to copy for the regions in the collection
johnc@1829 675 // set (updated by the periodic sampling of the regions in the
johnc@1829 676 // young list/collection set).
johnc@1829 677 size_t _inc_cset_predicted_bytes_to_copy;
johnc@1829 678
ysr@777 679 // Info about marking.
ysr@777 680 int _n_marks; // Sticky at 2, so we know when we've done at least 2.
ysr@777 681
ysr@777 682 // The number of collection pauses at the end of the last mark.
ysr@777 683 size_t _n_pauses_at_mark_end;
ysr@777 684
ysr@777 685 // Stash a pointer to the g1 heap.
ysr@777 686 G1CollectedHeap* _g1;
ysr@777 687
ysr@777 688 // The average time in ms per collection pause, averaged over recent pauses.
ysr@777 689 double recent_avg_time_for_pauses_ms();
ysr@777 690
ysr@777 691 // The average time in ms for processing CollectedHeap strong roots, per
ysr@777 692 // collection pause, averaged over recent pauses.
ysr@777 693 double recent_avg_time_for_CH_strong_ms();
ysr@777 694
ysr@777 695 // The average time in ms for processing the G1 remembered set, per
ysr@777 696 // pause, averaged over recent pauses.
ysr@777 697 double recent_avg_time_for_G1_strong_ms();
ysr@777 698
ysr@777 699 // The average time in ms for "evacuating followers", per pause, averaged
ysr@777 700 // over recent pauses.
ysr@777 701 double recent_avg_time_for_evac_ms();
ysr@777 702
ysr@777 703 // The number of "recent" GCs recorded in the number sequences
ysr@777 704 int number_of_recent_gcs();
ysr@777 705
ysr@777 706 // The average survival ratio, computed by the total number of bytes
ysr@777 707 // surviving / total number of bytes before collection over the last
ysr@777 708 // several recent pauses.
ysr@777 709 double recent_avg_survival_fraction();
ysr@777 710 // The survival fraction of the most recent pause; if there have been no
ysr@777 711 // pauses, returns 1.0.
ysr@777 712 double last_survival_fraction();
ysr@777 713
ysr@777 714 // Returns a "conservative" estimate of the recent survival rate, i.e.,
ysr@777 715 // one that may be higher than "recent_avg_survival_fraction".
ysr@777 716 // This is conservative in several ways:
ysr@777 717 // If there have been few pauses, it will assume a potential high
ysr@777 718 // variance, and err on the side of caution.
ysr@777 719 // It puts a lower bound (currently 0.1) on the value it will return.
ysr@777 720 // To try to detect phase changes, if the most recent pause ("latest") has a
ysr@777 721 // higher-than-average ("avg") survival rate, it returns that rate.
ysr@777 722 // "work" version is a utility function; young is restricted to young regions.
ysr@777 723 double conservative_avg_survival_fraction_work(double avg,
ysr@777 724 double latest);
ysr@777 725
ysr@777 726 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 727 // surviving and the total number of bytes before collection, resp.,
ysr@777 728 // over the last several recent pauses.
ysr@777 729 // Returns the survival rate for the category in the most recent pause.
ysr@777 730 // If there have been no pauses, returns 1.0.
ysr@777 731 double last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 732 TruncatedSeq* before);
ysr@777 733
ysr@777 734 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 735 // surviving and the total number of bytes before collection, resp.,
ysr@777 736 // over the last several recent pauses.
ysr@777 737 // Returns the average survival ratio over the last several recent pauses.
ysr@777 738 // If there have been no pauses, returns 1.0.
ysr@777 739 double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 740 TruncatedSeq* before);
ysr@777 741
ysr@777 742 double conservative_avg_survival_fraction() {
ysr@777 743 double avg = recent_avg_survival_fraction();
ysr@777 744 double latest = last_survival_fraction();
ysr@777 745 return conservative_avg_survival_fraction_work(avg, latest);
ysr@777 746 }
ysr@777 747
ysr@777 748 // The ratio of gc time to elapsed time, computed over recent pauses.
ysr@777 749 double _recent_avg_pause_time_ratio;
ysr@777 750
ysr@777 751 double recent_avg_pause_time_ratio() {
ysr@777 752 return _recent_avg_pause_time_ratio;
ysr@777 753 }
ysr@777 754
ysr@777 755 // Number of pauses between concurrent marking.
ysr@777 756 size_t _pauses_btwn_concurrent_mark;
ysr@777 757
ysr@777 758 size_t _n_marks_since_last_pause;
ysr@777 759
tonyp@1794 760 // At the end of a pause we check the heap occupancy and we decide
tonyp@1794 761 // whether we will start a marking cycle during the next pause. If
tonyp@1794 762 // we decide that we want to do that, we will set this parameter to
tonyp@1794 763 // true. So, this parameter will stay true between the end of a
tonyp@1794 764 // pause and the beginning of a subsequent pause (not necessarily
tonyp@1794 765 // the next one, see the comments on the next field) when we decide
tonyp@1794 766 // that we will indeed start a marking cycle and do the initial-mark
tonyp@1794 767 // work.
tonyp@1794 768 volatile bool _initiate_conc_mark_if_possible;
ysr@777 769
tonyp@1794 770 // If initiate_conc_mark_if_possible() is set at the beginning of a
tonyp@1794 771 // pause, it is a suggestion that the pause should start a marking
tonyp@1794 772 // cycle by doing the initial-mark work. However, it is possible
tonyp@1794 773 // that the concurrent marking thread is still finishing up the
tonyp@1794 774 // previous marking cycle (e.g., clearing the next marking
tonyp@1794 775 // bitmap). If that is the case we cannot start a new cycle and
tonyp@1794 776 // we'll have to wait for the concurrent marking thread to finish
tonyp@1794 777 // what it is doing. In this case we will postpone the marking cycle
tonyp@1794 778 // initiation decision for the next pause. When we eventually decide
tonyp@1794 779 // to start a cycle, we will set _during_initial_mark_pause which
tonyp@1794 780 // will stay true until the end of the initial-mark pause and it's
tonyp@1794 781 // the condition that indicates that a pause is doing the
tonyp@1794 782 // initial-mark work.
tonyp@1794 783 volatile bool _during_initial_mark_pause;
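The interaction described in the two comments above can be summarized in a simplified standalone sketch; the real decision is made by decide_on_conc_mark_initiation() and consults the actual state of the concurrent marking thread rather than a plain boolean argument.

// Simplified sketch of the flag handshake described above (not the real implementation).
struct ConcMarkFlagsSketch {
  volatile bool _initiate_conc_mark_if_possible;
  volatile bool _during_initial_mark_pause;

  // Called (conceptually) at the very start of an evacuation pause.
  void decide_on_conc_mark_initiation_sketch(bool conc_mark_cycle_in_progress) {
    if (!_initiate_conc_mark_if_possible) {
      return;                                // no marking cycle was requested
    }
    if (!conc_mark_cycle_in_progress) {
      // The previous cycle has fully finished: this pause will do the
      // initial-mark work and start a new marking cycle.
      _during_initial_mark_pause = true;
      _initiate_conc_mark_if_possible = false;
    }
    // Otherwise leave the flag set and retry at a later pause, once the
    // concurrent marking thread has finished the previous cycle.
  }
};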
tonyp@1794 784
ysr@777 785 bool _should_revert_to_full_young_gcs;
ysr@777 786 bool _last_full_young_gc;
ysr@777 787
ysr@777 788 // This set of variables tracks the collector efficiency, in order to
ysr@777 789 // determine whether we should initiate a new marking.
ysr@777 790 double _cur_mark_stop_world_time_ms;
ysr@777 791 double _mark_init_start_sec;
ysr@777 792 double _mark_remark_start_sec;
ysr@777 793 double _mark_cleanup_start_sec;
ysr@777 794 double _mark_closure_time_ms;
ysr@777 795
ysr@777 796 void calculate_young_list_min_length();
johnc@1829 797 void calculate_young_list_target_length();
johnc@1829 798 void calculate_young_list_target_length(size_t rs_lengths);
ysr@777 799
ysr@777 800 public:
ysr@777 801
ysr@777 802 G1CollectorPolicy();
ysr@777 803
ysr@777 804 virtual G1CollectorPolicy* as_g1_policy() { return this; }
ysr@777 805
ysr@777 806 virtual CollectorPolicy::Name kind() {
ysr@777 807 return CollectorPolicy::G1CollectorPolicyKind;
ysr@777 808 }
ysr@777 809
ysr@777 810 void check_prediction_validity();
ysr@777 811
ysr@777 812 size_t bytes_in_collection_set() {
ysr@777 813 return _bytes_in_collection_set_before_gc;
ysr@777 814 }
ysr@777 815
ysr@777 816 size_t bytes_in_to_space() {
ysr@777 817 return bytes_in_to_space_during_gc();
ysr@777 818 }
ysr@777 819
ysr@777 820 unsigned calc_gc_alloc_time_stamp() {
ysr@777 821 return _all_pause_times_ms->num() + 1;
ysr@777 822 }
ysr@777 823
ysr@777 824 protected:
ysr@777 825
ysr@777 826 // Count the number of bytes used in the CS.
ysr@777 827 void count_CS_bytes_used();
ysr@777 828
ysr@777 829 // Together these do the base cleanup-recording work. Subclasses might
ysr@777 830 // want to put something between them.
ysr@777 831 void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 832 size_t max_live_bytes);
ysr@777 833 void record_concurrent_mark_cleanup_end_work2();
ysr@777 834
ysr@777 835 public:
ysr@777 836
ysr@777 837 virtual void init();
ysr@777 838
apetrusenko@980 839 // Create jstat counters for the policy.
apetrusenko@980 840 virtual void initialize_gc_policy_counters();
apetrusenko@980 841
ysr@777 842 virtual HeapWord* mem_allocate_work(size_t size,
ysr@777 843 bool is_tlab,
ysr@777 844 bool* gc_overhead_limit_was_exceeded);
ysr@777 845
ysr@777 846 // This method controls how a collector handles one or more
ysr@777 847 // of its generations being fully allocated.
ysr@777 848 virtual HeapWord* satisfy_failed_allocation(size_t size,
ysr@777 849 bool is_tlab);
ysr@777 850
ysr@777 851 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
ysr@777 852
ysr@777 853 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
ysr@777 854
ysr@777 855 // The number of collection pauses so far.
ysr@777 856 long n_pauses() const { return _n_pauses; }
ysr@777 857
ysr@777 858 // Update the heuristic info to record a collection pause of the given
ysr@777 859 // start time, where the given number of bytes were used at the start.
ysr@777 860 // This may involve changing the desired size of a collection set.
ysr@777 861
ysr@777 862 virtual void record_stop_world_start();
ysr@777 863
ysr@777 864 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 865 size_t start_used);
ysr@777 866
ysr@777 867 // Must currently be called while the world is stopped.
ysr@777 868 virtual void record_concurrent_mark_init_start();
ysr@777 869 virtual void record_concurrent_mark_init_end();
ysr@777 870 void record_concurrent_mark_init_end_pre(double
ysr@777 871 mark_init_elapsed_time_ms);
ysr@777 872
ysr@777 873 void record_mark_closure_time(double mark_closure_time_ms);
ysr@777 874
ysr@777 875 virtual void record_concurrent_mark_remark_start();
ysr@777 876 virtual void record_concurrent_mark_remark_end();
ysr@777 877
ysr@777 878 virtual void record_concurrent_mark_cleanup_start();
ysr@777 879 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 880 size_t max_live_bytes);
ysr@777 881 virtual void record_concurrent_mark_cleanup_completed();
ysr@777 882
ysr@777 883 virtual void record_concurrent_pause();
ysr@777 884 virtual void record_concurrent_pause_end();
ysr@777 885
ysr@777 886 virtual void record_collection_pause_end_CH_strong_roots();
ysr@777 887 virtual void record_collection_pause_end_G1_strong_roots();
ysr@777 888
apetrusenko@1112 889 virtual void record_collection_pause_end(bool abandoned);
ysr@777 890
ysr@777 891 // Record the fact that a full collection occurred.
ysr@777 892 virtual void record_full_collection_start();
ysr@777 893 virtual void record_full_collection_end();
ysr@777 894
tonyp@1966 895 void record_gc_worker_start_time(int worker_i, double ms) {
tonyp@1966 896 _par_last_gc_worker_start_times_ms[worker_i] = ms;
tonyp@1966 897 }
tonyp@1966 898
ysr@777 899 void record_ext_root_scan_time(int worker_i, double ms) {
ysr@777 900 _par_last_ext_root_scan_times_ms[worker_i] = ms;
ysr@777 901 }
ysr@777 902
ysr@777 903 void record_mark_stack_scan_time(int worker_i, double ms) {
ysr@777 904 _par_last_mark_stack_scan_times_ms[worker_i] = ms;
ysr@777 905 }
ysr@777 906
ysr@777 907 void record_satb_drain_time(double ms) {
ysr@777 908 _cur_satb_drain_time_ms = ms;
ysr@777 909 _satb_drain_time_set = true;
ysr@777 910 }
ysr@777 911
ysr@777 912 void record_satb_drain_processed_buffers (int processed_buffers) {
ysr@777 913 _last_satb_drain_processed_buffers = processed_buffers;
ysr@777 914 }
ysr@777 915
ysr@777 916 void record_mod_union_time(double ms) {
ysr@777 917 _all_mod_union_times_ms->add(ms);
ysr@777 918 }
ysr@777 919
ysr@777 920 void record_update_rs_time(int thread, double ms) {
ysr@777 921 _par_last_update_rs_times_ms[thread] = ms;
ysr@777 922 }
ysr@777 923
ysr@777 924 void record_update_rs_processed_buffers (int thread,
ysr@777 925 double processed_buffers) {
ysr@777 926 _par_last_update_rs_processed_buffers[thread] = processed_buffers;
ysr@777 927 }
ysr@777 928
ysr@777 929 void record_scan_rs_time(int thread, double ms) {
ysr@777 930 _par_last_scan_rs_times_ms[thread] = ms;
ysr@777 931 }
ysr@777 932
ysr@777 933 void reset_obj_copy_time(int thread) {
ysr@777 934 _par_last_obj_copy_times_ms[thread] = 0.0;
ysr@777 935 }
ysr@777 936
ysr@777 937 void reset_obj_copy_time() {
ysr@777 938 reset_obj_copy_time(0);
ysr@777 939 }
ysr@777 940
ysr@777 941 void record_obj_copy_time(int thread, double ms) {
ysr@777 942 _par_last_obj_copy_times_ms[thread] += ms;
ysr@777 943 }
ysr@777 944
tonyp@1966 945 void record_termination(int thread, double ms, size_t attempts) {
tonyp@1966 946 _par_last_termination_times_ms[thread] = ms;
tonyp@1966 947 _par_last_termination_attempts[thread] = (double) attempts;
ysr@777 948 }
ysr@777 949
tonyp@1966 950 void record_gc_worker_end_time(int worker_i, double ms) {
tonyp@1966 951 _par_last_gc_worker_end_times_ms[worker_i] = ms;
ysr@777 952 }
ysr@777 953
tonyp@1030 954 void record_pause_time_ms(double ms) {
ysr@777 955 _last_pause_time_ms = ms;
ysr@777 956 }
ysr@777 957
ysr@777 958 void record_clear_ct_time(double ms) {
ysr@777 959 _cur_clear_ct_time_ms = ms;
ysr@777 960 }
ysr@777 961
ysr@777 962 void record_par_time(double ms) {
ysr@777 963 _cur_collection_par_time_ms = ms;
ysr@777 964 }
ysr@777 965
ysr@777 966 void record_aux_start_time(int i) {
ysr@777 967 guarantee(i < _aux_num, "should be within range");
ysr@777 968 _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
ysr@777 969 }
ysr@777 970
ysr@777 971 void record_aux_end_time(int i) {
ysr@777 972 guarantee(i < _aux_num, "should be within range");
ysr@777 973 double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
ysr@777 974 _cur_aux_times_set[i] = true;
ysr@777 975 _cur_aux_times_ms[i] += ms;
ysr@777 976 }
ysr@777 977
johnc@1325 978 #ifndef PRODUCT
johnc@1325 979 void record_cc_clear_time(double ms) {
johnc@1325 980 if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
johnc@1325 981 _min_clear_cc_time_ms = ms;
johnc@1325 982 if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
johnc@1325 983 _max_clear_cc_time_ms = ms;
johnc@1325 984 _cur_clear_cc_time_ms = ms;
johnc@1325 985 _cum_clear_cc_time_ms += ms;
johnc@1325 986 _num_cc_clears++;
johnc@1325 987 }
johnc@1325 988 #endif
johnc@1325 989
ysr@777 990 // Record the fact that "bytes" bytes were allocated in a region.
ysr@777 991 void record_before_bytes(size_t bytes);
ysr@777 992 void record_after_bytes(size_t bytes);
ysr@777 993
ysr@777 994 // Returns "true" if this is a good time to do a collection pause.
ysr@777 995 // The "word_size" argument, if non-zero, indicates the size of an
ysr@777 996 // allocation request that is prompting this query.
ysr@777 997 virtual bool should_do_collection_pause(size_t word_size) = 0;
ysr@777 998
ysr@777 999 // Choose a new collection set. Marks the chosen regions as being
ysr@777 1000 // "in_collection_set", and links them together. The head and number of
ysr@777 1001 // the collection set are available via access methods.
tonyp@2011 1002 virtual bool choose_collection_set(double target_pause_time_ms) = 0;
ysr@777 1003
ysr@777 1004 // The head of the list (via "next_in_collection_set()") representing the
ysr@777 1005 // current collection set.
ysr@777 1006 HeapRegion* collection_set() { return _collection_set; }
ysr@777 1007
johnc@1829 1008 void clear_collection_set() { _collection_set = NULL; }
johnc@1829 1009
ysr@777 1010 // The number of elements in the current collection set.
ysr@777 1011 size_t collection_set_size() { return _collection_set_size; }
ysr@777 1012
ysr@777 1013 // Add "hr" to the CS.
ysr@777 1014 void add_to_collection_set(HeapRegion* hr);
ysr@777 1015
johnc@1829 1016 // Incremental CSet Support
johnc@1829 1017
johnc@1829 1018 // The head of the incrementally built collection set.
johnc@1829 1019 HeapRegion* inc_cset_head() { return _inc_cset_head; }
johnc@1829 1020
johnc@1829 1021 // The tail of the incrementally built collection set.
johnc@1829 1022 HeapRegion* inc_set_tail() { return _inc_cset_tail; }
johnc@1829 1023
johnc@1829 1024 // The number of elements in the incrementally built collection set.
johnc@1829 1025 size_t inc_cset_size() { return _inc_cset_size; }
johnc@1829 1026
johnc@1829 1027 // Initialize incremental collection set info.
johnc@1829 1028 void start_incremental_cset_building();
johnc@1829 1029
johnc@1829 1030 void clear_incremental_cset() {
johnc@1829 1031 _inc_cset_head = NULL;
johnc@1829 1032 _inc_cset_tail = NULL;
johnc@1829 1033 }
johnc@1829 1034
johnc@1829 1035 // Stop adding regions to the incremental collection set
johnc@1829 1036 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
johnc@1829 1037
johnc@1829 1038 // Add/remove information about hr to the aggregated information
johnc@1829 1039 // for the incrementally built collection set.
johnc@1829 1040 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
johnc@1829 1041 void remove_from_incremental_cset_info(HeapRegion* hr);
johnc@1829 1042
johnc@1829 1043 // Update information about hr in the aggregated information for
johnc@1829 1044 // the incrementally built collection set.
johnc@1829 1045 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
johnc@1829 1046
johnc@1829 1047 private:
johnc@1829 1048 // Update the incremental cset information when adding a region
johnc@1829 1049 // (should not be called directly).
johnc@1829 1050 void add_region_to_incremental_cset_common(HeapRegion* hr);
johnc@1829 1051
johnc@1829 1052 public:
johnc@1829 1053 // Add hr to the LHS of the incremental collection set.
johnc@1829 1054 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
johnc@1829 1055
johnc@1829 1056 // Add hr to the RHS of the incremental collection set.
johnc@1829 1057 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
johnc@1829 1058
johnc@1829 1059 #ifndef PRODUCT
johnc@1829 1060 void print_collection_set(HeapRegion* list_head, outputStream* st);
johnc@1829 1061 #endif // !PRODUCT
johnc@1829 1062
tonyp@1794 1063 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
tonyp@1794 1064 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
tonyp@1794 1065 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
tonyp@1794 1066
tonyp@1794 1067 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
tonyp@1794 1068 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
tonyp@1794 1069 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
tonyp@1794 1070
tonyp@2011 1071 // This sets the initiate_conc_mark_if_possible() flag to start a
tonyp@2011 1072 // new cycle, as long as we are not already in one. It's best if it
tonyp@2011 1073 // is called during a safepoint when the test whether a cycle is in
tonyp@2011 1074 // progress or not is stable.
tonyp@2011 1075 bool force_initial_mark_if_outside_cycle();
tonyp@2011 1076
tonyp@1794 1077 // This is called at the very beginning of an evacuation pause (it
tonyp@1794 1078 // has to be the first thing that the pause does). If
tonyp@1794 1079 // initiate_conc_mark_if_possible() is true, and the concurrent
tonyp@1794 1080 // marking thread has completed its work during the previous cycle,
tonyp@1794 1081 // it will set during_initial_mark_pause() to true so that the pause does
tonyp@1794 1082 // the initial-mark work and start a marking cycle.
tonyp@1794 1083 void decide_on_conc_mark_initiation();
ysr@777 1084
ysr@777 1085 // If an expansion would be appropriate, because recent GC overhead had
ysr@777 1086 // exceeded the desired limit, return an amount to expand by.
ysr@777 1087 virtual size_t expansion_amount();
ysr@777 1088
ysr@777 1089 // note start of mark thread
ysr@777 1090 void note_start_of_mark_thread();
ysr@777 1091
ysr@777 1092 // The marked bytes of region "r" have changed; reclassify its desirability
ysr@777 1093 // for marking. Also asserts that "r" is eligible for a CS.
ysr@777 1094 virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
ysr@777 1095
ysr@777 1096 #ifndef PRODUCT
ysr@777 1097 // Check any appropriate marked bytes info, asserting false if
ysr@777 1098 // something's wrong, else returning "true".
ysr@777 1099 virtual bool assertMarkedBytesDataOK() = 0;
ysr@777 1100 #endif
ysr@777 1101
ysr@777 1102 // Print tracing information.
ysr@777 1103 void print_tracing_info() const;
ysr@777 1104
ysr@777 1105 // Print stats on young survival ratio
ysr@777 1106 void print_yg_surv_rate_info() const;
ysr@777 1107
apetrusenko@980 1108 void finished_recalculating_age_indexes(bool is_survivors) {
apetrusenko@980 1109 if (is_survivors) {
apetrusenko@980 1110 _survivor_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1111 } else {
apetrusenko@980 1112 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1113 }
ysr@777 1114 // do that for any other surv rate groups
ysr@777 1115 }
ysr@777 1116
ysr@777 1117 bool should_add_next_region_to_young_list();
ysr@777 1118
ysr@777 1119 bool in_young_gc_mode() {
ysr@777 1120 return _in_young_gc_mode;
ysr@777 1121 }
ysr@777 1122 void set_in_young_gc_mode(bool in_young_gc_mode) {
ysr@777 1123 _in_young_gc_mode = in_young_gc_mode;
ysr@777 1124 }
ysr@777 1125
ysr@777 1126 bool full_young_gcs() {
ysr@777 1127 return _full_young_gcs;
ysr@777 1128 }
ysr@777 1129 void set_full_young_gcs(bool full_young_gcs) {
ysr@777 1130 _full_young_gcs = full_young_gcs;
ysr@777 1131 }
ysr@777 1132
ysr@777 1133 bool adaptive_young_list_length() {
ysr@777 1134 return _adaptive_young_list_length;
ysr@777 1135 }
ysr@777 1136 void set_adaptive_young_list_length(bool adaptive_young_list_length) {
ysr@777 1137 _adaptive_young_list_length = adaptive_young_list_length;
ysr@777 1138 }
ysr@777 1139
ysr@777 1140 inline double get_gc_eff_factor() {
ysr@777 1141 double ratio = _known_garbage_ratio;
ysr@777 1142
ysr@777 1143 double square = ratio * ratio;
ysr@777 1144 // square = square * square;
ysr@777 1145 double ret = square * 9.0 + 1.0;
ysr@777 1146 #if 0
ysr@777 1147 gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
ysr@777 1148 #endif // 0
ysr@777 1149 guarantee(0.0 <= ret && ret < 10.0, "invariant!");
ysr@777 1150 return ret;
ysr@777 1151 }
ysr@777 1152
ysr@777 1153 //
ysr@777 1154 // Survivor regions policy.
ysr@777 1155 //
ysr@777 1156 protected:
ysr@777 1157
ysr@777 1158 // Current tenuring threshold, set to 0 if the collector reaches the
ysr@777 1159 // maximum number of survivor regions.
ysr@777 1160 int _tenuring_threshold;
ysr@777 1161
apetrusenko@980 1162 // The limit on the number of regions allocated for survivors.
apetrusenko@980 1163 size_t _max_survivor_regions;
apetrusenko@980 1164
apetrusenko@980 1165 // The number of survivor regions after a collection.
apetrusenko@980 1166 size_t _recorded_survivor_regions;
apetrusenko@980 1167 // List of survivor regions.
apetrusenko@980 1168 HeapRegion* _recorded_survivor_head;
apetrusenko@980 1169 HeapRegion* _recorded_survivor_tail;
apetrusenko@980 1170
apetrusenko@980 1171 ageTable _survivors_age_table;
apetrusenko@980 1172
ysr@777 1173 public:
ysr@777 1174
ysr@777 1175 inline GCAllocPurpose
ysr@777 1176 evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
ysr@777 1177 if (age < _tenuring_threshold && src_region->is_young()) {
ysr@777 1178 return GCAllocForSurvived;
ysr@777 1179 } else {
ysr@777 1180 return GCAllocForTenured;
ysr@777 1181 }
ysr@777 1182 }
ysr@777 1183
ysr@777 1184 inline bool track_object_age(GCAllocPurpose purpose) {
ysr@777 1185 return purpose == GCAllocForSurvived;
ysr@777 1186 }
ysr@777 1187
ysr@777 1188 inline GCAllocPurpose alternative_purpose(int purpose) {
ysr@777 1189 return GCAllocForTenured;
ysr@777 1190 }
ysr@777 1191
apetrusenko@980 1192 static const size_t REGIONS_UNLIMITED = ~(size_t)0;
apetrusenko@980 1193
apetrusenko@980 1194 size_t max_regions(int purpose);
ysr@777 1195
ysr@777 1196 // Called when the limit on regions for a particular purpose is reached.
ysr@777 1197 void note_alloc_region_limit_reached(int purpose) {
ysr@777 1198 if (purpose == GCAllocForSurvived) {
ysr@777 1199 _tenuring_threshold = 0;
ysr@777 1200 }
ysr@777 1201 }
ysr@777 1202
ysr@777 1203 void note_start_adding_survivor_regions() {
ysr@777 1204 _survivor_surv_rate_group->start_adding_regions();
ysr@777 1205 }
ysr@777 1206
ysr@777 1207 void note_stop_adding_survivor_regions() {
ysr@777 1208 _survivor_surv_rate_group->stop_adding_regions();
ysr@777 1209 }
apetrusenko@980 1210
apetrusenko@980 1211 void record_survivor_regions(size_t regions,
apetrusenko@980 1212 HeapRegion* head,
apetrusenko@980 1213 HeapRegion* tail) {
apetrusenko@980 1214 _recorded_survivor_regions = regions;
apetrusenko@980 1215 _recorded_survivor_head = head;
apetrusenko@980 1216 _recorded_survivor_tail = tail;
apetrusenko@980 1217 }
apetrusenko@980 1218
tonyp@1273 1219 size_t recorded_survivor_regions() {
tonyp@1273 1220 return _recorded_survivor_regions;
tonyp@1273 1221 }
tonyp@1273 1222
apetrusenko@980 1223 void record_thread_age_table(ageTable* age_table)
apetrusenko@980 1224 {
apetrusenko@980 1225 _survivors_age_table.merge_par(age_table);
apetrusenko@980 1226 }
apetrusenko@980 1227
apetrusenko@980 1228 // Calculates survivor space parameters.
apetrusenko@980 1229 void calculate_survivors_policy();
apetrusenko@980 1230
ysr@777 1231 };
ysr@777 1232
ysr@777 1233 // This encapsulates a particular strategy for a g1 Collector.
ysr@777 1234 //
ysr@777 1235 // Start a concurrent mark when our heap size is n bytes
ysr@777 1236 // greater than our heap size was at the last concurrent
ysr@777 1237 // mark. Where n is a function of the CMSTriggerRatio
ysr@777 1238 // and the MinHeapFreeRatio.
ysr@777 1239 //
ysr@777 1240 // Start a g1 collection pause when we have allocated the
ysr@777 1241 // average number of bytes currently being freed in
ysr@777 1242 // a collection, but only if it is at least one region
ysr@777 1243 // full
ysr@777 1244 //
ysr@777 1245 // Resize Heap based on desired
ysr@777 1246 // allocation space, where desired allocation space is
ysr@777 1247 // a function of survival rate and desired future to size.
ysr@777 1248 //
ysr@777 1249 // Choose collection set by first picking all older regions
ysr@777 1250 // which have a survival rate which beats our projected young
ysr@777 1251 // survival rate. Then fill out the number of needed regions
ysr@777 1252 // with young regions.
ysr@777 1253
ysr@777 1254 class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
ysr@777 1255 CollectionSetChooser* _collectionSetChooser;
ysr@777 1256 // If the estimate is less than desirable, resize if possible.
ysr@777 1257 void expand_if_possible(size_t numRegions);
ysr@777 1258
tonyp@2011 1259 virtual bool choose_collection_set(double target_pause_time_ms);
ysr@777 1260 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 1261 size_t start_used);
ysr@777 1262 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 1263 size_t max_live_bytes);
ysr@777 1264 virtual void record_full_collection_end();
ysr@777 1265
ysr@777 1266 public:
ysr@777 1267 G1CollectorPolicy_BestRegionsFirst() {
ysr@777 1268 _collectionSetChooser = new CollectionSetChooser();
ysr@777 1269 }
apetrusenko@1112 1270 void record_collection_pause_end(bool abandoned);
ysr@777 1271 bool should_do_collection_pause(size_t word_size);
ysr@777 1272 // This is not needed any more, after the CSet choosing code was
ysr@777 1273 // changed to use the pause prediction work. But let's leave the
ysr@777 1274 // hook in just in case.
ysr@777 1275 void note_change_in_marked_bytes(HeapRegion* r) { }
ysr@777 1276 #ifndef PRODUCT
ysr@777 1277 bool assertMarkedBytesDataOK();
ysr@777 1278 #endif
ysr@777 1279 };
ysr@777 1280
ysr@777 1281 // This should move to some place more general...
ysr@777 1282
ysr@777 1283 // If we have "n" measurements, and we've kept track of their "sum" and the
ysr@777 1284 // "sum_of_squares" of the measurements, this returns the variance of the
ysr@777 1285 // sequence.
ysr@777 1286 inline double variance(int n, double sum_of_squares, double sum) {
ysr@777 1287 double n_d = (double)n;
ysr@777 1288 double avg = sum/n_d;
ysr@777 1289 return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
ysr@777 1290 }
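As a quick sanity check of the helper above (assuming it is in scope), the samples {2, 4, 6} have sum 12, sum of squares 56 and mean 4, giving a population variance of (56 - 2*4*12 + 3*16) / 3 = 8/3.

// Worked example for variance(); prints 2.666667.
#include <cstdio>

int main() {
  double sum            = 2.0 + 4.0 + 6.0;                 // 12
  double sum_of_squares = 2.0*2.0 + 4.0*4.0 + 6.0*6.0;     // 56
  std::printf("%f\n", variance(3, sum_of_squares, sum));   // 8/3
  return 0;
}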
ysr@777 1291
ysr@777 1292 // Local Variables: ***
ysr@777 1293 // c-indentation-style: gnu ***
ysr@777 1294 // End: ***
