src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

Fri, 12 Aug 2011 11:31:06 -0400

author
tonyp
date
Fri, 12 Aug 2011 11:31:06 -0400
changeset 3028
f44782f04dd4
parent 3021
14a2fd14c0db
child 3065
ff53346271fe
permissions
-rw-r--r--

7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally
Summary: Refactor the allocation code during GC to use the G1AllocRegion abstraction. Use separate subclasses of G1AllocRegion for survivor and old regions. Avoid BOT updates and dirty survivor cards incrementally for the former.
Reviewed-by: brutisso, johnc, ysr

ysr@777 1 /*
tonyp@3028 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/collectionSetChooser.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1MMUTracker.hpp"
stefank@2314 30 #include "memory/collectorPolicy.hpp"
stefank@2314 31
ysr@777 32 // A G1CollectorPolicy makes policy decisions that determine the
ysr@777 33 // characteristics of the collector. Examples include:
ysr@777 34 // * choice of collection set.
ysr@777 35 // * when to collect.
ysr@777 36
ysr@777 37 class HeapRegion;
ysr@777 38 class CollectionSetChooser;
ysr@777 39
ysr@777 40 // Yes, this is a bit unpleasant... but it saves replicating the same thing
ysr@777 41 // over and over again and introducing subtle problems through small typos and
ysr@777 42 // cutting and pasting mistakes. The macro below introduces a number
ysr@777 43 // sequence into the following two classes and the methods that access it.
ysr@777 44
// Declares a private NumberSeq field (_all_<name>_times_ms) together with
// a public recorder, record_<name>_time_ms(double), that appends a sample,
// and a public accessor, get_<name>_seq(), that returns the sequence.
// Used by the summary classes below to stamp out one timing sequence per
// tracked GC phase without hand-replicating the boilerplate.
// NOTE: the macro intentionally switches the enclosing class's access
// level to "public" after expansion.
#define define_num_seq(name) \
private: \
  NumberSeq _all_##name##_times_ms; \
public: \
  void record_##name##_time_ms(double ms) { \
    _all_##name##_times_ms.add(ms); \
  } \
  NumberSeq* get_##name##_seq() { \
    return &_all_##name##_times_ms; \
  }
ysr@777 55
ysr@777 56 class MainBodySummary;
ysr@777 57
// Aggregate statistics for GC pauses as a whole: the total pause time and
// the "other" (otherwise unaccounted-for) time, each kept as a NumberSeq
// via the define_num_seq macro above.
class PauseSummary: public CHeapObj {
  define_num_seq(total)
  define_num_seq(other)

public:
  // Returns the per-phase main-body statistics, or NULL when this summary
  // does not track them (subclasses that do, e.g. Summary, override this).
  virtual MainBodySummary* main_body_summary() { return NULL; }
};
ysr@777 65
// Per-phase timing statistics for the "main body" of a GC pause. Each
// define_num_seq introduces one NumberSeq (plus recorder/accessor) for a
// phase; the trailing comments note when a phase is recorded only in
// certain configurations.
class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)   // optional
  define_num_seq(parallel)     // parallel only
    define_num_seq(ext_root_scan)
    define_num_seq(mark_stack_scan)
    define_num_seq(update_rs)
    define_num_seq(scan_rs)
    define_num_seq(obj_copy)
    define_num_seq(termination) // parallel only
    define_num_seq(parallel_other) // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)  // parallel only
};
ysr@777 79
// Combines whole-pause statistics (PauseSummary) with the per-phase
// main-body statistics (MainBodySummary) into a single summary object.
class Summary: public PauseSummary,
               public MainBodySummary {
public:
  // This summary does track main-body phases, so return ourselves.
  virtual MainBodySummary* main_body_summary() { return this; }
};
ysr@777 85
ysr@777 86 class G1CollectorPolicy: public CollectorPolicy {
ysr@777 87 protected:
ysr@777 88 // The number of pauses during the execution.
ysr@777 89 long _n_pauses;
ysr@777 90
ysr@777 91 // either equal to the number of parallel threads, if ParallelGCThreads
ysr@777 92 // has been set, or 1 otherwise
ysr@777 93 int _parallel_gc_threads;
ysr@777 94
ysr@777 95 enum SomePrivateConstants {
tonyp@1377 96 NumPrevPausesForHeuristics = 10
ysr@777 97 };
ysr@777 98
ysr@777 99 G1MMUTracker* _mmu_tracker;
ysr@777 100
ysr@777 101 void initialize_flags();
ysr@777 102
ysr@777 103 void initialize_all() {
ysr@777 104 initialize_flags();
ysr@777 105 initialize_size_info();
ysr@777 106 initialize_perm_generation(PermGen::MarkSweepCompact);
ysr@777 107 }
ysr@777 108
ysr@777 109 virtual size_t default_init_heap_size() {
ysr@777 110 // Pick some reasonable default.
ysr@777 111 return 8*M;
ysr@777 112 }
ysr@777 113
ysr@777 114 double _cur_collection_start_sec;
ysr@777 115 size_t _cur_collection_pause_used_at_start_bytes;
ysr@777 116 size_t _cur_collection_pause_used_regions_at_start;
ysr@777 117 size_t _prev_collection_pause_used_at_end_bytes;
ysr@777 118 double _cur_collection_par_time_ms;
ysr@777 119 double _cur_satb_drain_time_ms;
ysr@777 120 double _cur_clear_ct_time_ms;
ysr@777 121 bool _satb_drain_time_set;
ysr@777 122
johnc@1325 123 #ifndef PRODUCT
johnc@1325 124 // Card Table Count Cache stats
johnc@1325 125 double _min_clear_cc_time_ms; // min
johnc@1325 126 double _max_clear_cc_time_ms; // max
johnc@1325 127 double _cur_clear_cc_time_ms; // clearing time during current pause
johnc@1325 128 double _cum_clear_cc_time_ms; // cummulative clearing time
johnc@1325 129 jlong _num_cc_clears; // number of times the card count cache has been cleared
johnc@1325 130 #endif
johnc@1325 131
johnc@3021 132 // Statistics for recent GC pauses. See below for how indexed.
johnc@3021 133 TruncatedSeq* _recent_rs_scan_times_ms;
ysr@777 134
ysr@777 135 // These exclude marking times.
ysr@777 136 TruncatedSeq* _recent_pause_times_ms;
ysr@777 137 TruncatedSeq* _recent_gc_times_ms;
ysr@777 138
ysr@777 139 TruncatedSeq* _recent_CS_bytes_used_before;
ysr@777 140 TruncatedSeq* _recent_CS_bytes_surviving;
ysr@777 141
ysr@777 142 TruncatedSeq* _recent_rs_sizes;
ysr@777 143
ysr@777 144 TruncatedSeq* _concurrent_mark_init_times_ms;
ysr@777 145 TruncatedSeq* _concurrent_mark_remark_times_ms;
ysr@777 146 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
ysr@777 147
apetrusenko@1112 148 Summary* _summary;
ysr@777 149
ysr@777 150 NumberSeq* _all_pause_times_ms;
ysr@777 151 NumberSeq* _all_full_gc_times_ms;
ysr@777 152 double _stop_world_start;
ysr@777 153 NumberSeq* _all_stop_world_times_ms;
ysr@777 154 NumberSeq* _all_yield_times_ms;
ysr@777 155
ysr@777 156 size_t _region_num_young;
ysr@777 157 size_t _region_num_tenured;
ysr@777 158 size_t _prev_region_num_young;
ysr@777 159 size_t _prev_region_num_tenured;
ysr@777 160
ysr@777 161 NumberSeq* _all_mod_union_times_ms;
ysr@777 162
ysr@777 163 int _aux_num;
ysr@777 164 NumberSeq* _all_aux_times_ms;
ysr@777 165 double* _cur_aux_start_times_ms;
ysr@777 166 double* _cur_aux_times_ms;
ysr@777 167 bool* _cur_aux_times_set;
ysr@777 168
tonyp@1966 169 double* _par_last_gc_worker_start_times_ms;
ysr@777 170 double* _par_last_ext_root_scan_times_ms;
ysr@777 171 double* _par_last_mark_stack_scan_times_ms;
ysr@777 172 double* _par_last_update_rs_times_ms;
ysr@777 173 double* _par_last_update_rs_processed_buffers;
ysr@777 174 double* _par_last_scan_rs_times_ms;
ysr@777 175 double* _par_last_obj_copy_times_ms;
ysr@777 176 double* _par_last_termination_times_ms;
tonyp@1966 177 double* _par_last_termination_attempts;
tonyp@1966 178 double* _par_last_gc_worker_end_times_ms;
brutisso@2712 179 double* _par_last_gc_worker_times_ms;
ysr@777 180
ysr@777 181 // indicates that we are in young GC mode
ysr@777 182 bool _in_young_gc_mode;
ysr@777 183
ysr@777 184 // indicates whether we are in full young or partially young GC mode
ysr@777 185 bool _full_young_gcs;
ysr@777 186
ysr@777 187 // if true, then it tries to dynamically adjust the length of the
ysr@777 188 // young list
ysr@777 189 bool _adaptive_young_list_length;
ysr@777 190 size_t _young_list_min_length;
ysr@777 191 size_t _young_list_target_length;
ysr@777 192 size_t _young_list_fixed_length;
ysr@777 193
tonyp@2333 194 // The max number of regions we can extend the eden by while the GC
tonyp@2333 195 // locker is active. This should be >= _young_list_target_length;
tonyp@2333 196 size_t _young_list_max_length;
tonyp@2333 197
ysr@777 198 size_t _young_cset_length;
ysr@777 199 bool _last_young_gc_full;
ysr@777 200
ysr@777 201 unsigned _full_young_pause_num;
ysr@777 202 unsigned _partial_young_pause_num;
ysr@777 203
ysr@777 204 bool _during_marking;
ysr@777 205 bool _in_marking_window;
ysr@777 206 bool _in_marking_window_im;
ysr@777 207
ysr@777 208 SurvRateGroup* _short_lived_surv_rate_group;
ysr@777 209 SurvRateGroup* _survivor_surv_rate_group;
ysr@777 210 // add here any more surv rate groups
ysr@777 211
tonyp@1791 212 double _gc_overhead_perc;
tonyp@1791 213
ysr@777 214 bool during_marking() {
ysr@777 215 return _during_marking;
ysr@777 216 }
ysr@777 217
ysr@777 218 // <NEW PREDICTION>
ysr@777 219
ysr@777 220 private:
ysr@777 221 enum PredictionConstants {
ysr@777 222 TruncatedSeqLength = 10
ysr@777 223 };
ysr@777 224
ysr@777 225 TruncatedSeq* _alloc_rate_ms_seq;
ysr@777 226 double _prev_collection_pause_end_ms;
ysr@777 227
ysr@777 228 TruncatedSeq* _pending_card_diff_seq;
ysr@777 229 TruncatedSeq* _rs_length_diff_seq;
ysr@777 230 TruncatedSeq* _cost_per_card_ms_seq;
ysr@777 231 TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
ysr@777 232 TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
ysr@777 233 TruncatedSeq* _cost_per_entry_ms_seq;
ysr@777 234 TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
ysr@777 235 TruncatedSeq* _cost_per_byte_ms_seq;
ysr@777 236 TruncatedSeq* _constant_other_time_ms_seq;
ysr@777 237 TruncatedSeq* _young_other_cost_per_region_ms_seq;
ysr@777 238 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
ysr@777 239
ysr@777 240 TruncatedSeq* _pending_cards_seq;
ysr@777 241 TruncatedSeq* _scanned_cards_seq;
ysr@777 242 TruncatedSeq* _rs_lengths_seq;
ysr@777 243
ysr@777 244 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
ysr@777 245
ysr@777 246 TruncatedSeq* _young_gc_eff_seq;
ysr@777 247
ysr@777 248 TruncatedSeq* _max_conc_overhead_seq;
ysr@777 249
ysr@777 250 size_t _recorded_young_regions;
ysr@777 251 size_t _recorded_non_young_regions;
ysr@777 252 size_t _recorded_region_num;
ysr@777 253
ysr@777 254 size_t _free_regions_at_end_of_collection;
ysr@777 255
ysr@777 256 size_t _recorded_rs_lengths;
ysr@777 257 size_t _max_rs_lengths;
ysr@777 258
ysr@777 259 size_t _recorded_marked_bytes;
ysr@777 260 size_t _recorded_young_bytes;
ysr@777 261
ysr@777 262 size_t _predicted_pending_cards;
ysr@777 263 size_t _predicted_cards_scanned;
ysr@777 264 size_t _predicted_rs_lengths;
ysr@777 265 size_t _predicted_bytes_to_copy;
ysr@777 266
ysr@777 267 double _predicted_survival_ratio;
ysr@777 268 double _predicted_rs_update_time_ms;
ysr@777 269 double _predicted_rs_scan_time_ms;
ysr@777 270 double _predicted_object_copy_time_ms;
ysr@777 271 double _predicted_constant_other_time_ms;
ysr@777 272 double _predicted_young_other_time_ms;
ysr@777 273 double _predicted_non_young_other_time_ms;
ysr@777 274 double _predicted_pause_time_ms;
ysr@777 275
ysr@777 276 double _vtime_diff_ms;
ysr@777 277
ysr@777 278 double _recorded_young_free_cset_time_ms;
ysr@777 279 double _recorded_non_young_free_cset_time_ms;
ysr@777 280
ysr@777 281 double _sigma;
ysr@777 282 double _expensive_region_limit_ms;
ysr@777 283
ysr@777 284 size_t _rs_lengths_prediction;
ysr@777 285
ysr@777 286 size_t _known_garbage_bytes;
ysr@777 287 double _known_garbage_ratio;
ysr@777 288
ysr@777 289 double sigma() {
ysr@777 290 return _sigma;
ysr@777 291 }
ysr@777 292
ysr@777 293 // A function that prevents us putting too much stock in small sample
ysr@777 294 // sets. Returns a number between 2.0 and 1.0, depending on the number
ysr@777 295 // of samples. 5 or more samples yields one; fewer scales linearly from
ysr@777 296 // 2.0 at 1 sample to 1.0 at 5.
ysr@777 297 double confidence_factor(int samples) {
ysr@777 298 if (samples > 4) return 1.0;
ysr@777 299 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
ysr@777 300 }
ysr@777 301
ysr@777 302 double get_new_neg_prediction(TruncatedSeq* seq) {
ysr@777 303 return seq->davg() - sigma() * seq->dsd();
ysr@777 304 }
ysr@777 305
ysr@777 306 #ifndef PRODUCT
ysr@777 307 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
ysr@777 308 #endif // PRODUCT
ysr@777 309
iveresov@1546 310 void adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 311 double update_rs_processed_buffers,
iveresov@1546 312 double goal_ms);
iveresov@1546 313
ysr@777 314 protected:
ysr@777 315 double _pause_time_target_ms;
ysr@777 316 double _recorded_young_cset_choice_time_ms;
ysr@777 317 double _recorded_non_young_cset_choice_time_ms;
ysr@777 318 bool _within_target;
ysr@777 319 size_t _pending_cards;
ysr@777 320 size_t _max_pending_cards;
ysr@777 321
ysr@777 322 public:
ysr@777 323
ysr@777 324 void set_region_short_lived(HeapRegion* hr) {
ysr@777 325 hr->install_surv_rate_group(_short_lived_surv_rate_group);
ysr@777 326 }
ysr@777 327
ysr@777 328 void set_region_survivors(HeapRegion* hr) {
ysr@777 329 hr->install_surv_rate_group(_survivor_surv_rate_group);
ysr@777 330 }
ysr@777 331
ysr@777 332 #ifndef PRODUCT
ysr@777 333 bool verify_young_ages();
ysr@777 334 #endif // PRODUCT
ysr@777 335
ysr@777 336 double get_new_prediction(TruncatedSeq* seq) {
ysr@777 337 return MAX2(seq->davg() + sigma() * seq->dsd(),
ysr@777 338 seq->davg() * confidence_factor(seq->num()));
ysr@777 339 }
ysr@777 340
ysr@777 341 size_t young_cset_length() {
ysr@777 342 return _young_cset_length;
ysr@777 343 }
ysr@777 344
ysr@777 345 void record_max_rs_lengths(size_t rs_lengths) {
ysr@777 346 _max_rs_lengths = rs_lengths;
ysr@777 347 }
ysr@777 348
ysr@777 349 size_t predict_pending_card_diff() {
ysr@777 350 double prediction = get_new_neg_prediction(_pending_card_diff_seq);
ysr@777 351 if (prediction < 0.00001)
ysr@777 352 return 0;
ysr@777 353 else
ysr@777 354 return (size_t) prediction;
ysr@777 355 }
ysr@777 356
ysr@777 357 size_t predict_pending_cards() {
ysr@777 358 size_t max_pending_card_num = _g1->max_pending_card_num();
ysr@777 359 size_t diff = predict_pending_card_diff();
ysr@777 360 size_t prediction;
ysr@777 361 if (diff > max_pending_card_num)
ysr@777 362 prediction = max_pending_card_num;
ysr@777 363 else
ysr@777 364 prediction = max_pending_card_num - diff;
ysr@777 365
ysr@777 366 return prediction;
ysr@777 367 }
ysr@777 368
ysr@777 369 size_t predict_rs_length_diff() {
ysr@777 370 return (size_t) get_new_prediction(_rs_length_diff_seq);
ysr@777 371 }
ysr@777 372
ysr@777 373 double predict_alloc_rate_ms() {
ysr@777 374 return get_new_prediction(_alloc_rate_ms_seq);
ysr@777 375 }
ysr@777 376
ysr@777 377 double predict_cost_per_card_ms() {
ysr@777 378 return get_new_prediction(_cost_per_card_ms_seq);
ysr@777 379 }
ysr@777 380
ysr@777 381 double predict_rs_update_time_ms(size_t pending_cards) {
ysr@777 382 return (double) pending_cards * predict_cost_per_card_ms();
ysr@777 383 }
ysr@777 384
ysr@777 385 double predict_fully_young_cards_per_entry_ratio() {
ysr@777 386 return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
ysr@777 387 }
ysr@777 388
ysr@777 389 double predict_partially_young_cards_per_entry_ratio() {
ysr@777 390 if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
ysr@777 391 return predict_fully_young_cards_per_entry_ratio();
ysr@777 392 else
ysr@777 393 return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
ysr@777 394 }
ysr@777 395
ysr@777 396 size_t predict_young_card_num(size_t rs_length) {
ysr@777 397 return (size_t) ((double) rs_length *
ysr@777 398 predict_fully_young_cards_per_entry_ratio());
ysr@777 399 }
ysr@777 400
ysr@777 401 size_t predict_non_young_card_num(size_t rs_length) {
ysr@777 402 return (size_t) ((double) rs_length *
ysr@777 403 predict_partially_young_cards_per_entry_ratio());
ysr@777 404 }
ysr@777 405
ysr@777 406 double predict_rs_scan_time_ms(size_t card_num) {
ysr@777 407 if (full_young_gcs())
ysr@777 408 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 409 else
ysr@777 410 return predict_partially_young_rs_scan_time_ms(card_num);
ysr@777 411 }
ysr@777 412
ysr@777 413 double predict_partially_young_rs_scan_time_ms(size_t card_num) {
ysr@777 414 if (_partially_young_cost_per_entry_ms_seq->num() < 3)
ysr@777 415 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 416 else
ysr@777 417 return (double) card_num *
ysr@777 418 get_new_prediction(_partially_young_cost_per_entry_ms_seq);
ysr@777 419 }
ysr@777 420
ysr@777 421 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
ysr@777 422 if (_cost_per_byte_ms_during_cm_seq->num() < 3)
ysr@777 423 return 1.1 * (double) bytes_to_copy *
ysr@777 424 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 425 else
ysr@777 426 return (double) bytes_to_copy *
ysr@777 427 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
ysr@777 428 }
ysr@777 429
ysr@777 430 double predict_object_copy_time_ms(size_t bytes_to_copy) {
ysr@777 431 if (_in_marking_window && !_in_marking_window_im)
ysr@777 432 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
ysr@777 433 else
ysr@777 434 return (double) bytes_to_copy *
ysr@777 435 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 436 }
ysr@777 437
ysr@777 438 double predict_constant_other_time_ms() {
ysr@777 439 return get_new_prediction(_constant_other_time_ms_seq);
ysr@777 440 }
ysr@777 441
ysr@777 442 double predict_young_other_time_ms(size_t young_num) {
ysr@777 443 return
ysr@777 444 (double) young_num *
ysr@777 445 get_new_prediction(_young_other_cost_per_region_ms_seq);
ysr@777 446 }
ysr@777 447
ysr@777 448 double predict_non_young_other_time_ms(size_t non_young_num) {
ysr@777 449 return
ysr@777 450 (double) non_young_num *
ysr@777 451 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
ysr@777 452 }
ysr@777 453
ysr@777 454 void check_if_region_is_too_expensive(double predicted_time_ms);
ysr@777 455
ysr@777 456 double predict_young_collection_elapsed_time_ms(size_t adjustment);
ysr@777 457 double predict_base_elapsed_time_ms(size_t pending_cards);
ysr@777 458 double predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 459 size_t scanned_cards);
ysr@777 460 size_t predict_bytes_to_copy(HeapRegion* hr);
ysr@777 461 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
ysr@777 462
johnc@1829 463 // for use by: calculate_young_list_target_length(rs_length)
johnc@1829 464 bool predict_will_fit(size_t young_region_num,
johnc@1829 465 double base_time_ms,
johnc@1829 466 size_t init_free_regions,
johnc@1829 467 double target_pause_time_ms);
ysr@777 468
ysr@777 469 void start_recording_regions();
johnc@1829 470 void record_cset_region_info(HeapRegion* hr, bool young);
johnc@1829 471 void record_non_young_cset_region(HeapRegion* hr);
johnc@1829 472
johnc@1829 473 void set_recorded_young_regions(size_t n_regions);
johnc@1829 474 void set_recorded_young_bytes(size_t bytes);
johnc@1829 475 void set_recorded_rs_lengths(size_t rs_lengths);
johnc@1829 476 void set_predicted_bytes_to_copy(size_t bytes);
johnc@1829 477
ysr@777 478 void end_recording_regions();
ysr@777 479
ysr@777 480 void record_vtime_diff_ms(double vtime_diff_ms) {
ysr@777 481 _vtime_diff_ms = vtime_diff_ms;
ysr@777 482 }
ysr@777 483
ysr@777 484 void record_young_free_cset_time_ms(double time_ms) {
ysr@777 485 _recorded_young_free_cset_time_ms = time_ms;
ysr@777 486 }
ysr@777 487
ysr@777 488 void record_non_young_free_cset_time_ms(double time_ms) {
ysr@777 489 _recorded_non_young_free_cset_time_ms = time_ms;
ysr@777 490 }
ysr@777 491
ysr@777 492 double predict_young_gc_eff() {
ysr@777 493 return get_new_neg_prediction(_young_gc_eff_seq);
ysr@777 494 }
ysr@777 495
apetrusenko@980 496 double predict_survivor_regions_evac_time();
apetrusenko@980 497
ysr@777 498 // </NEW PREDICTION>
ysr@777 499
ysr@777 500 public:
ysr@777 501 void cset_regions_freed() {
ysr@777 502 bool propagate = _last_young_gc_full && !_in_marking_window;
ysr@777 503 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 504 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 505 // also call it on any more surv rate groups
ysr@777 506 }
ysr@777 507
ysr@777 508 void set_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 509 _known_garbage_bytes = known_garbage_bytes;
ysr@777 510 size_t heap_bytes = _g1->capacity();
ysr@777 511 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 512 }
ysr@777 513
ysr@777 514 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 515 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
ysr@777 516
ysr@777 517 _known_garbage_bytes -= known_garbage_bytes;
ysr@777 518 size_t heap_bytes = _g1->capacity();
ysr@777 519 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 520 }
ysr@777 521
ysr@777 522 G1MMUTracker* mmu_tracker() {
ysr@777 523 return _mmu_tracker;
ysr@777 524 }
ysr@777 525
tonyp@2011 526 double max_pause_time_ms() {
tonyp@2011 527 return _mmu_tracker->max_gc_time() * 1000.0;
tonyp@2011 528 }
tonyp@2011 529
ysr@777 530 double predict_init_time_ms() {
ysr@777 531 return get_new_prediction(_concurrent_mark_init_times_ms);
ysr@777 532 }
ysr@777 533
ysr@777 534 double predict_remark_time_ms() {
ysr@777 535 return get_new_prediction(_concurrent_mark_remark_times_ms);
ysr@777 536 }
ysr@777 537
ysr@777 538 double predict_cleanup_time_ms() {
ysr@777 539 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
ysr@777 540 }
ysr@777 541
ysr@777 542 // Returns an estimate of the survival rate of the region at yg-age
ysr@777 543 // "yg_age".
apetrusenko@980 544 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
apetrusenko@980 545 TruncatedSeq* seq = surv_rate_group->get_seq(age);
ysr@777 546 if (seq->num() == 0)
ysr@777 547 gclog_or_tty->print("BARF! age is %d", age);
ysr@777 548 guarantee( seq->num() > 0, "invariant" );
ysr@777 549 double pred = get_new_prediction(seq);
ysr@777 550 if (pred > 1.0)
ysr@777 551 pred = 1.0;
ysr@777 552 return pred;
ysr@777 553 }
ysr@777 554
apetrusenko@980 555 double predict_yg_surv_rate(int age) {
apetrusenko@980 556 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
apetrusenko@980 557 }
apetrusenko@980 558
ysr@777 559 double accum_yg_surv_rate_pred(int age) {
ysr@777 560 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
ysr@777 561 }
ysr@777 562
ysr@777 563 protected:
tonyp@1966 564 void print_stats(int level, const char* str, double value);
tonyp@1966 565 void print_stats(int level, const char* str, int value);
tonyp@1966 566
brutisso@2712 567 void print_par_stats(int level, const char* str, double* data);
brutisso@2712 568 void print_par_sizes(int level, const char* str, double* data);
ysr@777 569
ysr@777 570 void check_other_times(int level,
ysr@777 571 NumberSeq* other_times_ms,
ysr@777 572 NumberSeq* calc_other_times_ms) const;
ysr@777 573
ysr@777 574 void print_summary (PauseSummary* stats) const;
ysr@777 575
ysr@777 576 void print_summary (int level, const char* str, NumberSeq* seq) const;
ysr@777 577 void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
ysr@777 578
ysr@777 579 double avg_value (double* data);
ysr@777 580 double max_value (double* data);
ysr@777 581 double sum_of_values (double* data);
ysr@777 582 double max_sum (double* data1, double* data2);
ysr@777 583
ysr@777 584 int _last_satb_drain_processed_buffers;
ysr@777 585 int _last_update_rs_processed_buffers;
ysr@777 586 double _last_pause_time_ms;
ysr@777 587
ysr@777 588 size_t _bytes_in_collection_set_before_gc;
tonyp@3028 589 size_t _bytes_copied_during_gc;
tonyp@3028 590
ysr@777 591 // Used to count used bytes in CS.
ysr@777 592 friend class CountCSClosure;
ysr@777 593
ysr@777 594 // Statistics kept per GC stoppage, pause or full.
ysr@777 595 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
ysr@777 596
ysr@777 597 // We track markings.
ysr@777 598 int _num_markings;
ysr@777 599 double _mark_thread_startup_sec; // Time at startup of marking thread
ysr@777 600
ysr@777 601 // Add a new GC of the given duration and end time to the record.
ysr@777 602 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
ysr@777 603
ysr@777 604 // The head of the list (via "next_in_collection_set()") representing the
johnc@1829 605 // current collection set. Set from the incrementally built collection
johnc@1829 606 // set at the start of the pause.
ysr@777 607 HeapRegion* _collection_set;
johnc@1829 608
johnc@1829 609 // The number of regions in the collection set. Set from the incrementally
johnc@1829 610 // built collection set at the start of an evacuation pause.
ysr@777 611 size_t _collection_set_size;
johnc@1829 612
johnc@1829 613 // The number of bytes in the collection set before the pause. Set from
johnc@1829 614 // the incrementally built collection set at the start of an evacuation
johnc@1829 615 // pause.
ysr@777 616 size_t _collection_set_bytes_used_before;
ysr@777 617
johnc@1829 618 // The associated information that is maintained while the incremental
johnc@1829 619 // collection set is being built with young regions. Used to populate
johnc@1829 620 // the recorded info for the evacuation pause.
johnc@1829 621
johnc@1829 622 enum CSetBuildType {
johnc@1829 623 Active, // We are actively building the collection set
johnc@1829 624 Inactive // We are not actively building the collection set
johnc@1829 625 };
johnc@1829 626
johnc@1829 627 CSetBuildType _inc_cset_build_state;
johnc@1829 628
johnc@1829 629 // The head of the incrementally built collection set.
johnc@1829 630 HeapRegion* _inc_cset_head;
johnc@1829 631
johnc@1829 632 // The tail of the incrementally built collection set.
johnc@1829 633 HeapRegion* _inc_cset_tail;
johnc@1829 634
johnc@1829 635 // The number of regions in the incrementally built collection set.
johnc@1829 636 // Used to set _collection_set_size at the start of an evacuation
johnc@1829 637 // pause.
johnc@1829 638 size_t _inc_cset_size;
johnc@1829 639
johnc@1829 640 // Used as the index in the surviving young words structure
johnc@1829 641 // which tracks the amount of space, for each young region,
johnc@1829 642 // that survives the pause.
johnc@1829 643 size_t _inc_cset_young_index;
johnc@1829 644
johnc@1829 645 // The number of bytes in the incrementally built collection set.
johnc@1829 646 // Used to set _collection_set_bytes_used_before at the start of
johnc@1829 647 // an evacuation pause.
johnc@1829 648 size_t _inc_cset_bytes_used_before;
johnc@1829 649
johnc@1829 650 // Used to record the highest end of heap region in collection set
johnc@1829 651 HeapWord* _inc_cset_max_finger;
johnc@1829 652
johnc@1829 653 // The number of recorded used bytes in the young regions
johnc@1829 654 // of the collection set. This is the sum of the used() bytes
johnc@1829 655 // of retired young regions in the collection set.
johnc@1829 656 size_t _inc_cset_recorded_young_bytes;
johnc@1829 657
johnc@1829 658 // The RSet lengths recorded for regions in the collection set
johnc@1829 659 // (updated by the periodic sampling of the regions in the
johnc@1829 660 // young list/collection set).
johnc@1829 661 size_t _inc_cset_recorded_rs_lengths;
johnc@1829 662
johnc@1829 663 // The predicted elapsed time it will take to collect the regions
johnc@1829 664 // in the collection set (updated by the periodic sampling of the
johnc@1829 665 // regions in the young list/collection set).
johnc@1829 666 double _inc_cset_predicted_elapsed_time_ms;
johnc@1829 667
johnc@1829 668 // The predicted bytes to copy for the regions in the collection
johnc@1829 669 // set (updated by the periodic sampling of the regions in the
johnc@1829 670 // young list/collection set).
johnc@1829 671 size_t _inc_cset_predicted_bytes_to_copy;
johnc@1829 672
ysr@777 673 // Info about marking.
ysr@777 674 int _n_marks; // Sticky at 2, so we know when we've done at least 2.
ysr@777 675
ysr@777 676 // The number of collection pauses at the end of the last mark.
ysr@777 677 size_t _n_pauses_at_mark_end;
ysr@777 678
ysr@777 679 // Stash a pointer to the g1 heap.
ysr@777 680 G1CollectedHeap* _g1;
ysr@777 681
ysr@777 682 // The average time in ms per collection pause, averaged over recent pauses.
ysr@777 683 double recent_avg_time_for_pauses_ms();
ysr@777 684
johnc@3021 685 // The average time in ms for RS scanning, per pause, averaged
johnc@3021 686 // over recent pauses. (Note the RS scanning time for a pause
johnc@3021 687 // is itself an average of the RS scanning time for each worker
johnc@3021 688 // thread.)
johnc@3021 689 double recent_avg_time_for_rs_scan_ms();
ysr@777 690
ysr@777 691 // The number of "recent" GCs recorded in the number sequences
ysr@777 692 int number_of_recent_gcs();
ysr@777 693
ysr@777 694 // The average survival ratio, computed by the total number of bytes
ysr@777 695 // surviving / total number of bytes before collection over the last
ysr@777 696 // several recent pauses.
ysr@777 697 double recent_avg_survival_fraction();
ysr@777 698 // The survival fraction of the most recent pause; if there have been no
ysr@777 699 // pauses, returns 1.0.
ysr@777 700 double last_survival_fraction();
ysr@777 701
ysr@777 702 // Returns a "conservative" estimate of the recent survival rate, i.e.,
ysr@777 703 // one that may be higher than "recent_avg_survival_fraction".
ysr@777 704 // This is conservative in several ways:
ysr@777 705 // If there have been few pauses, it will assume a potential high
ysr@777 706 // variance, and err on the side of caution.
ysr@777 707 // It puts a lower bound (currently 0.1) on the value it will return.
ysr@777 708 // To try to detect phase changes, if the most recent pause ("latest") has a
ysr@777 709 // higher-than average ("avg") survival rate, it returns that rate.
ysr@777 710 // "work" version is a utility function; young is restricted to young regions.
ysr@777 711 double conservative_avg_survival_fraction_work(double avg,
ysr@777 712 double latest);
ysr@777 713
ysr@777 714 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 715 // surviving and the total number of bytes before collection, resp.,
ysr@777 716 // over the last several recent pauses
ysr@777 717 // Returns the survival rate for the category in the most recent pause.
ysr@777 718 // If there have been no pauses, returns 1.0.
ysr@777 719 double last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 720 TruncatedSeq* before);
ysr@777 721
ysr@777 722 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 723 // surviving and the total number of bytes before collection, resp.,
ysr@777 724 // over the last several recent pauses
ysr@777 725 // Returns the average survival ratio over the last several recent pauses
ysr@777 726 // If there have been no pauses, return 1.0
ysr@777 727 double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 728 TruncatedSeq* before);
ysr@777 729
ysr@777 730 double conservative_avg_survival_fraction() {
ysr@777 731 double avg = recent_avg_survival_fraction();
ysr@777 732 double latest = last_survival_fraction();
ysr@777 733 return conservative_avg_survival_fraction_work(avg, latest);
ysr@777 734 }
ysr@777 735
  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  // Read-only accessor for _recent_avg_pause_time_ratio.
  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }
ysr@777 742
ysr@777 743 // Number of pauses between concurrent marking.
ysr@777 744 size_t _pauses_btwn_concurrent_mark;
ysr@777 745
ysr@777 746 size_t _n_marks_since_last_pause;
ysr@777 747
tonyp@1794 748 // At the end of a pause we check the heap occupancy and we decide
tonyp@1794 749 // whether we will start a marking cycle during the next pause. If
tonyp@1794 750 // we decide that we want to do that, we will set this parameter to
tonyp@1794 751 // true. So, this parameter will stay true between the end of a
tonyp@1794 752 // pause and the beginning of a subsequent pause (not necessarily
tonyp@1794 753 // the next one, see the comments on the next field) when we decide
tonyp@1794 754 // that we will indeed start a marking cycle and do the initial-mark
tonyp@1794 755 // work.
tonyp@1794 756 volatile bool _initiate_conc_mark_if_possible;
ysr@777 757
tonyp@1794 758 // If initiate_conc_mark_if_possible() is set at the beginning of a
tonyp@1794 759 // pause, it is a suggestion that the pause should start a marking
tonyp@1794 760 // cycle by doing the initial-mark work. However, it is possible
tonyp@1794 761 // that the concurrent marking thread is still finishing up the
tonyp@1794 762 // previous marking cycle (e.g., clearing the next marking
tonyp@1794 763 // bitmap). If that is the case we cannot start a new cycle and
tonyp@1794 764 // we'll have to wait for the concurrent marking thread to finish
tonyp@1794 765 // what it is doing. In this case we will postpone the marking cycle
tonyp@1794 766 // initiation decision for the next pause. When we eventually decide
tonyp@1794 767 // to start a cycle, we will set _during_initial_mark_pause which
tonyp@1794 768 // will stay true until the end of the initial-mark pause and it's
tonyp@1794 769 // the condition that indicates that a pause is doing the
tonyp@1794 770 // initial-mark work.
tonyp@1794 771 volatile bool _during_initial_mark_pause;
tonyp@1794 772
ysr@777 773 bool _should_revert_to_full_young_gcs;
ysr@777 774 bool _last_full_young_gc;
ysr@777 775
ysr@777 776 // This set of variables tracks the collector efficiency, in order to
ysr@777 777 // determine whether we should initiate a new marking.
ysr@777 778 double _cur_mark_stop_world_time_ms;
ysr@777 779 double _mark_init_start_sec;
ysr@777 780 double _mark_remark_start_sec;
ysr@777 781 double _mark_cleanup_start_sec;
ysr@777 782 double _mark_closure_time_ms;
ysr@777 783
ysr@777 784 void calculate_young_list_min_length();
johnc@1829 785 void calculate_young_list_target_length();
johnc@1829 786 void calculate_young_list_target_length(size_t rs_lengths);
ysr@777 787
ysr@777 788 public:
ysr@777 789
ysr@777 790 G1CollectorPolicy();
ysr@777 791
  // Identify this policy as the G1 policy when queried through the
  // CollectorPolicy interface.
  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  void check_prediction_validity();

  // The value of _bytes_in_collection_set_before_gc (recorded elsewhere
  // in the policy).
  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  // Time stamp used for GC alloc regions: one past the number of pause
  // times recorded so far.
  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }
ysr@777 807
ysr@777 808 protected:
ysr@777 809
ysr@777 810 // Count the number of bytes used in the CS.
ysr@777 811 void count_CS_bytes_used();
ysr@777 812
ysr@777 813 // Together these do the base cleanup-recording work. Subclasses might
ysr@777 814 // want to put something between them.
ysr@777 815 void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 816 size_t max_live_bytes);
ysr@777 817 void record_concurrent_mark_cleanup_end_work2();
ysr@777 818
ysr@777 819 public:
ysr@777 820
ysr@777 821 virtual void init();
ysr@777 822
apetrusenko@980 823 // Create jstat counters for the policy.
apetrusenko@980 824 virtual void initialize_gc_policy_counters();
apetrusenko@980 825
ysr@777 826 virtual HeapWord* mem_allocate_work(size_t size,
ysr@777 827 bool is_tlab,
ysr@777 828 bool* gc_overhead_limit_was_exceeded);
ysr@777 829
ysr@777 830 // This method controls how a collector handles one or more
ysr@777 831 // of its generations being fully allocated.
ysr@777 832 virtual HeapWord* satisfy_failed_allocation(size_t size,
ysr@777 833 bool is_tlab);
ysr@777 834
  // G1 uses the SATB card-table-logging barrier set.
  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  // The number of collection pauses so far.
  long n_pauses() const { return _n_pauses; }
ysr@777 841
ysr@777 842 // Update the heuristic info to record a collection pause of the given
ysr@777 843 // start time, where the given number of bytes were used at the start.
ysr@777 844 // This may involve changing the desired size of a collection set.
ysr@777 845
ysr@777 846 virtual void record_stop_world_start();
ysr@777 847
ysr@777 848 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 849 size_t start_used);
ysr@777 850
ysr@777 851 // Must currently be called while the world is stopped.
ysr@777 852 virtual void record_concurrent_mark_init_start();
ysr@777 853 virtual void record_concurrent_mark_init_end();
ysr@777 854 void record_concurrent_mark_init_end_pre(double
ysr@777 855 mark_init_elapsed_time_ms);
ysr@777 856
ysr@777 857 void record_mark_closure_time(double mark_closure_time_ms);
ysr@777 858
ysr@777 859 virtual void record_concurrent_mark_remark_start();
ysr@777 860 virtual void record_concurrent_mark_remark_end();
ysr@777 861
ysr@777 862 virtual void record_concurrent_mark_cleanup_start();
ysr@777 863 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 864 size_t max_live_bytes);
ysr@777 865 virtual void record_concurrent_mark_cleanup_completed();
ysr@777 866
ysr@777 867 virtual void record_concurrent_pause();
ysr@777 868 virtual void record_concurrent_pause_end();
ysr@777 869
tonyp@2062 870 virtual void record_collection_pause_end();
tonyp@2961 871 void print_heap_transition();
ysr@777 872
ysr@777 873 // Record the fact that a full collection occurred.
ysr@777 874 virtual void record_full_collection_start();
ysr@777 875 virtual void record_full_collection_end();
ysr@777 876
  // The record_* methods below store the per-worker (or per-pause)
  // timings of the various phases of the last collection pause.

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set = true;
  }

  void record_satb_drain_processed_buffers (int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers (int thread,
                                           double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  // Note: object copy time is accumulated (+=), not overwritten, so it
  // can be built up in pieces; reset_obj_copy_time() clears it.
  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    // Attempts are stored as a double, presumably to handle them
    // uniformly with the timing arrays -- TODO confirm.
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }
ysr@777 935
  // The elapsed time of the last pause, in ms.
  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  // The duration of the clear-ct phase of the last pause, in ms.
  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  // The total time spent in the parallel part of the last pause, in ms.
  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }
ysr@777 947
ysr@777 948 void record_aux_start_time(int i) {
ysr@777 949 guarantee(i < _aux_num, "should be within range");
ysr@777 950 _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
ysr@777 951 }
ysr@777 952
ysr@777 953 void record_aux_end_time(int i) {
ysr@777 954 guarantee(i < _aux_num, "should be within range");
ysr@777 955 double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
ysr@777 956 _cur_aux_times_set[i] = true;
ysr@777 957 _cur_aux_times_ms[i] += ms;
ysr@777 958 }
ysr@777 959
johnc@1325 960 #ifndef PRODUCT
johnc@1325 961 void record_cc_clear_time(double ms) {
johnc@1325 962 if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
johnc@1325 963 _min_clear_cc_time_ms = ms;
johnc@1325 964 if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
johnc@1325 965 _max_clear_cc_time_ms = ms;
johnc@1325 966 _cur_clear_cc_time_ms = ms;
johnc@1325 967 _cum_clear_cc_time_ms += ms;
johnc@1325 968 _num_cc_clears++;
johnc@1325 969 }
johnc@1325 970 #endif
johnc@1325 971
  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC (accumulated via
  // record_bytes_copied_during_gc() above).
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }
ysr@777 982
ysr@777 983 // Choose a new collection set. Marks the chosen regions as being
ysr@777 984 // "in_collection_set", and links them together. The head and number of
ysr@777 985 // the collection set are available via access methods.
tonyp@2062 986 virtual void choose_collection_set(double target_pause_time_ms) = 0;
ysr@777 987
  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  // Forget the current collection set list (does not touch the regions
  // themselves).
  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  // NOTE(review): named inc_set_tail, not inc_cset_tail as the sibling
  // accessors would suggest -- consider renaming for consistency.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Reset the incremental collection set list to empty.
  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
johnc@1829 1021
johnc@1829 1022 // Add/remove information about hr to the aggregated information
johnc@1829 1023 // for the incrementally built collection set.
johnc@1829 1024 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
johnc@1829 1025 void remove_from_incremental_cset_info(HeapRegion* hr);
johnc@1829 1026
johnc@1829 1027 // Update information about hr in the aggregated information for
johnc@1829 1028 // the incrementally built collection set.
johnc@1829 1029 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
johnc@1829 1030
johnc@1829 1031 private:
johnc@1829 1032 // Update the incremental cset information when adding a region
johnc@1829 1033 // (should not be called directly).
johnc@1829 1034 void add_region_to_incremental_cset_common(HeapRegion* hr);
johnc@1829 1035
johnc@1829 1036 public:
johnc@1829 1037 // Add hr to the LHS of the incremental collection set.
johnc@1829 1038 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
johnc@1829 1039
johnc@1829 1040 // Add hr to the RHS of the incremental collection set.
johnc@1829 1041 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
johnc@1829 1042
johnc@1829 1043 #ifndef PRODUCT
johnc@1829 1044 void print_collection_set(HeapRegion* list_head, outputStream* st);
johnc@1829 1045 #endif // !PRODUCT
johnc@1829 1046
  // Accessors for the two marking-initiation flags; see the comments on
  // the _initiate_conc_mark_if_possible and _during_initial_mark_pause
  // fields above for their exact semantics.
  bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause() { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
tonyp@1794 1054
tonyp@2011 1055 // This sets the initiate_conc_mark_if_possible() flag to start a
tonyp@2011 1056 // new cycle, as long as we are not already in one. It's best if it
tonyp@2011 1057 // is called during a safepoint when the test whether a cycle is in
tonyp@2011 1058 // progress or not is stable.
tonyp@2011 1059 bool force_initial_mark_if_outside_cycle();
tonyp@2011 1060
tonyp@1794 1061 // This is called at the very beginning of an evacuation pause (it
tonyp@1794 1062 // has to be the first thing that the pause does). If
tonyp@1794 1063 // initiate_conc_mark_if_possible() is true, and the concurrent
tonyp@1794 1064 // marking thread has completed its work during the previous cycle,
tonyp@1794 1065 // it will set during_initial_mark_pause() to so that the pause does
tonyp@1794 1066 // the initial-mark work and start a marking cycle.
tonyp@1794 1067 void decide_on_conc_mark_initiation();
ysr@777 1068
ysr@777 1069 // If an expansion would be appropriate, because recent GC overhead had
ysr@777 1070 // exceeded the desired limit, return an amount to expand by.
ysr@777 1071 virtual size_t expansion_amount();
ysr@777 1072
ysr@777 1073 // note start of mark thread
ysr@777 1074 void note_start_of_mark_thread();
ysr@777 1075
ysr@777 1076 // The marked bytes of the "r" has changed; reclassify it's desirability
ysr@777 1077 // for marking. Also asserts that "r" is eligible for a CS.
ysr@777 1078 virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
ysr@777 1079
ysr@777 1080 #ifndef PRODUCT
ysr@777 1081 // Check any appropriate marked bytes info, asserting false if
ysr@777 1082 // something's wrong, else returning "true".
ysr@777 1083 virtual bool assertMarkedBytesDataOK() = 0;
ysr@777 1084 #endif
ysr@777 1085
ysr@777 1086 // Print tracing information.
ysr@777 1087 void print_tracing_info() const;
ysr@777 1088
ysr@777 1089 // Print stats on young survival ratio
ysr@777 1090 void print_yg_surv_rate_info() const;
ysr@777 1091
apetrusenko@980 1092 void finished_recalculating_age_indexes(bool is_survivors) {
apetrusenko@980 1093 if (is_survivors) {
apetrusenko@980 1094 _survivor_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1095 } else {
apetrusenko@980 1096 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1097 }
ysr@777 1098 // do that for any other surv rate groups
ysr@777 1099 }
ysr@777 1100
tonyp@2315 1101 bool is_young_list_full() {
tonyp@2315 1102 size_t young_list_length = _g1->young_list()->length();
tonyp@2333 1103 size_t young_list_target_length = _young_list_target_length;
tonyp@2333 1104 if (G1FixedEdenSize) {
tonyp@2333 1105 young_list_target_length -= _max_survivor_regions;
tonyp@2333 1106 }
tonyp@2333 1107 return young_list_length >= young_list_target_length;
tonyp@2333 1108 }
tonyp@2333 1109
tonyp@2333 1110 bool can_expand_young_list() {
tonyp@2333 1111 size_t young_list_length = _g1->young_list()->length();
tonyp@2333 1112 size_t young_list_max_length = _young_list_max_length;
tonyp@2315 1113 if (G1FixedEdenSize) {
tonyp@2315 1114 young_list_max_length -= _max_survivor_regions;
tonyp@2315 1115 }
tonyp@2333 1116 return young_list_length < young_list_max_length;
tonyp@2333 1117 }
tonyp@2315 1118
tonyp@2315 1119 void update_region_num(bool young);
ysr@777 1120
  // Whether the policy is operating in young-GC mode.
  bool in_young_gc_mode() {
    return _in_young_gc_mode;
  }
  void set_in_young_gc_mode(bool in_young_gc_mode) {
    _in_young_gc_mode = in_young_gc_mode;
  }

  // Whether collections are currently fully-young.
  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  // Whether the young list length is sized adaptively by the policy.
  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }
ysr@777 1141
ysr@777 1142 inline double get_gc_eff_factor() {
ysr@777 1143 double ratio = _known_garbage_ratio;
ysr@777 1144
ysr@777 1145 double square = ratio * ratio;
ysr@777 1146 // square = square * square;
ysr@777 1147 double ret = square * 9.0 + 1.0;
ysr@777 1148 #if 0
ysr@777 1149 gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
ysr@777 1150 #endif // 0
ysr@777 1151 guarantee(0.0 <= ret && ret < 10.0, "invariant!");
ysr@777 1152 return ret;
ysr@777 1153 }
ysr@777 1154
ysr@777 1155 //
ysr@777 1156 // Survivor regions policy.
ysr@777 1157 //
ysr@777 1158 protected:
ysr@777 1159
ysr@777 1160 // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
ysr@777 1162 int _tenuring_threshold;
ysr@777 1163
apetrusenko@980 1164 // The limit on the number of regions allocated for survivors.
apetrusenko@980 1165 size_t _max_survivor_regions;
apetrusenko@980 1166
tonyp@2961 1167 // For reporting purposes.
tonyp@2961 1168 size_t _eden_bytes_before_gc;
tonyp@2961 1169 size_t _survivor_bytes_before_gc;
tonyp@2961 1170 size_t _capacity_before_gc;
tonyp@2961 1171
  // The number of survivor regions after a collection.
apetrusenko@980 1173 size_t _recorded_survivor_regions;
apetrusenko@980 1174 // List of survivor regions.
apetrusenko@980 1175 HeapRegion* _recorded_survivor_head;
apetrusenko@980 1176 HeapRegion* _recorded_survivor_tail;
apetrusenko@980 1177
apetrusenko@980 1178 ageTable _survivors_age_table;
apetrusenko@980 1179
ysr@777 1180 public:
ysr@777 1181
ysr@777 1182 inline GCAllocPurpose
ysr@777 1183 evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
ysr@777 1184 if (age < _tenuring_threshold && src_region->is_young()) {
ysr@777 1185 return GCAllocForSurvived;
ysr@777 1186 } else {
ysr@777 1187 return GCAllocForTenured;
ysr@777 1188 }
ysr@777 1189 }
ysr@777 1190
  // Only objects copied to survivor space need their age tracked.
  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }
ysr@777 1194
apetrusenko@980 1195 static const size_t REGIONS_UNLIMITED = ~(size_t)0;
apetrusenko@980 1196
apetrusenko@980 1197 size_t max_regions(int purpose);
ysr@777 1198
  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    // Once no more survivor regions can be allocated, stop copying to
    // survivor space by dropping the tenuring threshold to 0.
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  // Bracket the period during which survivor regions are added to the
  // survivor surv rate group.
  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }
apetrusenko@980 1213
  // Record the survivor regions (count plus list head/tail) identified
  // at the end of a collection.
  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head = head;
    _recorded_survivor_tail = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  // Merge a per-thread age table into the policy-wide survivor age
  // table.
  void record_thread_age_table(ageTable* age_table)
  {
    _survivors_age_table.merge_par(age_table);
  }
apetrusenko@980 1230
tonyp@2333 1231 void calculate_max_gc_locker_expansion();
tonyp@2333 1232
apetrusenko@980 1233 // Calculates survivor space parameters.
apetrusenko@980 1234 void calculate_survivors_policy();
apetrusenko@980 1235
ysr@777 1236 };
ysr@777 1237
ysr@777 1238 // This encapsulates a particular strategy for a g1 Collector.
ysr@777 1239 //
ysr@777 1240 // Start a concurrent mark when our heap size is n bytes
// greater than our heap size was at the last concurrent
ysr@777 1242 // mark. Where n is a function of the CMSTriggerRatio
ysr@777 1243 // and the MinHeapFreeRatio.
ysr@777 1244 //
ysr@777 1245 // Start a g1 collection pause when we have allocated the
ysr@777 1246 // average number of bytes currently being freed in
ysr@777 1247 // a collection, but only if it is at least one region
ysr@777 1248 // full
ysr@777 1249 //
ysr@777 1250 // Resize Heap based on desired
ysr@777 1251 // allocation space, where desired allocation space is
ysr@777 1252 // a function of survival rate and desired future to size.
ysr@777 1253 //
ysr@777 1254 // Choose collection set by first picking all older regions
ysr@777 1255 // which have a survival rate which beats our projected young
ysr@777 1256 // survival rate. Then fill out the number of needed regions
ysr@777 1257 // with young regions.
ysr@777 1258
class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  // Chooses which regions go into the collection set.
  CollectionSetChooser* _collectionSetChooser;
  // If the estimate is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};
ysr@777 1284
ysr@777 1285 // This should move to some place more general...
ysr@777 1286
// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the (population)
// variance of the sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double count = (double)n;
  double mean  = sum / count;
  // Expansion of sum((x_i - mean)^2) / n.
  return (sum_of_squares - 2.0 * mean * sum + count * mean * mean) / count;
}
ysr@777 1295
ysr@777 1296 // Local Variables: ***
ysr@777 1297 // c-indentation-style: gnu ***
ysr@777 1298 // End: ***
stefank@2314 1299
stefank@2314 1300 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

mercurial