src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author      johnc
date        Thu, 22 Sep 2011 10:57:37 -0700
changeset   3175  4dfb2df418f2
parent      3120  af2ab04e0038
child       3176  8229bd737950
permissions -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

ysr@777 1 /*
tonyp@3028 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/collectionSetChooser.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1MMUTracker.hpp"
stefank@2314 30 #include "memory/collectorPolicy.hpp"
stefank@2314 31
ysr@777 32 // A G1CollectorPolicy makes policy decisions that determine the
ysr@777 33 // characteristics of the collector. Examples include:
ysr@777 34 // * choice of collection set.
ysr@777 35 // * when to collect.
ysr@777 36
ysr@777 37 class HeapRegion;
ysr@777 38 class CollectionSetChooser;
ysr@777 39
ysr@777 40 // Yes, this is a bit unpleasant... but it saves replicating the same thing
ysr@777 41 // over and over again and introducing subtle problems through small typos and
ysr@777 42 // cutting and pasting mistakes. The macro below introduces a number
ysr@777 43 // sequence into the following two classes and the methods that access it.
ysr@777 44
ysr@777 45 #define define_num_seq(name) \
ysr@777 46 private: \
ysr@777 47 NumberSeq _all_##name##_times_ms; \
ysr@777 48 public: \
ysr@777 49 void record_##name##_time_ms(double ms) { \
ysr@777 50 _all_##name##_times_ms.add(ms); \
ysr@777 51 } \
ysr@777 52 NumberSeq* get_##name##_seq() { \
ysr@777 53 return &_all_##name##_times_ms; \
ysr@777 54 }
ysr@777 55
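// Illustrative note (not part of the original file): invoking the macro as
// define_num_seq(total), as PauseSummary does below, expands to roughly the
// following members in the enclosing class:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) {
//       _all_total_times_ms.add(ms);
//     }
//     NumberSeq* get_total_seq() {
//       return &_all_total_times_ms;
//     }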
ysr@777 56 class MainBodySummary;
ysr@777 57
apetrusenko@984 58 class PauseSummary: public CHeapObj {
ysr@777 59 define_num_seq(total)
ysr@777 60 define_num_seq(other)
ysr@777 61
ysr@777 62 public:
ysr@777 63 virtual MainBodySummary* main_body_summary() { return NULL; }
ysr@777 64 };
ysr@777 65
apetrusenko@984 66 class MainBodySummary: public CHeapObj {
ysr@777 67 define_num_seq(satb_drain) // optional
ysr@777 68 define_num_seq(parallel) // parallel only
ysr@777 69 define_num_seq(ext_root_scan)
ysr@777 70 define_num_seq(mark_stack_scan)
ysr@777 71 define_num_seq(update_rs)
ysr@777 72 define_num_seq(scan_rs)
ysr@777 73 define_num_seq(obj_copy)
ysr@777 74 define_num_seq(termination) // parallel only
ysr@777 75 define_num_seq(parallel_other) // parallel only
ysr@777 76 define_num_seq(mark_closure)
ysr@777 77 define_num_seq(clear_ct) // parallel only
ysr@777 78 };
ysr@777 79
apetrusenko@1112 80 class Summary: public PauseSummary,
apetrusenko@1112 81 public MainBodySummary {
ysr@777 82 public:
ysr@777 83 virtual MainBodySummary* main_body_summary() { return this; }
ysr@777 84 };
ysr@777 85
ysr@777 86 class G1CollectorPolicy: public CollectorPolicy {
ysr@777 87 protected:
ysr@777 88 // The number of pauses during the execution.
ysr@777 89 long _n_pauses;
ysr@777 90
ysr@777 91 // either equal to the number of parallel threads, if ParallelGCThreads
ysr@777 92 // has been set, or 1 otherwise
ysr@777 93 int _parallel_gc_threads;
ysr@777 94
ysr@777 95 enum SomePrivateConstants {
tonyp@1377 96 NumPrevPausesForHeuristics = 10
ysr@777 97 };
ysr@777 98
ysr@777 99 G1MMUTracker* _mmu_tracker;
ysr@777 100
ysr@777 101 void initialize_flags();
ysr@777 102
ysr@777 103 void initialize_all() {
ysr@777 104 initialize_flags();
ysr@777 105 initialize_size_info();
ysr@777 106 initialize_perm_generation(PermGen::MarkSweepCompact);
ysr@777 107 }
ysr@777 108
ysr@777 109 virtual size_t default_init_heap_size() {
ysr@777 110 // Pick some reasonable default.
ysr@777 111 return 8*M;
ysr@777 112 }
ysr@777 113
ysr@777 114 double _cur_collection_start_sec;
ysr@777 115 size_t _cur_collection_pause_used_at_start_bytes;
ysr@777 116 size_t _cur_collection_pause_used_regions_at_start;
ysr@777 117 size_t _prev_collection_pause_used_at_end_bytes;
ysr@777 118 double _cur_collection_par_time_ms;
ysr@777 119 double _cur_satb_drain_time_ms;
ysr@777 120 double _cur_clear_ct_time_ms;
ysr@777 121 bool _satb_drain_time_set;
johnc@3175 122 double _cur_ref_proc_time_ms;
johnc@3175 123 double _cur_ref_enq_time_ms;
ysr@777 124
johnc@1325 125 #ifndef PRODUCT
johnc@1325 126 // Card Table Count Cache stats
johnc@1325 127 double _min_clear_cc_time_ms; // min
johnc@1325 128 double _max_clear_cc_time_ms; // max
johnc@1325 129 double _cur_clear_cc_time_ms; // clearing time during current pause
johnc@1325 130 double _cum_clear_cc_time_ms; // cumulative clearing time
johnc@1325 131 jlong _num_cc_clears; // number of times the card count cache has been cleared
johnc@1325 132 #endif
johnc@1325 133
johnc@3021 134 // Statistics for recent GC pauses. See below for how indexed.
johnc@3021 135 TruncatedSeq* _recent_rs_scan_times_ms;
ysr@777 136
ysr@777 137 // These exclude marking times.
ysr@777 138 TruncatedSeq* _recent_pause_times_ms;
ysr@777 139 TruncatedSeq* _recent_gc_times_ms;
ysr@777 140
ysr@777 141 TruncatedSeq* _recent_CS_bytes_used_before;
ysr@777 142 TruncatedSeq* _recent_CS_bytes_surviving;
ysr@777 143
ysr@777 144 TruncatedSeq* _recent_rs_sizes;
ysr@777 145
ysr@777 146 TruncatedSeq* _concurrent_mark_remark_times_ms;
ysr@777 147 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
ysr@777 148
apetrusenko@1112 149 Summary* _summary;
ysr@777 150
ysr@777 151 NumberSeq* _all_pause_times_ms;
ysr@777 152 NumberSeq* _all_full_gc_times_ms;
ysr@777 153 double _stop_world_start;
ysr@777 154 NumberSeq* _all_stop_world_times_ms;
ysr@777 155 NumberSeq* _all_yield_times_ms;
ysr@777 156
ysr@777 157 size_t _region_num_young;
ysr@777 158 size_t _region_num_tenured;
ysr@777 159 size_t _prev_region_num_young;
ysr@777 160 size_t _prev_region_num_tenured;
ysr@777 161
ysr@777 162 NumberSeq* _all_mod_union_times_ms;
ysr@777 163
ysr@777 164 int _aux_num;
ysr@777 165 NumberSeq* _all_aux_times_ms;
ysr@777 166 double* _cur_aux_start_times_ms;
ysr@777 167 double* _cur_aux_times_ms;
ysr@777 168 bool* _cur_aux_times_set;
ysr@777 169
tonyp@1966 170 double* _par_last_gc_worker_start_times_ms;
ysr@777 171 double* _par_last_ext_root_scan_times_ms;
ysr@777 172 double* _par_last_mark_stack_scan_times_ms;
ysr@777 173 double* _par_last_update_rs_times_ms;
ysr@777 174 double* _par_last_update_rs_processed_buffers;
ysr@777 175 double* _par_last_scan_rs_times_ms;
ysr@777 176 double* _par_last_obj_copy_times_ms;
ysr@777 177 double* _par_last_termination_times_ms;
tonyp@1966 178 double* _par_last_termination_attempts;
tonyp@1966 179 double* _par_last_gc_worker_end_times_ms;
brutisso@2712 180 double* _par_last_gc_worker_times_ms;
ysr@777 181
ysr@777 182 // indicates whether we are in full young or partially young GC mode
ysr@777 183 bool _full_young_gcs;
ysr@777 184
ysr@777 185 // if true, then it tries to dynamically adjust the length of the
ysr@777 186 // young list
ysr@777 187 bool _adaptive_young_list_length;
ysr@777 188 size_t _young_list_target_length;
ysr@777 189 size_t _young_list_fixed_length;
brutisso@3120 190 size_t _prev_eden_capacity; // used for logging
ysr@777 191
tonyp@2333 192 // The max number of regions we can extend the eden by while the GC
tonyp@2333 193 // locker is active. This should be >= _young_list_target_length;
tonyp@2333 194 size_t _young_list_max_length;
tonyp@2333 195
ysr@777 196 size_t _young_cset_length;
ysr@777 197 bool _last_young_gc_full;
ysr@777 198
ysr@777 199 unsigned _full_young_pause_num;
ysr@777 200 unsigned _partial_young_pause_num;
ysr@777 201
ysr@777 202 bool _during_marking;
ysr@777 203 bool _in_marking_window;
ysr@777 204 bool _in_marking_window_im;
ysr@777 205
ysr@777 206 SurvRateGroup* _short_lived_surv_rate_group;
ysr@777 207 SurvRateGroup* _survivor_surv_rate_group;
ysr@777 208 // add here any more surv rate groups
ysr@777 209
tonyp@1791 210 double _gc_overhead_perc;
tonyp@1791 211
tonyp@3119 212 double _reserve_factor;
tonyp@3119 213 size_t _reserve_regions;
tonyp@3119 214
ysr@777 215 bool during_marking() {
ysr@777 216 return _during_marking;
ysr@777 217 }
ysr@777 218
ysr@777 219 // <NEW PREDICTION>
ysr@777 220
ysr@777 221 private:
ysr@777 222 enum PredictionConstants {
ysr@777 223 TruncatedSeqLength = 10
ysr@777 224 };
ysr@777 225
ysr@777 226 TruncatedSeq* _alloc_rate_ms_seq;
ysr@777 227 double _prev_collection_pause_end_ms;
ysr@777 228
ysr@777 229 TruncatedSeq* _pending_card_diff_seq;
ysr@777 230 TruncatedSeq* _rs_length_diff_seq;
ysr@777 231 TruncatedSeq* _cost_per_card_ms_seq;
ysr@777 232 TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
ysr@777 233 TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
ysr@777 234 TruncatedSeq* _cost_per_entry_ms_seq;
ysr@777 235 TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
ysr@777 236 TruncatedSeq* _cost_per_byte_ms_seq;
ysr@777 237 TruncatedSeq* _constant_other_time_ms_seq;
ysr@777 238 TruncatedSeq* _young_other_cost_per_region_ms_seq;
ysr@777 239 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
ysr@777 240
ysr@777 241 TruncatedSeq* _pending_cards_seq;
ysr@777 242 TruncatedSeq* _scanned_cards_seq;
ysr@777 243 TruncatedSeq* _rs_lengths_seq;
ysr@777 244
ysr@777 245 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
ysr@777 246
ysr@777 247 TruncatedSeq* _young_gc_eff_seq;
ysr@777 248
ysr@777 249 TruncatedSeq* _max_conc_overhead_seq;
ysr@777 250
brutisso@3120 251 bool _using_new_ratio_calculations;
brutisso@3120 252 size_t _min_desired_young_length; // as set on the command line or default calculations
brutisso@3120 253 size_t _max_desired_young_length; // as set on the command line or default calculations
brutisso@3120 254
ysr@777 255 size_t _recorded_young_regions;
ysr@777 256 size_t _recorded_non_young_regions;
ysr@777 257 size_t _recorded_region_num;
ysr@777 258
ysr@777 259 size_t _free_regions_at_end_of_collection;
ysr@777 260
ysr@777 261 size_t _recorded_rs_lengths;
ysr@777 262 size_t _max_rs_lengths;
ysr@777 263
ysr@777 264 size_t _recorded_marked_bytes;
ysr@777 265 size_t _recorded_young_bytes;
ysr@777 266
ysr@777 267 size_t _predicted_pending_cards;
ysr@777 268 size_t _predicted_cards_scanned;
ysr@777 269 size_t _predicted_rs_lengths;
ysr@777 270 size_t _predicted_bytes_to_copy;
ysr@777 271
ysr@777 272 double _predicted_survival_ratio;
ysr@777 273 double _predicted_rs_update_time_ms;
ysr@777 274 double _predicted_rs_scan_time_ms;
ysr@777 275 double _predicted_object_copy_time_ms;
ysr@777 276 double _predicted_constant_other_time_ms;
ysr@777 277 double _predicted_young_other_time_ms;
ysr@777 278 double _predicted_non_young_other_time_ms;
ysr@777 279 double _predicted_pause_time_ms;
ysr@777 280
ysr@777 281 double _vtime_diff_ms;
ysr@777 282
ysr@777 283 double _recorded_young_free_cset_time_ms;
ysr@777 284 double _recorded_non_young_free_cset_time_ms;
ysr@777 285
ysr@777 286 double _sigma;
ysr@777 287 double _expensive_region_limit_ms;
ysr@777 288
ysr@777 289 size_t _rs_lengths_prediction;
ysr@777 290
ysr@777 291 size_t _known_garbage_bytes;
ysr@777 292 double _known_garbage_ratio;
ysr@777 293
ysr@777 294 double sigma() {
ysr@777 295 return _sigma;
ysr@777 296 }
ysr@777 297
ysr@777 298 // A function that prevents us putting too much stock in small sample
ysr@777 299 // sets. Returns a number between 2.0 and 1.0, depending on the number
ysr@777 300 // of samples. 5 or more samples yields one; fewer scales linearly from
ysr@777 301 // 2.0 at 1 sample to 1.0 at 5.
ysr@777 302 double confidence_factor(int samples) {
ysr@777 303 if (samples > 4) return 1.0;
ysr@777 304 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
ysr@777 305 }
ysr@777 306
ysr@777 307 double get_new_neg_prediction(TruncatedSeq* seq) {
ysr@777 308 return seq->davg() - sigma() * seq->dsd();
ysr@777 309 }
ysr@777 310
ysr@777 311 #ifndef PRODUCT
ysr@777 312 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
ysr@777 313 #endif // PRODUCT
ysr@777 314
iveresov@1546 315 void adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 316 double update_rs_processed_buffers,
iveresov@1546 317 double goal_ms);
iveresov@1546 318
ysr@777 319 protected:
ysr@777 320 double _pause_time_target_ms;
ysr@777 321 double _recorded_young_cset_choice_time_ms;
ysr@777 322 double _recorded_non_young_cset_choice_time_ms;
ysr@777 323 bool _within_target;
ysr@777 324 size_t _pending_cards;
ysr@777 325 size_t _max_pending_cards;
ysr@777 326
ysr@777 327 public:
ysr@777 328
ysr@777 329 void set_region_short_lived(HeapRegion* hr) {
ysr@777 330 hr->install_surv_rate_group(_short_lived_surv_rate_group);
ysr@777 331 }
ysr@777 332
ysr@777 333 void set_region_survivors(HeapRegion* hr) {
ysr@777 334 hr->install_surv_rate_group(_survivor_surv_rate_group);
ysr@777 335 }
ysr@777 336
ysr@777 337 #ifndef PRODUCT
ysr@777 338 bool verify_young_ages();
ysr@777 339 #endif // PRODUCT
ysr@777 340
ysr@777 341 double get_new_prediction(TruncatedSeq* seq) {
ysr@777 342 return MAX2(seq->davg() + sigma() * seq->dsd(),
ysr@777 343 seq->davg() * confidence_factor(seq->num()));
ysr@777 344 }
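// Illustrative sketch (assumed numbers, not taken from the original file):
// with sigma() == 0.5 and a sequence whose decaying average davg() is 10.0 ms,
// decaying standard deviation dsd() is 2.0 ms and which holds num() == 3
// samples, get_new_prediction() evaluates to
//   MAX2(10.0 + 0.5 * 2.0, 10.0 * confidence_factor(3))
//     == MAX2(11.0, 10.0 * 1.5) == 15.0 ms,
// i.e. predictions based on only a few samples are padded towards the
// pessimistic side before they are used for pause-time planning.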
ysr@777 345
ysr@777 346 size_t young_cset_length() {
ysr@777 347 return _young_cset_length;
ysr@777 348 }
ysr@777 349
ysr@777 350 void record_max_rs_lengths(size_t rs_lengths) {
ysr@777 351 _max_rs_lengths = rs_lengths;
ysr@777 352 }
ysr@777 353
ysr@777 354 size_t predict_pending_card_diff() {
ysr@777 355 double prediction = get_new_neg_prediction(_pending_card_diff_seq);
ysr@777 356 if (prediction < 0.00001)
ysr@777 357 return 0;
ysr@777 358 else
ysr@777 359 return (size_t) prediction;
ysr@777 360 }
ysr@777 361
ysr@777 362 size_t predict_pending_cards() {
ysr@777 363 size_t max_pending_card_num = _g1->max_pending_card_num();
ysr@777 364 size_t diff = predict_pending_card_diff();
ysr@777 365 size_t prediction;
ysr@777 366 if (diff > max_pending_card_num)
ysr@777 367 prediction = max_pending_card_num;
ysr@777 368 else
ysr@777 369 prediction = max_pending_card_num - diff;
ysr@777 370
ysr@777 371 return prediction;
ysr@777 372 }
ysr@777 373
ysr@777 374 size_t predict_rs_length_diff() {
ysr@777 375 return (size_t) get_new_prediction(_rs_length_diff_seq);
ysr@777 376 }
ysr@777 377
ysr@777 378 double predict_alloc_rate_ms() {
ysr@777 379 return get_new_prediction(_alloc_rate_ms_seq);
ysr@777 380 }
ysr@777 381
ysr@777 382 double predict_cost_per_card_ms() {
ysr@777 383 return get_new_prediction(_cost_per_card_ms_seq);
ysr@777 384 }
ysr@777 385
ysr@777 386 double predict_rs_update_time_ms(size_t pending_cards) {
ysr@777 387 return (double) pending_cards * predict_cost_per_card_ms();
ysr@777 388 }
ysr@777 389
ysr@777 390 double predict_fully_young_cards_per_entry_ratio() {
ysr@777 391 return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
ysr@777 392 }
ysr@777 393
ysr@777 394 double predict_partially_young_cards_per_entry_ratio() {
ysr@777 395 if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
ysr@777 396 return predict_fully_young_cards_per_entry_ratio();
ysr@777 397 else
ysr@777 398 return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
ysr@777 399 }
ysr@777 400
ysr@777 401 size_t predict_young_card_num(size_t rs_length) {
ysr@777 402 return (size_t) ((double) rs_length *
ysr@777 403 predict_fully_young_cards_per_entry_ratio());
ysr@777 404 }
ysr@777 405
ysr@777 406 size_t predict_non_young_card_num(size_t rs_length) {
ysr@777 407 return (size_t) ((double) rs_length *
ysr@777 408 predict_partially_young_cards_per_entry_ratio());
ysr@777 409 }
ysr@777 410
ysr@777 411 double predict_rs_scan_time_ms(size_t card_num) {
ysr@777 412 if (full_young_gcs())
ysr@777 413 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 414 else
ysr@777 415 return predict_partially_young_rs_scan_time_ms(card_num);
ysr@777 416 }
ysr@777 417
ysr@777 418 double predict_partially_young_rs_scan_time_ms(size_t card_num) {
ysr@777 419 if (_partially_young_cost_per_entry_ms_seq->num() < 3)
ysr@777 420 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 421 else
ysr@777 422 return (double) card_num *
ysr@777 423 get_new_prediction(_partially_young_cost_per_entry_ms_seq);
ysr@777 424 }
ysr@777 425
ysr@777 426 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
ysr@777 427 if (_cost_per_byte_ms_during_cm_seq->num() < 3)
ysr@777 428 return 1.1 * (double) bytes_to_copy *
ysr@777 429 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 430 else
ysr@777 431 return (double) bytes_to_copy *
ysr@777 432 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
ysr@777 433 }
ysr@777 434
ysr@777 435 double predict_object_copy_time_ms(size_t bytes_to_copy) {
ysr@777 436 if (_in_marking_window && !_in_marking_window_im)
ysr@777 437 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
ysr@777 438 else
ysr@777 439 return (double) bytes_to_copy *
ysr@777 440 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 441 }
ysr@777 442
ysr@777 443 double predict_constant_other_time_ms() {
ysr@777 444 return get_new_prediction(_constant_other_time_ms_seq);
ysr@777 445 }
ysr@777 446
ysr@777 447 double predict_young_other_time_ms(size_t young_num) {
ysr@777 448 return
ysr@777 449 (double) young_num *
ysr@777 450 get_new_prediction(_young_other_cost_per_region_ms_seq);
ysr@777 451 }
ysr@777 452
ysr@777 453 double predict_non_young_other_time_ms(size_t non_young_num) {
ysr@777 454 return
ysr@777 455 (double) non_young_num *
ysr@777 456 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
ysr@777 457 }
ysr@777 458
ysr@777 459 void check_if_region_is_too_expensive(double predicted_time_ms);
ysr@777 460
ysr@777 461 double predict_young_collection_elapsed_time_ms(size_t adjustment);
ysr@777 462 double predict_base_elapsed_time_ms(size_t pending_cards);
ysr@777 463 double predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 464 size_t scanned_cards);
ysr@777 465 size_t predict_bytes_to_copy(HeapRegion* hr);
ysr@777 466 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
ysr@777 467
ysr@777 468 void start_recording_regions();
johnc@1829 469 void record_cset_region_info(HeapRegion* hr, bool young);
johnc@1829 470 void record_non_young_cset_region(HeapRegion* hr);
johnc@1829 471
johnc@1829 472 void set_recorded_young_regions(size_t n_regions);
johnc@1829 473 void set_recorded_young_bytes(size_t bytes);
johnc@1829 474 void set_recorded_rs_lengths(size_t rs_lengths);
johnc@1829 475 void set_predicted_bytes_to_copy(size_t bytes);
johnc@1829 476
ysr@777 477 void end_recording_regions();
ysr@777 478
ysr@777 479 void record_vtime_diff_ms(double vtime_diff_ms) {
ysr@777 480 _vtime_diff_ms = vtime_diff_ms;
ysr@777 481 }
ysr@777 482
ysr@777 483 void record_young_free_cset_time_ms(double time_ms) {
ysr@777 484 _recorded_young_free_cset_time_ms = time_ms;
ysr@777 485 }
ysr@777 486
ysr@777 487 void record_non_young_free_cset_time_ms(double time_ms) {
ysr@777 488 _recorded_non_young_free_cset_time_ms = time_ms;
ysr@777 489 }
ysr@777 490
ysr@777 491 double predict_young_gc_eff() {
ysr@777 492 return get_new_neg_prediction(_young_gc_eff_seq);
ysr@777 493 }
ysr@777 494
apetrusenko@980 495 double predict_survivor_regions_evac_time();
apetrusenko@980 496
ysr@777 497 // </NEW PREDICTION>
ysr@777 498
ysr@777 499 void cset_regions_freed() {
ysr@777 500 bool propagate = _last_young_gc_full && !_in_marking_window;
ysr@777 501 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 502 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 503 // also call it on any more surv rate groups
ysr@777 504 }
ysr@777 505
ysr@777 506 void set_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 507 _known_garbage_bytes = known_garbage_bytes;
ysr@777 508 size_t heap_bytes = _g1->capacity();
ysr@777 509 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 510 }
ysr@777 511
ysr@777 512 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 513 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
ysr@777 514
ysr@777 515 _known_garbage_bytes -= known_garbage_bytes;
ysr@777 516 size_t heap_bytes = _g1->capacity();
ysr@777 517 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 518 }
ysr@777 519
ysr@777 520 G1MMUTracker* mmu_tracker() {
ysr@777 521 return _mmu_tracker;
ysr@777 522 }
ysr@777 523
tonyp@2011 524 double max_pause_time_ms() {
tonyp@2011 525 return _mmu_tracker->max_gc_time() * 1000.0;
tonyp@2011 526 }
tonyp@2011 527
ysr@777 528 double predict_remark_time_ms() {
ysr@777 529 return get_new_prediction(_concurrent_mark_remark_times_ms);
ysr@777 530 }
ysr@777 531
ysr@777 532 double predict_cleanup_time_ms() {
ysr@777 533 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
ysr@777 534 }
ysr@777 535
ysr@777 536 // Returns an estimate of the survival rate of the region at yg-age
ysr@777 537 // "yg_age".
apetrusenko@980 538 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
apetrusenko@980 539 TruncatedSeq* seq = surv_rate_group->get_seq(age);
ysr@777 540 if (seq->num() == 0)
ysr@777 541 gclog_or_tty->print("BARF! age is %d", age);
ysr@777 542 guarantee( seq->num() > 0, "invariant" );
ysr@777 543 double pred = get_new_prediction(seq);
ysr@777 544 if (pred > 1.0)
ysr@777 545 pred = 1.0;
ysr@777 546 return pred;
ysr@777 547 }
ysr@777 548
apetrusenko@980 549 double predict_yg_surv_rate(int age) {
apetrusenko@980 550 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
apetrusenko@980 551 }
apetrusenko@980 552
ysr@777 553 double accum_yg_surv_rate_pred(int age) {
ysr@777 554 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
ysr@777 555 }
ysr@777 556
ysr@777 557 protected:
tonyp@1966 558 void print_stats(int level, const char* str, double value);
tonyp@1966 559 void print_stats(int level, const char* str, int value);
tonyp@1966 560
brutisso@2712 561 void print_par_stats(int level, const char* str, double* data);
brutisso@2712 562 void print_par_sizes(int level, const char* str, double* data);
ysr@777 563
ysr@777 564 void check_other_times(int level,
ysr@777 565 NumberSeq* other_times_ms,
ysr@777 566 NumberSeq* calc_other_times_ms) const;
ysr@777 567
ysr@777 568 void print_summary (PauseSummary* stats) const;
ysr@777 569
ysr@777 570 void print_summary (int level, const char* str, NumberSeq* seq) const;
ysr@777 571 void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
ysr@777 572
ysr@777 573 double avg_value (double* data);
ysr@777 574 double max_value (double* data);
ysr@777 575 double sum_of_values (double* data);
ysr@777 576 double max_sum (double* data1, double* data2);
ysr@777 577
ysr@777 578 int _last_satb_drain_processed_buffers;
ysr@777 579 int _last_update_rs_processed_buffers;
ysr@777 580 double _last_pause_time_ms;
ysr@777 581
ysr@777 582 size_t _bytes_in_collection_set_before_gc;
tonyp@3028 583 size_t _bytes_copied_during_gc;
tonyp@3028 584
ysr@777 585 // Used to count used bytes in CS.
ysr@777 586 friend class CountCSClosure;
ysr@777 587
ysr@777 588 // Statistics kept per GC stoppage, pause or full.
ysr@777 589 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
ysr@777 590
ysr@777 591 // We track markings.
ysr@777 592 int _num_markings;
ysr@777 593 double _mark_thread_startup_sec; // Time at startup of marking thread
ysr@777 594
ysr@777 595 // Add a new GC of the given duration and end time to the record.
ysr@777 596 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
ysr@777 597
ysr@777 598 // The head of the list (via "next_in_collection_set()") representing the
johnc@1829 599 // current collection set. Set from the incrementally built collection
johnc@1829 600 // set at the start of the pause.
ysr@777 601 HeapRegion* _collection_set;
johnc@1829 602
johnc@1829 603 // The number of regions in the collection set. Set from the incrementally
johnc@1829 604 // built collection set at the start of an evacuation pause.
ysr@777 605 size_t _collection_set_size;
johnc@1829 606
johnc@1829 607 // The number of bytes in the collection set before the pause. Set from
johnc@1829 608 // the incrementally built collection set at the start of an evacuation
johnc@1829 609 // pause.
ysr@777 610 size_t _collection_set_bytes_used_before;
ysr@777 611
johnc@1829 612 // The associated information that is maintained while the incremental
johnc@1829 613 // collection set is being built with young regions. Used to populate
johnc@1829 614 // the recorded info for the evacuation pause.
johnc@1829 615
johnc@1829 616 enum CSetBuildType {
johnc@1829 617 Active, // We are actively building the collection set
johnc@1829 618 Inactive // We are not actively building the collection set
johnc@1829 619 };
johnc@1829 620
johnc@1829 621 CSetBuildType _inc_cset_build_state;
johnc@1829 622
johnc@1829 623 // The head of the incrementally built collection set.
johnc@1829 624 HeapRegion* _inc_cset_head;
johnc@1829 625
johnc@1829 626 // The tail of the incrementally built collection set.
johnc@1829 627 HeapRegion* _inc_cset_tail;
johnc@1829 628
johnc@1829 629 // The number of regions in the incrementally built collection set.
johnc@1829 630 // Used to set _collection_set_size at the start of an evacuation
johnc@1829 631 // pause.
johnc@1829 632 size_t _inc_cset_size;
johnc@1829 633
johnc@1829 634 // Used as the index in the surviving young words structure
johnc@1829 635 // which tracks the amount of space, for each young region,
johnc@1829 636 // that survives the pause.
johnc@1829 637 size_t _inc_cset_young_index;
johnc@1829 638
johnc@1829 639 // The number of bytes in the incrementally built collection set.
johnc@1829 640 // Used to set _collection_set_bytes_used_before at the start of
johnc@1829 641 // an evacuation pause.
johnc@1829 642 size_t _inc_cset_bytes_used_before;
johnc@1829 643
johnc@1829 644 // Used to record the highest end of heap region in collection set
johnc@1829 645 HeapWord* _inc_cset_max_finger;
johnc@1829 646
johnc@1829 647 // The number of recorded used bytes in the young regions
johnc@1829 648 // of the collection set. This is the sum of the used() bytes
johnc@1829 649 // of retired young regions in the collection set.
johnc@1829 650 size_t _inc_cset_recorded_young_bytes;
johnc@1829 651
johnc@1829 652 // The RSet lengths recorded for regions in the collection set
johnc@1829 653 // (updated by the periodic sampling of the regions in the
johnc@1829 654 // young list/collection set).
johnc@1829 655 size_t _inc_cset_recorded_rs_lengths;
johnc@1829 656
johnc@1829 657 // The predicted elapsed time it will take to collect the regions
johnc@1829 658 // in the collection set (updated by the periodic sampling of the
johnc@1829 659 // regions in the young list/collection set).
johnc@1829 660 double _inc_cset_predicted_elapsed_time_ms;
johnc@1829 661
johnc@1829 662 // The predicted bytes to copy for the regions in the collection
johnc@1829 663 // set (updated by the periodic sampling of the regions in the
johnc@1829 664 // young list/collection set).
johnc@1829 665 size_t _inc_cset_predicted_bytes_to_copy;
johnc@1829 666
ysr@777 667 // Info about marking.
ysr@777 668 int _n_marks; // Sticky at 2, so we know when we've done at least 2.
ysr@777 669
ysr@777 670 // The number of collection pauses at the end of the last mark.
ysr@777 671 size_t _n_pauses_at_mark_end;
ysr@777 672
ysr@777 673 // Stash a pointer to the g1 heap.
ysr@777 674 G1CollectedHeap* _g1;
ysr@777 675
ysr@777 676 // The average time in ms per collection pause, averaged over recent pauses.
ysr@777 677 double recent_avg_time_for_pauses_ms();
ysr@777 678
johnc@3021 679 // The average time in ms for RS scanning, per pause, averaged
johnc@3021 680 // over recent pauses. (Note the RS scanning time for a pause
johnc@3021 681 // is itself an average of the RS scanning time for each worker
johnc@3021 682 // thread.)
johnc@3021 683 double recent_avg_time_for_rs_scan_ms();
ysr@777 684
ysr@777 685 // The number of "recent" GCs recorded in the number sequences
ysr@777 686 int number_of_recent_gcs();
ysr@777 687
ysr@777 688 // The average survival ratio, computed by the total number of bytes
ysr@777 689 // surviving / total number of bytes before collection over the last
ysr@777 690 // several recent pauses.
ysr@777 691 double recent_avg_survival_fraction();
ysr@777 692 // The survival fraction of the most recent pause; if there have been no
ysr@777 693 // pauses, returns 1.0.
ysr@777 694 double last_survival_fraction();
ysr@777 695
ysr@777 696 // Returns a "conservative" estimate of the recent survival rate, i.e.,
ysr@777 697 // one that may be higher than "recent_avg_survival_fraction".
ysr@777 698 // This is conservative in several ways:
ysr@777 699 // If there have been few pauses, it will assume a potential high
ysr@777 700 // variance, and err on the side of caution.
ysr@777 701 // It puts a lower bound (currently 0.1) on the value it will return.
ysr@777 702 // To try to detect phase changes, if the most recent pause ("latest") has a
ysr@777 703 // higher-than average ("avg") survival rate, it returns that rate.
ysr@777 704 // "work" version is a utility function; young is restricted to young regions.
ysr@777 705 double conservative_avg_survival_fraction_work(double avg,
ysr@777 706 double latest);
ysr@777 707
ysr@777 708 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 709 // surviving and the total number of bytes before collection, resp.,
ysr@777 710 // over the last several recent pauses.
ysr@777 711 // Returns the survival rate for the category in the most recent pause.
ysr@777 712 // If there have been no pauses, returns 1.0.
ysr@777 713 double last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 714 TruncatedSeq* before);
ysr@777 715
ysr@777 716 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 717 // surviving and the total number of bytes before collection, resp.,
ysr@777 718 // over the last several recent pauses
ysr@777 719 // Returns the average survival ratio over the last several recent pauses.
ysr@777 720 // If there have been no pauses, returns 1.0.
ysr@777 721 double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 722 TruncatedSeq* before);
ysr@777 723
ysr@777 724 double conservative_avg_survival_fraction() {
ysr@777 725 double avg = recent_avg_survival_fraction();
ysr@777 726 double latest = last_survival_fraction();
ysr@777 727 return conservative_avg_survival_fraction_work(avg, latest);
ysr@777 728 }
ysr@777 729
ysr@777 730 // The ratio of gc time to elapsed time, computed over recent pauses.
ysr@777 731 double _recent_avg_pause_time_ratio;
ysr@777 732
ysr@777 733 double recent_avg_pause_time_ratio() {
ysr@777 734 return _recent_avg_pause_time_ratio;
ysr@777 735 }
ysr@777 736
ysr@777 737 // Number of pauses between concurrent marking.
ysr@777 738 size_t _pauses_btwn_concurrent_mark;
ysr@777 739
ysr@777 740 size_t _n_marks_since_last_pause;
ysr@777 741
tonyp@1794 742 // At the end of a pause we check the heap occupancy and we decide
tonyp@1794 743 // whether we will start a marking cycle during the next pause. If
tonyp@1794 744 // we decide that we want to do that, we will set this parameter to
tonyp@1794 745 // true. So, this parameter will stay true between the end of a
tonyp@1794 746 // pause and the beginning of a subsequent pause (not necessarily
tonyp@1794 747 // the next one, see the comments on the next field) when we decide
tonyp@1794 748 // that we will indeed start a marking cycle and do the initial-mark
tonyp@1794 749 // work.
tonyp@1794 750 volatile bool _initiate_conc_mark_if_possible;
ysr@777 751
tonyp@1794 752 // If initiate_conc_mark_if_possible() is set at the beginning of a
tonyp@1794 753 // pause, it is a suggestion that the pause should start a marking
tonyp@1794 754 // cycle by doing the initial-mark work. However, it is possible
tonyp@1794 755 // that the concurrent marking thread is still finishing up the
tonyp@1794 756 // previous marking cycle (e.g., clearing the next marking
tonyp@1794 757 // bitmap). If that is the case we cannot start a new cycle and
tonyp@1794 758 // we'll have to wait for the concurrent marking thread to finish
tonyp@1794 759 // what it is doing. In this case we will postpone the marking cycle
tonyp@1794 760 // initiation decision for the next pause. When we eventually decide
tonyp@1794 761 // to start a cycle, we will set _during_initial_mark_pause which
tonyp@1794 762 // will stay true until the end of the initial-mark pause and it's
tonyp@1794 763 // the condition that indicates that a pause is doing the
tonyp@1794 764 // initial-mark work.
tonyp@1794 765 volatile bool _during_initial_mark_pause;
tonyp@1794 766
ysr@777 767 bool _should_revert_to_full_young_gcs;
ysr@777 768 bool _last_full_young_gc;
ysr@777 769
ysr@777 770 // This set of variables tracks the collector efficiency, in order to
ysr@777 771 // determine whether we should initiate a new marking.
ysr@777 772 double _cur_mark_stop_world_time_ms;
ysr@777 773 double _mark_remark_start_sec;
ysr@777 774 double _mark_cleanup_start_sec;
ysr@777 775 double _mark_closure_time_ms;
ysr@777 776
tonyp@3119 777 // Update the young list target length either by setting it to the
tonyp@3119 778 // desired fixed value or by calculating it using G1's pause
tonyp@3119 779 // prediction model. If no rs_lengths parameter is passed, predict
tonyp@3119 780 // the RS lengths using the prediction model, otherwise use the
tonyp@3119 781 // given rs_lengths as the prediction.
tonyp@3119 782 void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
tonyp@3119 783
tonyp@3119 784 // Calculate and return the minimum desired young list target
tonyp@3119 785 // length. This is the minimum desired young list length according
tonyp@3119 786 // to the user's inputs.
tonyp@3119 787 size_t calculate_young_list_desired_min_length(size_t base_min_length);
tonyp@3119 788
tonyp@3119 789 // Calculate and return the maximum desired young list target
tonyp@3119 790 // length. This is the maximum desired young list length according
tonyp@3119 791 // to the user's inputs.
tonyp@3119 792 size_t calculate_young_list_desired_max_length();
tonyp@3119 793
tonyp@3119 794 // Calculate and return the maximum young list target length that
tonyp@3119 795 // can fit into the pause time goal. The parameters are: rs_lengths
tonyp@3119 796 // represent the prediction of how large the young RSet lengths will
tonyp@3119 797 // be, base_min_length is the already existing number of regions in
tonyp@3119 798 // the young list, min_length and max_length are the desired min and
tonyp@3119 799 // max young list length according to the user's inputs.
tonyp@3119 800 size_t calculate_young_list_target_length(size_t rs_lengths,
tonyp@3119 801 size_t base_min_length,
tonyp@3119 802 size_t desired_min_length,
tonyp@3119 803 size_t desired_max_length);
tonyp@3119 804
tonyp@3119 805 // Check whether a given young length (young_length) fits into the
tonyp@3119 806 // given target pause time and whether the prediction for the amount
tonyp@3119 807 // of objects to be copied for the given length will fit into the
tonyp@3119 808 // given free space (expressed by base_free_regions). It is used by
tonyp@3119 809 // calculate_young_list_target_length().
tonyp@3119 810 bool predict_will_fit(size_t young_length, double base_time_ms,
tonyp@3119 811 size_t base_free_regions, double target_pause_time_ms);
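// Illustrative sketch (an assumption about how the pieces above fit together,
// not a statement taken from the original file): conceptually,
// calculate_young_list_target_length() can be read as searching for the
// largest young_length in [desired_min_length, desired_max_length] for which
//   predict_will_fit(young_length, base_time_ms,
//                    base_free_regions, target_pause_time_ms)
// still returns true, i.e. the largest young list whose predicted copying
// work fits both the pause time goal and the available free regions.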
ysr@777 812
ysr@777 813 public:
ysr@777 814
ysr@777 815 G1CollectorPolicy();
ysr@777 816
ysr@777 817 virtual G1CollectorPolicy* as_g1_policy() { return this; }
ysr@777 818
ysr@777 819 virtual CollectorPolicy::Name kind() {
ysr@777 820 return CollectorPolicy::G1CollectorPolicyKind;
ysr@777 821 }
ysr@777 822
tonyp@3119 823 // Check the current value of the young list RSet lengths and
tonyp@3119 824 // compare it against the last prediction. If the current value is
tonyp@3119 825 // higher, recalculate the young list target length prediction.
tonyp@3119 826 void revise_young_list_target_length_if_necessary();
ysr@777 827
ysr@777 828 size_t bytes_in_collection_set() {
ysr@777 829 return _bytes_in_collection_set_before_gc;
ysr@777 830 }
ysr@777 831
ysr@777 832 unsigned calc_gc_alloc_time_stamp() {
ysr@777 833 return _all_pause_times_ms->num() + 1;
ysr@777 834 }
ysr@777 835
brutisso@3120 836 // This should be called after the heap is resized.
brutisso@3120 837 void record_new_heap_size(size_t new_number_of_regions);
tonyp@3119 838
ysr@777 839 protected:
ysr@777 840
ysr@777 841 // Count the number of bytes used in the CS.
ysr@777 842 void count_CS_bytes_used();
ysr@777 843
ysr@777 844 // Together these do the base cleanup-recording work. Subclasses might
ysr@777 845 // want to put something between them.
ysr@777 846 void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 847 size_t max_live_bytes);
ysr@777 848 void record_concurrent_mark_cleanup_end_work2();
ysr@777 849
brutisso@3120 850 void update_young_list_size_using_newratio(size_t number_of_heap_regions);
brutisso@3120 851
ysr@777 852 public:
ysr@777 853
ysr@777 854 virtual void init();
ysr@777 855
apetrusenko@980 856 // Create jstat counters for the policy.
apetrusenko@980 857 virtual void initialize_gc_policy_counters();
apetrusenko@980 858
ysr@777 859 virtual HeapWord* mem_allocate_work(size_t size,
ysr@777 860 bool is_tlab,
ysr@777 861 bool* gc_overhead_limit_was_exceeded);
ysr@777 862
ysr@777 863 // This method controls how a collector handles one or more
ysr@777 864 // of its generations being fully allocated.
ysr@777 865 virtual HeapWord* satisfy_failed_allocation(size_t size,
ysr@777 866 bool is_tlab);
ysr@777 867
ysr@777 868 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
ysr@777 869
ysr@777 870 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
ysr@777 871
ysr@777 872 // The number of collection pauses so far.
ysr@777 873 long n_pauses() const { return _n_pauses; }
ysr@777 874
ysr@777 875 // Update the heuristic info to record a collection pause of the given
ysr@777 876 // start time, where the given number of bytes were used at the start.
ysr@777 877 // This may involve changing the desired size of a collection set.
ysr@777 878
ysr@777 879 virtual void record_stop_world_start();
ysr@777 880
ysr@777 881 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 882 size_t start_used);
ysr@777 883
ysr@777 884 // Must currently be called while the world is stopped.
brutisso@3065 885 void record_concurrent_mark_init_end(double
ysr@777 886 mark_init_elapsed_time_ms);
ysr@777 887
ysr@777 888 void record_mark_closure_time(double mark_closure_time_ms);
ysr@777 889
ysr@777 890 virtual void record_concurrent_mark_remark_start();
ysr@777 891 virtual void record_concurrent_mark_remark_end();
ysr@777 892
ysr@777 893 virtual void record_concurrent_mark_cleanup_start();
ysr@777 894 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 895 size_t max_live_bytes);
ysr@777 896 virtual void record_concurrent_mark_cleanup_completed();
ysr@777 897
ysr@777 898 virtual void record_concurrent_pause();
ysr@777 899 virtual void record_concurrent_pause_end();
ysr@777 900
tonyp@2062 901 virtual void record_collection_pause_end();
tonyp@2961 902 void print_heap_transition();
ysr@777 903
ysr@777 904 // Record the fact that a full collection occurred.
ysr@777 905 virtual void record_full_collection_start();
ysr@777 906 virtual void record_full_collection_end();
ysr@777 907
tonyp@1966 908 void record_gc_worker_start_time(int worker_i, double ms) {
tonyp@1966 909 _par_last_gc_worker_start_times_ms[worker_i] = ms;
tonyp@1966 910 }
tonyp@1966 911
ysr@777 912 void record_ext_root_scan_time(int worker_i, double ms) {
ysr@777 913 _par_last_ext_root_scan_times_ms[worker_i] = ms;
ysr@777 914 }
ysr@777 915
ysr@777 916 void record_mark_stack_scan_time(int worker_i, double ms) {
ysr@777 917 _par_last_mark_stack_scan_times_ms[worker_i] = ms;
ysr@777 918 }
ysr@777 919
ysr@777 920 void record_satb_drain_time(double ms) {
ysr@777 921 _cur_satb_drain_time_ms = ms;
ysr@777 922 _satb_drain_time_set = true;
ysr@777 923 }
ysr@777 924
ysr@777 925 void record_satb_drain_processed_buffers (int processed_buffers) {
ysr@777 926 _last_satb_drain_processed_buffers = processed_buffers;
ysr@777 927 }
ysr@777 928
ysr@777 929 void record_mod_union_time(double ms) {
ysr@777 930 _all_mod_union_times_ms->add(ms);
ysr@777 931 }
ysr@777 932
ysr@777 933 void record_update_rs_time(int thread, double ms) {
ysr@777 934 _par_last_update_rs_times_ms[thread] = ms;
ysr@777 935 }
ysr@777 936
ysr@777 937 void record_update_rs_processed_buffers (int thread,
ysr@777 938 double processed_buffers) {
ysr@777 939 _par_last_update_rs_processed_buffers[thread] = processed_buffers;
ysr@777 940 }
ysr@777 941
ysr@777 942 void record_scan_rs_time(int thread, double ms) {
ysr@777 943 _par_last_scan_rs_times_ms[thread] = ms;
ysr@777 944 }
ysr@777 945
ysr@777 946 void reset_obj_copy_time(int thread) {
ysr@777 947 _par_last_obj_copy_times_ms[thread] = 0.0;
ysr@777 948 }
ysr@777 949
ysr@777 950 void reset_obj_copy_time() {
ysr@777 951 reset_obj_copy_time(0);
ysr@777 952 }
ysr@777 953
ysr@777 954 void record_obj_copy_time(int thread, double ms) {
ysr@777 955 _par_last_obj_copy_times_ms[thread] += ms;
ysr@777 956 }
ysr@777 957
tonyp@1966 958 void record_termination(int thread, double ms, size_t attempts) {
tonyp@1966 959 _par_last_termination_times_ms[thread] = ms;
tonyp@1966 960 _par_last_termination_attempts[thread] = (double) attempts;
ysr@777 961 }
ysr@777 962
tonyp@1966 963 void record_gc_worker_end_time(int worker_i, double ms) {
tonyp@1966 964 _par_last_gc_worker_end_times_ms[worker_i] = ms;
ysr@777 965 }
ysr@777 966
tonyp@1030 967 void record_pause_time_ms(double ms) {
ysr@777 968 _last_pause_time_ms = ms;
ysr@777 969 }
ysr@777 970
ysr@777 971 void record_clear_ct_time(double ms) {
ysr@777 972 _cur_clear_ct_time_ms = ms;
ysr@777 973 }
ysr@777 974
ysr@777 975 void record_par_time(double ms) {
ysr@777 976 _cur_collection_par_time_ms = ms;
ysr@777 977 }
ysr@777 978
ysr@777 979 void record_aux_start_time(int i) {
ysr@777 980 guarantee(i < _aux_num, "should be within range");
ysr@777 981 _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
ysr@777 982 }
ysr@777 983
ysr@777 984 void record_aux_end_time(int i) {
ysr@777 985 guarantee(i < _aux_num, "should be within range");
ysr@777 986 double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
ysr@777 987 _cur_aux_times_set[i] = true;
ysr@777 988 _cur_aux_times_ms[i] += ms;
ysr@777 989 }
ysr@777 990
johnc@3175 991 void record_ref_proc_time(double ms) {
johnc@3175 992 _cur_ref_proc_time_ms = ms;
johnc@3175 993 }
johnc@3175 994
johnc@3175 995 void record_ref_enq_time(double ms) {
johnc@3175 996 _cur_ref_enq_time_ms = ms;
johnc@3175 997 }
johnc@3175 998
johnc@1325 999 #ifndef PRODUCT
johnc@1325 1000 void record_cc_clear_time(double ms) {
johnc@1325 1001 if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
johnc@1325 1002 _min_clear_cc_time_ms = ms;
johnc@1325 1003 if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
johnc@1325 1004 _max_clear_cc_time_ms = ms;
johnc@1325 1005 _cur_clear_cc_time_ms = ms;
johnc@1325 1006 _cum_clear_cc_time_ms += ms;
johnc@1325 1007 _num_cc_clears++;
johnc@1325 1008 }
johnc@1325 1009 #endif
johnc@1325 1010
tonyp@3028 1011 // Record how much space we copied during a GC. This is typically
tonyp@3028 1012 // called when a GC alloc region is being retired.
tonyp@3028 1013 void record_bytes_copied_during_gc(size_t bytes) {
tonyp@3028 1014 _bytes_copied_during_gc += bytes;
tonyp@3028 1015 }
tonyp@3028 1016
tonyp@3028 1017 // The amount of space we copied during a GC.
tonyp@3028 1018 size_t bytes_copied_during_gc() {
tonyp@3028 1019 return _bytes_copied_during_gc;
tonyp@3028 1020 }
ysr@777 1021
ysr@777 1022 // Choose a new collection set. Marks the chosen regions as being
ysr@777 1023 // "in_collection_set", and links them together. The head and number of
ysr@777 1024 // the collection set are available via access methods.
tonyp@2062 1025 virtual void choose_collection_set(double target_pause_time_ms) = 0;
ysr@777 1026
ysr@777 1027 // The head of the list (via "next_in_collection_set()") representing the
ysr@777 1028 // current collection set.
ysr@777 1029 HeapRegion* collection_set() { return _collection_set; }
ysr@777 1030
johnc@1829 1031 void clear_collection_set() { _collection_set = NULL; }
johnc@1829 1032
ysr@777 1033 // The number of elements in the current collection set.
ysr@777 1034 size_t collection_set_size() { return _collection_set_size; }
ysr@777 1035
ysr@777 1036 // Add "hr" to the CS.
ysr@777 1037 void add_to_collection_set(HeapRegion* hr);
ysr@777 1038
johnc@1829 1039 // Incremental CSet Support
johnc@1829 1040
johnc@1829 1041 // The head of the incrementally built collection set.
johnc@1829 1042 HeapRegion* inc_cset_head() { return _inc_cset_head; }
johnc@1829 1043
johnc@1829 1044 // The tail of the incrementally built collection set.
johnc@1829 1045 HeapRegion* inc_set_tail() { return _inc_cset_tail; }
johnc@1829 1046
johnc@1829 1047 // The number of elements in the incrementally built collection set.
johnc@1829 1048 size_t inc_cset_size() { return _inc_cset_size; }
johnc@1829 1049
johnc@1829 1050 // Initialize incremental collection set info.
johnc@1829 1051 void start_incremental_cset_building();
johnc@1829 1052
johnc@1829 1053 void clear_incremental_cset() {
johnc@1829 1054 _inc_cset_head = NULL;
johnc@1829 1055 _inc_cset_tail = NULL;
johnc@1829 1056 }
johnc@1829 1057
johnc@1829 1058 // Stop adding regions to the incremental collection set
johnc@1829 1059 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
johnc@1829 1060
johnc@1829 1061 // Add/remove information about hr to the aggregated information
johnc@1829 1062 // for the incrementally built collection set.
johnc@1829 1063 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
johnc@1829 1064 void remove_from_incremental_cset_info(HeapRegion* hr);
johnc@1829 1065
johnc@1829 1066 // Update information about hr in the aggregated information for
johnc@1829 1067 // the incrementally built collection set.
johnc@1829 1068 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
johnc@1829 1069
johnc@1829 1070 private:
johnc@1829 1071 // Update the incremental cset information when adding a region
johnc@1829 1072 // (should not be called directly).
johnc@1829 1073 void add_region_to_incremental_cset_common(HeapRegion* hr);
johnc@1829 1074
johnc@1829 1075 public:
johnc@1829 1076 // Add hr to the LHS of the incremental collection set.
johnc@1829 1077 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
johnc@1829 1078
johnc@1829 1079 // Add hr to the RHS of the incremental collection set.
johnc@1829 1080 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
johnc@1829 1081
johnc@1829 1082 #ifndef PRODUCT
johnc@1829 1083 void print_collection_set(HeapRegion* list_head, outputStream* st);
johnc@1829 1084 #endif // !PRODUCT
johnc@1829 1085
tonyp@1794 1086 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
tonyp@1794 1087 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
tonyp@1794 1088 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
tonyp@1794 1089
tonyp@1794 1090 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
tonyp@1794 1091 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
tonyp@1794 1092 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
tonyp@1794 1093
tonyp@2011 1094 // This sets the initiate_conc_mark_if_possible() flag to start a
tonyp@2011 1095 // new cycle, as long as we are not already in one. It's best if it
tonyp@2011 1096 // is called during a safepoint when the test whether a cycle is in
tonyp@2011 1097 // progress or not is stable.
tonyp@3114 1098 bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
tonyp@2011 1099
tonyp@1794 1100 // This is called at the very beginning of an evacuation pause (it
tonyp@1794 1101 // has to be the first thing that the pause does). If
tonyp@1794 1102 // initiate_conc_mark_if_possible() is true, and the concurrent
tonyp@1794 1103 // marking thread has completed its work during the previous cycle,
tonyp@1794 1104 // it will set during_initial_mark_pause() so that the pause does
tonyp@1794 1105 // the initial-mark work and start a marking cycle.
tonyp@1794 1106 void decide_on_conc_mark_initiation();
ysr@777 1107
ysr@777 1108 // If an expansion would be appropriate, because recent GC overhead had
ysr@777 1109 // exceeded the desired limit, return an amount to expand by.
ysr@777 1110 virtual size_t expansion_amount();
ysr@777 1111
ysr@777 1112 // note start of mark thread
ysr@777 1113 void note_start_of_mark_thread();
ysr@777 1114
ysr@777 1115 // The marked bytes of region "r" have changed; reclassify its desirability
ysr@777 1116 // for marking. Also asserts that "r" is eligible for a CS.
ysr@777 1117 virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
ysr@777 1118
ysr@777 1119 #ifndef PRODUCT
ysr@777 1120 // Check any appropriate marked bytes info, asserting false if
ysr@777 1121 // something's wrong, else returning "true".
ysr@777 1122 virtual bool assertMarkedBytesDataOK() = 0;
ysr@777 1123 #endif
ysr@777 1124
ysr@777 1125 // Print tracing information.
ysr@777 1126 void print_tracing_info() const;
ysr@777 1127
ysr@777 1128 // Print stats on young survival ratio
ysr@777 1129 void print_yg_surv_rate_info() const;
ysr@777 1130
apetrusenko@980 1131 void finished_recalculating_age_indexes(bool is_survivors) {
apetrusenko@980 1132 if (is_survivors) {
apetrusenko@980 1133 _survivor_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1134 } else {
apetrusenko@980 1135 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1136 }
ysr@777 1137 // do that for any other surv rate groups
ysr@777 1138 }
ysr@777 1139
tonyp@2315 1140 bool is_young_list_full() {
tonyp@2315 1141 size_t young_list_length = _g1->young_list()->length();
tonyp@2333 1142 size_t young_list_target_length = _young_list_target_length;
tonyp@2333 1143 return young_list_length >= young_list_target_length;
tonyp@2333 1144 }
tonyp@2333 1145
tonyp@2333 1146 bool can_expand_young_list() {
tonyp@2333 1147 size_t young_list_length = _g1->young_list()->length();
tonyp@2333 1148 size_t young_list_max_length = _young_list_max_length;
tonyp@2333 1149 return young_list_length < young_list_max_length;
tonyp@2333 1150 }
tonyp@2315 1151
tonyp@2315 1152 void update_region_num(bool young);
ysr@777 1153
ysr@777 1154 bool full_young_gcs() {
ysr@777 1155 return _full_young_gcs;
ysr@777 1156 }
ysr@777 1157 void set_full_young_gcs(bool full_young_gcs) {
ysr@777 1158 _full_young_gcs = full_young_gcs;
ysr@777 1159 }
ysr@777 1160
ysr@777 1161 bool adaptive_young_list_length() {
ysr@777 1162 return _adaptive_young_list_length;
ysr@777 1163 }
ysr@777 1164 void set_adaptive_young_list_length(bool adaptive_young_list_length) {
ysr@777 1165 _adaptive_young_list_length = adaptive_young_list_length;
ysr@777 1166 }
ysr@777 1167
ysr@777 1168 inline double get_gc_eff_factor() {
ysr@777 1169 double ratio = _known_garbage_ratio;
ysr@777 1170
ysr@777 1171 double square = ratio * ratio;
ysr@777 1172 // square = square * square;
ysr@777 1173 double ret = square * 9.0 + 1.0;
ysr@777 1174 #if 0
ysr@777 1175 gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
ysr@777 1176 #endif // 0
ysr@777 1177 guarantee(0.0 <= ret && ret < 10.0, "invariant!");
ysr@777 1178 return ret;
ysr@777 1179 }
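// Worked example (illustrative numbers only): with _known_garbage_ratio at
// 0.3, square is 0.09 and get_gc_eff_factor() returns
//   0.09 * 9.0 + 1.0 == 1.81,
// so the factor grows from 1.0 for a garbage-free heap towards (just under)
// 10.0 as the known-garbage ratio approaches 1.0, which is what the
// guarantee above checks.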
ysr@777 1180
ysr@777 1181 //
ysr@777 1182 // Survivor regions policy.
ysr@777 1183 //
ysr@777 1184 protected:
ysr@777 1185
ysr@777 1186 // Current tenuring threshold, set to 0 if the collector reaches the
ysr@777 1187 // maximum number of survivor regions.
ysr@777 1188 int _tenuring_threshold;
ysr@777 1189
apetrusenko@980 1190 // The limit on the number of regions allocated for survivors.
apetrusenko@980 1191 size_t _max_survivor_regions;
apetrusenko@980 1192
tonyp@2961 1193 // For reporting purposes.
tonyp@2961 1194 size_t _eden_bytes_before_gc;
tonyp@2961 1195 size_t _survivor_bytes_before_gc;
tonyp@2961 1196 size_t _capacity_before_gc;
tonyp@2961 1197
apetrusenko@980 1198 // The number of survivor regions after a collection.
apetrusenko@980 1199 size_t _recorded_survivor_regions;
apetrusenko@980 1200 // List of survivor regions.
apetrusenko@980 1201 HeapRegion* _recorded_survivor_head;
apetrusenko@980 1202 HeapRegion* _recorded_survivor_tail;
apetrusenko@980 1203
apetrusenko@980 1204 ageTable _survivors_age_table;
apetrusenko@980 1205
ysr@777 1206 public:
ysr@777 1207
ysr@777 1208 inline GCAllocPurpose
ysr@777 1209 evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
ysr@777 1210 if (age < _tenuring_threshold && src_region->is_young()) {
ysr@777 1211 return GCAllocForSurvived;
ysr@777 1212 } else {
ysr@777 1213 return GCAllocForTenured;
ysr@777 1214 }
ysr@777 1215 }
ysr@777 1216
ysr@777 1217 inline bool track_object_age(GCAllocPurpose purpose) {
ysr@777 1218 return purpose == GCAllocForSurvived;
ysr@777 1219 }
ysr@777 1220
apetrusenko@980 1221 static const size_t REGIONS_UNLIMITED = ~(size_t)0;
apetrusenko@980 1222
apetrusenko@980 1223 size_t max_regions(int purpose);
ysr@777 1224
ysr@777 1225 // The limit on regions for a particular purpose is reached.
ysr@777 1226 void note_alloc_region_limit_reached(int purpose) {
ysr@777 1227 if (purpose == GCAllocForSurvived) {
ysr@777 1228 _tenuring_threshold = 0;
ysr@777 1229 }
ysr@777 1230 }
ysr@777 1231
ysr@777 1232 void note_start_adding_survivor_regions() {
ysr@777 1233 _survivor_surv_rate_group->start_adding_regions();
ysr@777 1234 }
ysr@777 1235
ysr@777 1236 void note_stop_adding_survivor_regions() {
ysr@777 1237 _survivor_surv_rate_group->stop_adding_regions();
ysr@777 1238 }
apetrusenko@980 1239
apetrusenko@980 1240 void record_survivor_regions(size_t regions,
apetrusenko@980 1241 HeapRegion* head,
apetrusenko@980 1242 HeapRegion* tail) {
apetrusenko@980 1243 _recorded_survivor_regions = regions;
apetrusenko@980 1244 _recorded_survivor_head = head;
apetrusenko@980 1245 _recorded_survivor_tail = tail;
apetrusenko@980 1246 }
apetrusenko@980 1247
tonyp@1273 1248 size_t recorded_survivor_regions() {
tonyp@1273 1249 return _recorded_survivor_regions;
tonyp@1273 1250 }
tonyp@1273 1251
apetrusenko@980 1252 void record_thread_age_table(ageTable* age_table)
apetrusenko@980 1253 {
apetrusenko@980 1254 _survivors_age_table.merge_par(age_table);
apetrusenko@980 1255 }
apetrusenko@980 1256
tonyp@3119 1257 void update_max_gc_locker_expansion();
tonyp@2333 1258
apetrusenko@980 1259 // Calculates survivor space parameters.
tonyp@3119 1260 void update_survivors_policy();
apetrusenko@980 1261
ysr@777 1262 };
ysr@777 1263
ysr@777 1264 // This encapsulates a particular strategy for a g1 Collector.
ysr@777 1265 //
ysr@777 1266 // Start a concurrent mark when our heap size is n bytes
ysr@777 1267 // greater than our heap size was at the last concurrent
ysr@777 1268 // mark. Where n is a function of the CMSTriggerRatio
ysr@777 1269 // and the MinHeapFreeRatio.
ysr@777 1270 //
ysr@777 1271 // Start a g1 collection pause when we have allocated the
ysr@777 1272 // average number of bytes currently being freed in
ysr@777 1273 // a collection, but only if it is at least one region
ysr@777 1274 // full
ysr@777 1275 //
ysr@777 1276 // Resize Heap based on desired
ysr@777 1277 // allocation space, where desired allocation space is
ysr@777 1278 // a function of survival rate and desired future to size.
ysr@777 1279 //
ysr@777 1280 // Choose collection set by first picking all older regions
ysr@777 1281 // which have a survival rate which beats our projected young
ysr@777 1282 // survival rate. Then fill out the number of needed regions
ysr@777 1283 // with young regions.
ysr@777 1284
ysr@777 1285 class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
ysr@777 1286 CollectionSetChooser* _collectionSetChooser;
ysr@777 1287
tonyp@2062 1288 virtual void choose_collection_set(double target_pause_time_ms);
ysr@777 1289 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 1290 size_t start_used);
ysr@777 1291 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 1292 size_t max_live_bytes);
ysr@777 1293 virtual void record_full_collection_end();
ysr@777 1294
ysr@777 1295 public:
ysr@777 1296 G1CollectorPolicy_BestRegionsFirst() {
ysr@777 1297 _collectionSetChooser = new CollectionSetChooser();
ysr@777 1298 }
tonyp@2062 1299 void record_collection_pause_end();
ysr@777 1300 // This is not needed any more, after the CSet choosing code was
ysr@777 1301 // changed to use the pause prediction work. But let's leave the
ysr@777 1302 // hook in just in case.
ysr@777 1303 void note_change_in_marked_bytes(HeapRegion* r) { }
ysr@777 1304 #ifndef PRODUCT
ysr@777 1305 bool assertMarkedBytesDataOK();
ysr@777 1306 #endif
ysr@777 1307 };
ysr@777 1308
ysr@777 1309 // This should move to some place more general...
ysr@777 1310
ysr@777 1311 // If we have "n" measurements, and we've kept track of their "sum" and the
ysr@777 1312 // "sum_of_squares" of the measurements, this returns the variance of the
ysr@777 1313 // sequence.
ysr@777 1314 inline double variance(int n, double sum_of_squares, double sum) {
ysr@777 1315 double n_d = (double)n;
ysr@777 1316 double avg = sum/n_d;
ysr@777 1317 return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
ysr@777 1318 }
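// Worked example (illustrative): for the three measurements {1.0, 2.0, 3.0}
// we have n == 3, sum == 6.0 and sum_of_squares == 14.0, so avg == 2.0 and
//   variance(3, 14.0, 6.0) == (14.0 - 2.0 * 2.0 * 6.0 + 3.0 * 2.0 * 2.0) / 3.0
//                          == 2.0 / 3.0,
// which matches the usual population variance sum_of_squares / n - avg * avg.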
ysr@777 1319
stefank@2314 1320 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
