src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author:      tonyp
date:        Tue, 24 Aug 2010 17:24:33 -0400
changeset:   2315:631f79e71e90
parent:      2314:f95d63e2154a
child:       2333:016a3628c885
permissions: -rw-r--r--

6974966: G1: unnecessary direct-to-old allocations
Summary: This change revamps the slow allocation path of G1. Improvements include the following:
a) Allocations directly to old regions are now totally banned. G1 now only allows allocations out of young regions (with the only exception being humongous regions).
b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that just got allocated.
c) allocate_new_tlab() and mem_allocate() are now implemented differently and TLAB allocations are only done by allocate_new_tlab().
d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it will perform the allocation at the end of the safepoint so that the thread that initiated the GC also gets "first pick" of any space made available by the GC.
e) If a thread is unable to allocate a humongous object it will schedule an evacuation pause in case it reclaims enough regions so that the humongous allocation can be satisfied afterwards.
f) The G1 policy is more careful to set the young list target length to be the survivor count + 1.
g) Lots of code tidy-up, removal, and refactoring to make future changes easier.
Reviewed-by: johnc, ysr
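
As a rough illustration of points (d) and (e), the slow allocation path can be thought of as the retry loop sketched below. This is a hedged sketch only: the helper names (attempt_allocation, schedule_evacuation_pause_and_allocate) and the overall structure are illustrative assumptions, not the actual G1CollectedHeap interface.

#include <stddef.h>

class HeapWord; // opaque heap-word type, as in HotSpot

// Hypothetical helpers standing in for the real slow-path machinery.
HeapWord* attempt_allocation(size_t word_size);
HeapWord* schedule_evacuation_pause_and_allocate(size_t word_size);

HeapWord* slow_path_allocate(size_t word_size) {
  const int max_attempts = 2;
  for (int attempt = 0; attempt < max_attempts; ++attempt) {
    // Try to allocate out of a young (or humongous) region first;
    // direct-to-old allocations are banned, per point (a) above.
    HeapWord* result = attempt_allocation(word_size);
    if (result != NULL) {
      return result;
    }
    // Otherwise schedule an evacuation pause; the initiating thread performs
    // its allocation at the end of the safepoint, so it gets "first pick" of
    // any space freed by the GC (points (d) and (e)).
    result = schedule_evacuation_pause_and_allocate(word_size);
    if (result != NULL) {
      return result;
    }
  }
  return NULL; // caller may then expand the heap or report allocation failure
}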

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/collectionSetChooser.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1MMUTracker.hpp"
stefank@2314 30 #include "memory/collectorPolicy.hpp"
stefank@2314 31
ysr@777 32 // A G1CollectorPolicy makes policy decisions that determine the
ysr@777 33 // characteristics of the collector. Examples include:
ysr@777 34 // * choice of collection set.
ysr@777 35 // * when to collect.
ysr@777 36
ysr@777 37 class HeapRegion;
ysr@777 38 class CollectionSetChooser;
ysr@777 39
ysr@777 40 // Yes, this is a bit unpleasant... but it saves replicating the same thing
ysr@777 41 // over and over again and introducing subtle problems through small typos and
ysr@777 42 // cutting and pasting mistakes. The macro below introduces a number
ysr@777 43 // sequence into the following two classes and the methods that access it.
ysr@777 44
ysr@777 45 #define define_num_seq(name) \
ysr@777 46 private: \
ysr@777 47 NumberSeq _all_##name##_times_ms; \
ysr@777 48 public: \
ysr@777 49 void record_##name##_time_ms(double ms) { \
ysr@777 50 _all_##name##_times_ms.add(ms); \
ysr@777 51 } \
ysr@777 52 NumberSeq* get_##name##_seq() { \
ysr@777 53 return &_all_##name##_times_ms; \
ysr@777 54 }
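// For example, define_num_seq(total) in PauseSummary below expands to roughly:
//
//   private:
//    NumberSeq _all_total_times_ms;
//   public:
//    void record_total_time_ms(double ms) { _all_total_times_ms.add(ms); }
//    NumberSeq* get_total_seq()           { return &_all_total_times_ms; }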
ysr@777 55
ysr@777 56 class MainBodySummary;
ysr@777 57
apetrusenko@984 58 class PauseSummary: public CHeapObj {
ysr@777 59 define_num_seq(total)
ysr@777 60 define_num_seq(other)
ysr@777 61
ysr@777 62 public:
ysr@777 63 virtual MainBodySummary* main_body_summary() { return NULL; }
ysr@777 64 };
ysr@777 65
apetrusenko@984 66 class MainBodySummary: public CHeapObj {
ysr@777 67 define_num_seq(satb_drain) // optional
ysr@777 68 define_num_seq(parallel) // parallel only
ysr@777 69 define_num_seq(ext_root_scan)
ysr@777 70 define_num_seq(mark_stack_scan)
ysr@777 71 define_num_seq(update_rs)
ysr@777 72 define_num_seq(scan_rs)
ysr@777 73 define_num_seq(obj_copy)
ysr@777 74 define_num_seq(termination) // parallel only
ysr@777 75 define_num_seq(parallel_other) // parallel only
ysr@777 76 define_num_seq(mark_closure)
ysr@777 77 define_num_seq(clear_ct) // parallel only
ysr@777 78 };
ysr@777 79
apetrusenko@1112 80 class Summary: public PauseSummary,
apetrusenko@1112 81 public MainBodySummary {
ysr@777 82 public:
ysr@777 83 virtual MainBodySummary* main_body_summary() { return this; }
ysr@777 84 };
ysr@777 85
ysr@777 86 class G1CollectorPolicy: public CollectorPolicy {
ysr@777 87 protected:
ysr@777 88 // The number of pauses during the execution.
ysr@777 89 long _n_pauses;
ysr@777 90
ysr@777 91 // either equal to the number of parallel threads, if ParallelGCThreads
ysr@777 92 // has been set, or 1 otherwise
ysr@777 93 int _parallel_gc_threads;
ysr@777 94
ysr@777 95 enum SomePrivateConstants {
tonyp@1377 96 NumPrevPausesForHeuristics = 10
ysr@777 97 };
ysr@777 98
ysr@777 99 G1MMUTracker* _mmu_tracker;
ysr@777 100
ysr@777 101 void initialize_flags();
ysr@777 102
ysr@777 103 void initialize_all() {
ysr@777 104 initialize_flags();
ysr@777 105 initialize_size_info();
ysr@777 106 initialize_perm_generation(PermGen::MarkSweepCompact);
ysr@777 107 }
ysr@777 108
ysr@777 109 virtual size_t default_init_heap_size() {
ysr@777 110 // Pick some reasonable default.
ysr@777 111 return 8*M;
ysr@777 112 }
ysr@777 113
ysr@777 114 double _cur_collection_start_sec;
ysr@777 115 size_t _cur_collection_pause_used_at_start_bytes;
ysr@777 116 size_t _cur_collection_pause_used_regions_at_start;
ysr@777 117 size_t _prev_collection_pause_used_at_end_bytes;
ysr@777 118 double _cur_collection_par_time_ms;
ysr@777 119 double _cur_satb_drain_time_ms;
ysr@777 120 double _cur_clear_ct_time_ms;
ysr@777 121 bool _satb_drain_time_set;
ysr@777 122
johnc@1325 123 #ifndef PRODUCT
johnc@1325 124 // Card Table Count Cache stats
johnc@1325 125 double _min_clear_cc_time_ms; // min
johnc@1325 126 double _max_clear_cc_time_ms; // max
johnc@1325 127 double _cur_clear_cc_time_ms; // clearing time during current pause
johnc@1325 128 double _cum_clear_cc_time_ms; // cumulative clearing time
johnc@1325 129 jlong _num_cc_clears; // number of times the card count cache has been cleared
johnc@1325 130 #endif
johnc@1325 131
ysr@777 132 double _cur_CH_strong_roots_end_sec;
ysr@777 133 double _cur_CH_strong_roots_dur_ms;
ysr@777 134 double _cur_G1_strong_roots_end_sec;
ysr@777 135 double _cur_G1_strong_roots_dur_ms;
ysr@777 136
ysr@777 137 // Statistics for recent GC pauses. See below for how indexed.
ysr@777 138 TruncatedSeq* _recent_CH_strong_roots_times_ms;
ysr@777 139 TruncatedSeq* _recent_G1_strong_roots_times_ms;
ysr@777 140 TruncatedSeq* _recent_evac_times_ms;
ysr@777 141 // These exclude marking times.
ysr@777 142 TruncatedSeq* _recent_pause_times_ms;
ysr@777 143 TruncatedSeq* _recent_gc_times_ms;
ysr@777 144
ysr@777 145 TruncatedSeq* _recent_CS_bytes_used_before;
ysr@777 146 TruncatedSeq* _recent_CS_bytes_surviving;
ysr@777 147
ysr@777 148 TruncatedSeq* _recent_rs_sizes;
ysr@777 149
ysr@777 150 TruncatedSeq* _concurrent_mark_init_times_ms;
ysr@777 151 TruncatedSeq* _concurrent_mark_remark_times_ms;
ysr@777 152 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
ysr@777 153
apetrusenko@1112 154 Summary* _summary;
ysr@777 155
ysr@777 156 NumberSeq* _all_pause_times_ms;
ysr@777 157 NumberSeq* _all_full_gc_times_ms;
ysr@777 158 double _stop_world_start;
ysr@777 159 NumberSeq* _all_stop_world_times_ms;
ysr@777 160 NumberSeq* _all_yield_times_ms;
ysr@777 161
ysr@777 162 size_t _region_num_young;
ysr@777 163 size_t _region_num_tenured;
ysr@777 164 size_t _prev_region_num_young;
ysr@777 165 size_t _prev_region_num_tenured;
ysr@777 166
ysr@777 167 NumberSeq* _all_mod_union_times_ms;
ysr@777 168
ysr@777 169 int _aux_num;
ysr@777 170 NumberSeq* _all_aux_times_ms;
ysr@777 171 double* _cur_aux_start_times_ms;
ysr@777 172 double* _cur_aux_times_ms;
ysr@777 173 bool* _cur_aux_times_set;
ysr@777 174
tonyp@1966 175 double* _par_last_gc_worker_start_times_ms;
ysr@777 176 double* _par_last_ext_root_scan_times_ms;
ysr@777 177 double* _par_last_mark_stack_scan_times_ms;
ysr@777 178 double* _par_last_update_rs_times_ms;
ysr@777 179 double* _par_last_update_rs_processed_buffers;
ysr@777 180 double* _par_last_scan_rs_times_ms;
ysr@777 181 double* _par_last_obj_copy_times_ms;
ysr@777 182 double* _par_last_termination_times_ms;
tonyp@1966 183 double* _par_last_termination_attempts;
tonyp@1966 184 double* _par_last_gc_worker_end_times_ms;
ysr@777 185
ysr@777 186 // indicates that we are in young GC mode
ysr@777 187 bool _in_young_gc_mode;
ysr@777 188
ysr@777 189 // indicates whether we are in full young or partially young GC mode
ysr@777 190 bool _full_young_gcs;
ysr@777 191
ysr@777 192 // if true, then it tries to dynamically adjust the length of the
ysr@777 193 // young list
ysr@777 194 bool _adaptive_young_list_length;
ysr@777 195 size_t _young_list_min_length;
ysr@777 196 size_t _young_list_target_length;
ysr@777 197 size_t _young_list_fixed_length;
ysr@777 198
ysr@777 199 size_t _young_cset_length;
ysr@777 200 bool _last_young_gc_full;
ysr@777 201
ysr@777 202 unsigned _full_young_pause_num;
ysr@777 203 unsigned _partial_young_pause_num;
ysr@777 204
ysr@777 205 bool _during_marking;
ysr@777 206 bool _in_marking_window;
ysr@777 207 bool _in_marking_window_im;
ysr@777 208
ysr@777 209 SurvRateGroup* _short_lived_surv_rate_group;
ysr@777 210 SurvRateGroup* _survivor_surv_rate_group;
ysr@777 211 // add here any more surv rate groups
ysr@777 212
tonyp@1791 213 double _gc_overhead_perc;
tonyp@1791 214
ysr@777 215 bool during_marking() {
ysr@777 216 return _during_marking;
ysr@777 217 }
ysr@777 218
ysr@777 219 // <NEW PREDICTION>
ysr@777 220
ysr@777 221 private:
ysr@777 222 enum PredictionConstants {
ysr@777 223 TruncatedSeqLength = 10
ysr@777 224 };
ysr@777 225
ysr@777 226 TruncatedSeq* _alloc_rate_ms_seq;
ysr@777 227 double _prev_collection_pause_end_ms;
ysr@777 228
ysr@777 229 TruncatedSeq* _pending_card_diff_seq;
ysr@777 230 TruncatedSeq* _rs_length_diff_seq;
ysr@777 231 TruncatedSeq* _cost_per_card_ms_seq;
ysr@777 232 TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
ysr@777 233 TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
ysr@777 234 TruncatedSeq* _cost_per_entry_ms_seq;
ysr@777 235 TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
ysr@777 236 TruncatedSeq* _cost_per_byte_ms_seq;
ysr@777 237 TruncatedSeq* _constant_other_time_ms_seq;
ysr@777 238 TruncatedSeq* _young_other_cost_per_region_ms_seq;
ysr@777 239 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
ysr@777 240
ysr@777 241 TruncatedSeq* _pending_cards_seq;
ysr@777 242 TruncatedSeq* _scanned_cards_seq;
ysr@777 243 TruncatedSeq* _rs_lengths_seq;
ysr@777 244
ysr@777 245 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
ysr@777 246
ysr@777 247 TruncatedSeq* _young_gc_eff_seq;
ysr@777 248
ysr@777 249 TruncatedSeq* _max_conc_overhead_seq;
ysr@777 250
ysr@777 251 size_t _recorded_young_regions;
ysr@777 252 size_t _recorded_non_young_regions;
ysr@777 253 size_t _recorded_region_num;
ysr@777 254
ysr@777 255 size_t _free_regions_at_end_of_collection;
ysr@777 256
ysr@777 257 size_t _recorded_rs_lengths;
ysr@777 258 size_t _max_rs_lengths;
ysr@777 259
ysr@777 260 size_t _recorded_marked_bytes;
ysr@777 261 size_t _recorded_young_bytes;
ysr@777 262
ysr@777 263 size_t _predicted_pending_cards;
ysr@777 264 size_t _predicted_cards_scanned;
ysr@777 265 size_t _predicted_rs_lengths;
ysr@777 266 size_t _predicted_bytes_to_copy;
ysr@777 267
ysr@777 268 double _predicted_survival_ratio;
ysr@777 269 double _predicted_rs_update_time_ms;
ysr@777 270 double _predicted_rs_scan_time_ms;
ysr@777 271 double _predicted_object_copy_time_ms;
ysr@777 272 double _predicted_constant_other_time_ms;
ysr@777 273 double _predicted_young_other_time_ms;
ysr@777 274 double _predicted_non_young_other_time_ms;
ysr@777 275 double _predicted_pause_time_ms;
ysr@777 276
ysr@777 277 double _vtime_diff_ms;
ysr@777 278
ysr@777 279 double _recorded_young_free_cset_time_ms;
ysr@777 280 double _recorded_non_young_free_cset_time_ms;
ysr@777 281
ysr@777 282 double _sigma;
ysr@777 283 double _expensive_region_limit_ms;
ysr@777 284
ysr@777 285 size_t _rs_lengths_prediction;
ysr@777 286
ysr@777 287 size_t _known_garbage_bytes;
ysr@777 288 double _known_garbage_ratio;
ysr@777 289
ysr@777 290 double sigma() {
ysr@777 291 return _sigma;
ysr@777 292 }
ysr@777 293
ysr@777 294 // A function that prevents us from putting too much stock in small sample
ysr@777 295 // sets. Returns a factor >= 1.0 that depends on the number of samples:
ysr@777 296 // 5 or more samples yields 1.0; fewer samples scale the factor linearly up
ysr@777 297 // to 1.0 + 2.0 * sigma() at a single sample.
ysr@777 298 double confidence_factor(int samples) {
ysr@777 299 if (samples > 4) return 1.0;
ysr@777 300 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
ysr@777 301 }
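// Worked example (assuming sigma() == 0.5, an illustrative value only):
// samples == 1 gives 1.0 + 0.5 * (5 - 1) / 2.0 = 2.0, samples == 3 gives
// 1.0 + 0.5 * (5 - 3) / 2.0 = 1.5, and samples >= 5 gives 1.0.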
ysr@777 302
ysr@777 303 double get_new_neg_prediction(TruncatedSeq* seq) {
ysr@777 304 return seq->davg() - sigma() * seq->dsd();
ysr@777 305 }
ysr@777 306
ysr@777 307 #ifndef PRODUCT
ysr@777 308 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
ysr@777 309 #endif // PRODUCT
ysr@777 310
iveresov@1546 311 void adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 312 double update_rs_processed_buffers,
iveresov@1546 313 double goal_ms);
iveresov@1546 314
ysr@777 315 protected:
ysr@777 316 double _pause_time_target_ms;
ysr@777 317 double _recorded_young_cset_choice_time_ms;
ysr@777 318 double _recorded_non_young_cset_choice_time_ms;
ysr@777 319 bool _within_target;
ysr@777 320 size_t _pending_cards;
ysr@777 321 size_t _max_pending_cards;
ysr@777 322
ysr@777 323 public:
ysr@777 324
ysr@777 325 void set_region_short_lived(HeapRegion* hr) {
ysr@777 326 hr->install_surv_rate_group(_short_lived_surv_rate_group);
ysr@777 327 }
ysr@777 328
ysr@777 329 void set_region_survivors(HeapRegion* hr) {
ysr@777 330 hr->install_surv_rate_group(_survivor_surv_rate_group);
ysr@777 331 }
ysr@777 332
ysr@777 333 #ifndef PRODUCT
ysr@777 334 bool verify_young_ages();
ysr@777 335 #endif // PRODUCT
ysr@777 336
ysr@777 337 double get_new_prediction(TruncatedSeq* seq) {
ysr@777 338 return MAX2(seq->davg() + sigma() * seq->dsd(),
ysr@777 339 seq->davg() * confidence_factor(seq->num()));
ysr@777 340 }
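// In other words: prediction = max(avg + sigma() * stddev,
//                                  avg * confidence_factor(num_samples)),
// i.e. the sequence's average padded by sigma() standard deviations, but
// never less than the average scaled by the small-sample confidence factor
// defined above. Illustrative values only: with davg() == 10.0 ms,
// dsd() == 2.0 ms, sigma() == 0.5 and num() >= 5, this yields
// max(10.0 + 0.5 * 2.0, 10.0 * 1.0) = 11.0 ms.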
ysr@777 341
ysr@777 342 size_t young_cset_length() {
ysr@777 343 return _young_cset_length;
ysr@777 344 }
ysr@777 345
ysr@777 346 void record_max_rs_lengths(size_t rs_lengths) {
ysr@777 347 _max_rs_lengths = rs_lengths;
ysr@777 348 }
ysr@777 349
ysr@777 350 size_t predict_pending_card_diff() {
ysr@777 351 double prediction = get_new_neg_prediction(_pending_card_diff_seq);
ysr@777 352 if (prediction < 0.00001)
ysr@777 353 return 0;
ysr@777 354 else
ysr@777 355 return (size_t) prediction;
ysr@777 356 }
ysr@777 357
ysr@777 358 size_t predict_pending_cards() {
ysr@777 359 size_t max_pending_card_num = _g1->max_pending_card_num();
ysr@777 360 size_t diff = predict_pending_card_diff();
ysr@777 361 size_t prediction;
ysr@777 362 if (diff > max_pending_card_num)
ysr@777 363 prediction = max_pending_card_num;
ysr@777 364 else
ysr@777 365 prediction = max_pending_card_num - diff;
ysr@777 366
ysr@777 367 return prediction;
ysr@777 368 }
ysr@777 369
ysr@777 370 size_t predict_rs_length_diff() {
ysr@777 371 return (size_t) get_new_prediction(_rs_length_diff_seq);
ysr@777 372 }
ysr@777 373
ysr@777 374 double predict_alloc_rate_ms() {
ysr@777 375 return get_new_prediction(_alloc_rate_ms_seq);
ysr@777 376 }
ysr@777 377
ysr@777 378 double predict_cost_per_card_ms() {
ysr@777 379 return get_new_prediction(_cost_per_card_ms_seq);
ysr@777 380 }
ysr@777 381
ysr@777 382 double predict_rs_update_time_ms(size_t pending_cards) {
ysr@777 383 return (double) pending_cards * predict_cost_per_card_ms();
ysr@777 384 }
ysr@777 385
ysr@777 386 double predict_fully_young_cards_per_entry_ratio() {
ysr@777 387 return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
ysr@777 388 }
ysr@777 389
ysr@777 390 double predict_partially_young_cards_per_entry_ratio() {
ysr@777 391 if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
ysr@777 392 return predict_fully_young_cards_per_entry_ratio();
ysr@777 393 else
ysr@777 394 return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
ysr@777 395 }
ysr@777 396
ysr@777 397 size_t predict_young_card_num(size_t rs_length) {
ysr@777 398 return (size_t) ((double) rs_length *
ysr@777 399 predict_fully_young_cards_per_entry_ratio());
ysr@777 400 }
ysr@777 401
ysr@777 402 size_t predict_non_young_card_num(size_t rs_length) {
ysr@777 403 return (size_t) ((double) rs_length *
ysr@777 404 predict_partially_young_cards_per_entry_ratio());
ysr@777 405 }
ysr@777 406
ysr@777 407 double predict_rs_scan_time_ms(size_t card_num) {
ysr@777 408 if (full_young_gcs())
ysr@777 409 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 410 else
ysr@777 411 return predict_partially_young_rs_scan_time_ms(card_num);
ysr@777 412 }
ysr@777 413
ysr@777 414 double predict_partially_young_rs_scan_time_ms(size_t card_num) {
ysr@777 415 if (_partially_young_cost_per_entry_ms_seq->num() < 3)
ysr@777 416 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 417 else
ysr@777 418 return (double) card_num *
ysr@777 419 get_new_prediction(_partially_young_cost_per_entry_ms_seq);
ysr@777 420 }
ysr@777 421
ysr@777 422 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
ysr@777 423 if (_cost_per_byte_ms_during_cm_seq->num() < 3)
ysr@777 424 return 1.1 * (double) bytes_to_copy *
ysr@777 425 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 426 else
ysr@777 427 return (double) bytes_to_copy *
ysr@777 428 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
ysr@777 429 }
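// That is: until at least 3 per-byte cost samples have been gathered while
// concurrent marking is in progress, fall back to the regular per-byte cost
// padded by 10% (presumably to account for the extra overhead of copying
// while marking is running).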
ysr@777 430
ysr@777 431 double predict_object_copy_time_ms(size_t bytes_to_copy) {
ysr@777 432 if (_in_marking_window && !_in_marking_window_im)
ysr@777 433 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
ysr@777 434 else
ysr@777 435 return (double) bytes_to_copy *
ysr@777 436 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 437 }
ysr@777 438
ysr@777 439 double predict_constant_other_time_ms() {
ysr@777 440 return get_new_prediction(_constant_other_time_ms_seq);
ysr@777 441 }
ysr@777 442
ysr@777 443 double predict_young_other_time_ms(size_t young_num) {
ysr@777 444 return
ysr@777 445 (double) young_num *
ysr@777 446 get_new_prediction(_young_other_cost_per_region_ms_seq);
ysr@777 447 }
ysr@777 448
ysr@777 449 double predict_non_young_other_time_ms(size_t non_young_num) {
ysr@777 450 return
ysr@777 451 (double) non_young_num *
ysr@777 452 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
ysr@777 453 }
ysr@777 454
ysr@777 455 void check_if_region_is_too_expensive(double predicted_time_ms);
ysr@777 456
ysr@777 457 double predict_young_collection_elapsed_time_ms(size_t adjustment);
ysr@777 458 double predict_base_elapsed_time_ms(size_t pending_cards);
ysr@777 459 double predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 460 size_t scanned_cards);
ysr@777 461 size_t predict_bytes_to_copy(HeapRegion* hr);
ysr@777 462 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
ysr@777 463
johnc@1829 464 // for use by: calculate_young_list_target_length(rs_length)
johnc@1829 465 bool predict_will_fit(size_t young_region_num,
johnc@1829 466 double base_time_ms,
johnc@1829 467 size_t init_free_regions,
johnc@1829 468 double target_pause_time_ms);
ysr@777 469
ysr@777 470 void start_recording_regions();
johnc@1829 471 void record_cset_region_info(HeapRegion* hr, bool young);
johnc@1829 472 void record_non_young_cset_region(HeapRegion* hr);
johnc@1829 473
johnc@1829 474 void set_recorded_young_regions(size_t n_regions);
johnc@1829 475 void set_recorded_young_bytes(size_t bytes);
johnc@1829 476 void set_recorded_rs_lengths(size_t rs_lengths);
johnc@1829 477 void set_predicted_bytes_to_copy(size_t bytes);
johnc@1829 478
ysr@777 479 void end_recording_regions();
ysr@777 480
ysr@777 481 void record_vtime_diff_ms(double vtime_diff_ms) {
ysr@777 482 _vtime_diff_ms = vtime_diff_ms;
ysr@777 483 }
ysr@777 484
ysr@777 485 void record_young_free_cset_time_ms(double time_ms) {
ysr@777 486 _recorded_young_free_cset_time_ms = time_ms;
ysr@777 487 }
ysr@777 488
ysr@777 489 void record_non_young_free_cset_time_ms(double time_ms) {
ysr@777 490 _recorded_non_young_free_cset_time_ms = time_ms;
ysr@777 491 }
ysr@777 492
ysr@777 493 double predict_young_gc_eff() {
ysr@777 494 return get_new_neg_prediction(_young_gc_eff_seq);
ysr@777 495 }
ysr@777 496
apetrusenko@980 497 double predict_survivor_regions_evac_time();
apetrusenko@980 498
ysr@777 499 // </NEW PREDICTION>
ysr@777 500
ysr@777 501 public:
ysr@777 502 void cset_regions_freed() {
ysr@777 503 bool propagate = _last_young_gc_full && !_in_marking_window;
ysr@777 504 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 505 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 506 // also call it on any more surv rate groups
ysr@777 507 }
ysr@777 508
ysr@777 509 void set_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 510 _known_garbage_bytes = known_garbage_bytes;
ysr@777 511 size_t heap_bytes = _g1->capacity();
ysr@777 512 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 513 }
ysr@777 514
ysr@777 515 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 516 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
ysr@777 517
ysr@777 518 _known_garbage_bytes -= known_garbage_bytes;
ysr@777 519 size_t heap_bytes = _g1->capacity();
ysr@777 520 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 521 }
ysr@777 522
ysr@777 523 G1MMUTracker* mmu_tracker() {
ysr@777 524 return _mmu_tracker;
ysr@777 525 }
ysr@777 526
tonyp@2011 527 double max_pause_time_ms() {
tonyp@2011 528 return _mmu_tracker->max_gc_time() * 1000.0;
tonyp@2011 529 }
tonyp@2011 530
ysr@777 531 double predict_init_time_ms() {
ysr@777 532 return get_new_prediction(_concurrent_mark_init_times_ms);
ysr@777 533 }
ysr@777 534
ysr@777 535 double predict_remark_time_ms() {
ysr@777 536 return get_new_prediction(_concurrent_mark_remark_times_ms);
ysr@777 537 }
ysr@777 538
ysr@777 539 double predict_cleanup_time_ms() {
ysr@777 540 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
ysr@777 541 }
ysr@777 542
ysr@777 543 // Returns an estimate of the survival rate of the region at yg-age
ysr@777 544 // "yg_age".
apetrusenko@980 545 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
apetrusenko@980 546 TruncatedSeq* seq = surv_rate_group->get_seq(age);
ysr@777 547 if (seq->num() == 0)
ysr@777 548 gclog_or_tty->print("BARF! age is %d", age);
ysr@777 549 guarantee( seq->num() > 0, "invariant" );
ysr@777 550 double pred = get_new_prediction(seq);
ysr@777 551 if (pred > 1.0)
ysr@777 552 pred = 1.0;
ysr@777 553 return pred;
ysr@777 554 }
ysr@777 555
apetrusenko@980 556 double predict_yg_surv_rate(int age) {
apetrusenko@980 557 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
apetrusenko@980 558 }
apetrusenko@980 559
ysr@777 560 double accum_yg_surv_rate_pred(int age) {
ysr@777 561 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
ysr@777 562 }
ysr@777 563
ysr@777 564 protected:
tonyp@1966 565 void print_stats(int level, const char* str, double value);
tonyp@1966 566 void print_stats(int level, const char* str, int value);
tonyp@1966 567
tonyp@1966 568 void print_par_stats(int level, const char* str, double* data) {
ysr@777 569 print_par_stats(level, str, data, true);
ysr@777 570 }
tonyp@1966 571 void print_par_stats(int level, const char* str, double* data, bool summary);
tonyp@1966 572 void print_par_sizes(int level, const char* str, double* data, bool summary);
ysr@777 573
ysr@777 574 void check_other_times(int level,
ysr@777 575 NumberSeq* other_times_ms,
ysr@777 576 NumberSeq* calc_other_times_ms) const;
ysr@777 577
ysr@777 578 void print_summary (PauseSummary* stats) const;
ysr@777 579
ysr@777 580 void print_summary (int level, const char* str, NumberSeq* seq) const;
ysr@777 581 void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
ysr@777 582
ysr@777 583 double avg_value (double* data);
ysr@777 584 double max_value (double* data);
ysr@777 585 double sum_of_values (double* data);
ysr@777 586 double max_sum (double* data1, double* data2);
ysr@777 587
ysr@777 588 int _last_satb_drain_processed_buffers;
ysr@777 589 int _last_update_rs_processed_buffers;
ysr@777 590 double _last_pause_time_ms;
ysr@777 591
ysr@777 592 size_t _bytes_in_to_space_before_gc;
ysr@777 593 size_t _bytes_in_to_space_after_gc;
ysr@777 594 size_t bytes_in_to_space_during_gc() {
ysr@777 595 return
ysr@777 596 _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
ysr@777 597 }
ysr@777 598 size_t _bytes_in_collection_set_before_gc;
ysr@777 599 // Used to count used bytes in CS.
ysr@777 600 friend class CountCSClosure;
ysr@777 601
ysr@777 602 // Statistics kept per GC stoppage, pause or full.
ysr@777 603 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
ysr@777 604
ysr@777 605 // We track markings.
ysr@777 606 int _num_markings;
ysr@777 607 double _mark_thread_startup_sec; // Time at startup of marking thread
ysr@777 608
ysr@777 609 // Add a new GC of the given duration and end time to the record.
ysr@777 610 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
ysr@777 611
ysr@777 612 // The head of the list (via "next_in_collection_set()") representing the
johnc@1829 613 // current collection set. Set from the incrementally built collection
johnc@1829 614 // set at the start of the pause.
ysr@777 615 HeapRegion* _collection_set;
johnc@1829 616
johnc@1829 617 // The number of regions in the collection set. Set from the incrementally
johnc@1829 618 // built collection set at the start of an evacuation pause.
ysr@777 619 size_t _collection_set_size;
johnc@1829 620
johnc@1829 621 // The number of bytes in the collection set before the pause. Set from
johnc@1829 622 // the incrementally built collection set at the start of an evacuation
johnc@1829 623 // pause.
ysr@777 624 size_t _collection_set_bytes_used_before;
ysr@777 625
johnc@1829 626 // The associated information that is maintained while the incremental
johnc@1829 627 // collection set is being built with young regions. Used to populate
johnc@1829 628 // the recorded info for the evacuation pause.
johnc@1829 629
johnc@1829 630 enum CSetBuildType {
johnc@1829 631 Active, // We are actively building the collection set
johnc@1829 632 Inactive // We are not actively building the collection set
johnc@1829 633 };
johnc@1829 634
johnc@1829 635 CSetBuildType _inc_cset_build_state;
johnc@1829 636
johnc@1829 637 // The head of the incrementally built collection set.
johnc@1829 638 HeapRegion* _inc_cset_head;
johnc@1829 639
johnc@1829 640 // The tail of the incrementally built collection set.
johnc@1829 641 HeapRegion* _inc_cset_tail;
johnc@1829 642
johnc@1829 643 // The number of regions in the incrementally built collection set.
johnc@1829 644 // Used to set _collection_set_size at the start of an evacuation
johnc@1829 645 // pause.
johnc@1829 646 size_t _inc_cset_size;
johnc@1829 647
johnc@1829 648 // Used as the index in the surviving young words structure
johnc@1829 649 // which tracks the amount of space, for each young region,
johnc@1829 650 // that survives the pause.
johnc@1829 651 size_t _inc_cset_young_index;
johnc@1829 652
johnc@1829 653 // The number of bytes in the incrementally built collection set.
johnc@1829 654 // Used to set _collection_set_bytes_used_before at the start of
johnc@1829 655 // an evacuation pause.
johnc@1829 656 size_t _inc_cset_bytes_used_before;
johnc@1829 657
johnc@1829 658 // Used to record the highest end of heap region in collection set
johnc@1829 659 HeapWord* _inc_cset_max_finger;
johnc@1829 660
johnc@1829 661 // The number of recorded used bytes in the young regions
johnc@1829 662 // of the collection set. This is the sum of the used() bytes
johnc@1829 663 // of retired young regions in the collection set.
johnc@1829 664 size_t _inc_cset_recorded_young_bytes;
johnc@1829 665
johnc@1829 666 // The RSet lengths recorded for regions in the collection set
johnc@1829 667 // (updated by the periodic sampling of the regions in the
johnc@1829 668 // young list/collection set).
johnc@1829 669 size_t _inc_cset_recorded_rs_lengths;
johnc@1829 670
johnc@1829 671 // The predicted elapsed time it will take to collect the regions
johnc@1829 672 // in the collection set (updated by the periodic sampling of the
johnc@1829 673 // regions in the young list/collection set).
johnc@1829 674 double _inc_cset_predicted_elapsed_time_ms;
johnc@1829 675
johnc@1829 676 // The predicted bytes to copy for the regions in the collection
johnc@1829 677 // set (updated by the periodic sampling of the regions in the
johnc@1829 678 // young list/collection set).
johnc@1829 679 size_t _inc_cset_predicted_bytes_to_copy;
johnc@1829 680
ysr@777 681 // Info about marking.
ysr@777 682 int _n_marks; // Sticky at 2, so we know when we've done at least 2.
ysr@777 683
ysr@777 684 // The number of collection pauses at the end of the last mark.
ysr@777 685 size_t _n_pauses_at_mark_end;
ysr@777 686
ysr@777 687 // Stash a pointer to the g1 heap.
ysr@777 688 G1CollectedHeap* _g1;
ysr@777 689
ysr@777 690 // The average time in ms per collection pause, averaged over recent pauses.
ysr@777 691 double recent_avg_time_for_pauses_ms();
ysr@777 692
ysr@777 693 // The average time in ms for processing CollectedHeap strong roots, per
ysr@777 694 // collection pause, averaged over recent pauses.
ysr@777 695 double recent_avg_time_for_CH_strong_ms();
ysr@777 696
ysr@777 697 // The average time in ms for processing the G1 remembered set, per
ysr@777 698 // pause, averaged over recent pauses.
ysr@777 699 double recent_avg_time_for_G1_strong_ms();
ysr@777 700
ysr@777 701 // The average time in ms for "evacuating followers", per pause, averaged
ysr@777 702 // over recent pauses.
ysr@777 703 double recent_avg_time_for_evac_ms();
ysr@777 704
ysr@777 705 // The number of "recent" GCs recorded in the number sequences
ysr@777 706 int number_of_recent_gcs();
ysr@777 707
ysr@777 708 // The average survival ratio, computed by the total number of bytes
ysr@777 709 // surviving / total number of bytes before collection over the last
ysr@777 710 // several recent pauses.
ysr@777 711 double recent_avg_survival_fraction();
ysr@777 712 // The survival fraction of the most recent pause; if there have been no
ysr@777 713 // pauses, returns 1.0.
ysr@777 714 double last_survival_fraction();
ysr@777 715
ysr@777 716 // Returns a "conservative" estimate of the recent survival rate, i.e.,
ysr@777 717 // one that may be higher than "recent_avg_survival_fraction".
ysr@777 718 // This is conservative in several ways:
ysr@777 719 // If there have been few pauses, it will assume a potential high
ysr@777 720 // variance, and err on the side of caution.
ysr@777 721 // It puts a lower bound (currently 0.1) on the value it will return.
ysr@777 722 // To try to detect phase changes, if the most recent pause ("latest") has a
ysr@777 723 // higher-than average ("avg") survival rate, it returns that rate.
ysr@777 724 // "work" version is a utility function; young is restricted to young regions.
ysr@777 725 double conservative_avg_survival_fraction_work(double avg,
ysr@777 726 double latest);
ysr@777 727
ysr@777 728 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 729 // surviving and the total number of bytes before collection, resp.,
ysr@777 730 // over the last several recent pauses.
ysr@777 731 // Returns the survival rate for the category in the most recent pause.
ysr@777 732 // If there have been no pauses, returns 1.0.
ysr@777 733 double last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 734 TruncatedSeq* before);
ysr@777 735
ysr@777 736 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 737 // surviving and the total number of bytes before collection, resp.,
ysr@777 738 // over the last several recent pauses.
ysr@777 739 // Returns the average survival ratio over the last several recent pauses.
ysr@777 740 // If there have been no pauses, returns 1.0.
ysr@777 741 double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 742 TruncatedSeq* before);
ysr@777 743
ysr@777 744 double conservative_avg_survival_fraction() {
ysr@777 745 double avg = recent_avg_survival_fraction();
ysr@777 746 double latest = last_survival_fraction();
ysr@777 747 return conservative_avg_survival_fraction_work(avg, latest);
ysr@777 748 }
ysr@777 749
ysr@777 750 // The ratio of gc time to elapsed time, computed over recent pauses.
ysr@777 751 double _recent_avg_pause_time_ratio;
ysr@777 752
ysr@777 753 double recent_avg_pause_time_ratio() {
ysr@777 754 return _recent_avg_pause_time_ratio;
ysr@777 755 }
ysr@777 756
ysr@777 757 // Number of pauses between concurrent marking.
ysr@777 758 size_t _pauses_btwn_concurrent_mark;
ysr@777 759
ysr@777 760 size_t _n_marks_since_last_pause;
ysr@777 761
tonyp@1794 762 // At the end of a pause we check the heap occupancy and we decide
tonyp@1794 763 // whether we will start a marking cycle during the next pause. If
tonyp@1794 764 // we decide that we want to do that, we will set this parameter to
tonyp@1794 765 // true. So, this parameter will stay true between the end of a
tonyp@1794 766 // pause and the beginning of a subsequent pause (not necessarily
tonyp@1794 767 // the next one, see the comments on the next field) when we decide
tonyp@1794 768 // that we will indeed start a marking cycle and do the initial-mark
tonyp@1794 769 // work.
tonyp@1794 770 volatile bool _initiate_conc_mark_if_possible;
ysr@777 771
tonyp@1794 772 // If initiate_conc_mark_if_possible() is set at the beginning of a
tonyp@1794 773 // pause, it is a suggestion that the pause should start a marking
tonyp@1794 774 // cycle by doing the initial-mark work. However, it is possible
tonyp@1794 775 // that the concurrent marking thread is still finishing up the
tonyp@1794 776 // previous marking cycle (e.g., clearing the next marking
tonyp@1794 777 // bitmap). If that is the case we cannot start a new cycle and
tonyp@1794 778 // we'll have to wait for the concurrent marking thread to finish
tonyp@1794 779 // what it is doing. In this case we will postpone the marking cycle
tonyp@1794 780 // initiation decision for the next pause. When we eventually decide
tonyp@1794 781 // to start a cycle, we will set _during_initial_mark_pause which
tonyp@1794 782 // will stay true until the end of the initial-mark pause and it's
tonyp@1794 783 // the condition that indicates that a pause is doing the
tonyp@1794 784 // initial-mark work.
tonyp@1794 785 volatile bool _during_initial_mark_pause;
tonyp@1794 786
ysr@777 787 bool _should_revert_to_full_young_gcs;
ysr@777 788 bool _last_full_young_gc;
ysr@777 789
ysr@777 790 // This set of variables tracks the collector efficiency, in order to
ysr@777 791 // determine whether we should initiate a new marking.
ysr@777 792 double _cur_mark_stop_world_time_ms;
ysr@777 793 double _mark_init_start_sec;
ysr@777 794 double _mark_remark_start_sec;
ysr@777 795 double _mark_cleanup_start_sec;
ysr@777 796 double _mark_closure_time_ms;
ysr@777 797
ysr@777 798 void calculate_young_list_min_length();
johnc@1829 799 void calculate_young_list_target_length();
johnc@1829 800 void calculate_young_list_target_length(size_t rs_lengths);
ysr@777 801
ysr@777 802 public:
ysr@777 803
ysr@777 804 G1CollectorPolicy();
ysr@777 805
ysr@777 806 virtual G1CollectorPolicy* as_g1_policy() { return this; }
ysr@777 807
ysr@777 808 virtual CollectorPolicy::Name kind() {
ysr@777 809 return CollectorPolicy::G1CollectorPolicyKind;
ysr@777 810 }
ysr@777 811
ysr@777 812 void check_prediction_validity();
ysr@777 813
ysr@777 814 size_t bytes_in_collection_set() {
ysr@777 815 return _bytes_in_collection_set_before_gc;
ysr@777 816 }
ysr@777 817
ysr@777 818 size_t bytes_in_to_space() {
ysr@777 819 return bytes_in_to_space_during_gc();
ysr@777 820 }
ysr@777 821
ysr@777 822 unsigned calc_gc_alloc_time_stamp() {
ysr@777 823 return _all_pause_times_ms->num() + 1;
ysr@777 824 }
ysr@777 825
ysr@777 826 protected:
ysr@777 827
ysr@777 828 // Count the number of bytes used in the CS.
ysr@777 829 void count_CS_bytes_used();
ysr@777 830
ysr@777 831 // Together these do the base cleanup-recording work. Subclasses might
ysr@777 832 // want to put something between them.
ysr@777 833 void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 834 size_t max_live_bytes);
ysr@777 835 void record_concurrent_mark_cleanup_end_work2();
ysr@777 836
ysr@777 837 public:
ysr@777 838
ysr@777 839 virtual void init();
ysr@777 840
apetrusenko@980 841 // Create jstat counters for the policy.
apetrusenko@980 842 virtual void initialize_gc_policy_counters();
apetrusenko@980 843
ysr@777 844 virtual HeapWord* mem_allocate_work(size_t size,
ysr@777 845 bool is_tlab,
ysr@777 846 bool* gc_overhead_limit_was_exceeded);
ysr@777 847
ysr@777 848 // This method controls how a collector handles one or more
ysr@777 849 // of its generations being fully allocated.
ysr@777 850 virtual HeapWord* satisfy_failed_allocation(size_t size,
ysr@777 851 bool is_tlab);
ysr@777 852
ysr@777 853 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
ysr@777 854
ysr@777 855 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
ysr@777 856
ysr@777 857 // The number of collection pauses so far.
ysr@777 858 long n_pauses() const { return _n_pauses; }
ysr@777 859
ysr@777 860 // Update the heuristic info to record a collection pause of the given
ysr@777 861 // start time, where the given number of bytes were used at the start.
ysr@777 862 // This may involve changing the desired size of a collection set.
ysr@777 863
ysr@777 864 virtual void record_stop_world_start();
ysr@777 865
ysr@777 866 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 867 size_t start_used);
ysr@777 868
ysr@777 869 // Must currently be called while the world is stopped.
ysr@777 870 virtual void record_concurrent_mark_init_start();
ysr@777 871 virtual void record_concurrent_mark_init_end();
ysr@777 872 void record_concurrent_mark_init_end_pre(double
ysr@777 873 mark_init_elapsed_time_ms);
ysr@777 874
ysr@777 875 void record_mark_closure_time(double mark_closure_time_ms);
ysr@777 876
ysr@777 877 virtual void record_concurrent_mark_remark_start();
ysr@777 878 virtual void record_concurrent_mark_remark_end();
ysr@777 879
ysr@777 880 virtual void record_concurrent_mark_cleanup_start();
ysr@777 881 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 882 size_t max_live_bytes);
ysr@777 883 virtual void record_concurrent_mark_cleanup_completed();
ysr@777 884
ysr@777 885 virtual void record_concurrent_pause();
ysr@777 886 virtual void record_concurrent_pause_end();
ysr@777 887
ysr@777 888 virtual void record_collection_pause_end_CH_strong_roots();
ysr@777 889 virtual void record_collection_pause_end_G1_strong_roots();
ysr@777 890
tonyp@2062 891 virtual void record_collection_pause_end();
ysr@777 892
ysr@777 893 // Record the fact that a full collection occurred.
ysr@777 894 virtual void record_full_collection_start();
ysr@777 895 virtual void record_full_collection_end();
ysr@777 896
tonyp@1966 897 void record_gc_worker_start_time(int worker_i, double ms) {
tonyp@1966 898 _par_last_gc_worker_start_times_ms[worker_i] = ms;
tonyp@1966 899 }
tonyp@1966 900
ysr@777 901 void record_ext_root_scan_time(int worker_i, double ms) {
ysr@777 902 _par_last_ext_root_scan_times_ms[worker_i] = ms;
ysr@777 903 }
ysr@777 904
ysr@777 905 void record_mark_stack_scan_time(int worker_i, double ms) {
ysr@777 906 _par_last_mark_stack_scan_times_ms[worker_i] = ms;
ysr@777 907 }
ysr@777 908
ysr@777 909 void record_satb_drain_time(double ms) {
ysr@777 910 _cur_satb_drain_time_ms = ms;
ysr@777 911 _satb_drain_time_set = true;
ysr@777 912 }
ysr@777 913
ysr@777 914 void record_satb_drain_processed_buffers (int processed_buffers) {
ysr@777 915 _last_satb_drain_processed_buffers = processed_buffers;
ysr@777 916 }
ysr@777 917
ysr@777 918 void record_mod_union_time(double ms) {
ysr@777 919 _all_mod_union_times_ms->add(ms);
ysr@777 920 }
ysr@777 921
ysr@777 922 void record_update_rs_time(int thread, double ms) {
ysr@777 923 _par_last_update_rs_times_ms[thread] = ms;
ysr@777 924 }
ysr@777 925
ysr@777 926 void record_update_rs_processed_buffers (int thread,
ysr@777 927 double processed_buffers) {
ysr@777 928 _par_last_update_rs_processed_buffers[thread] = processed_buffers;
ysr@777 929 }
ysr@777 930
ysr@777 931 void record_scan_rs_time(int thread, double ms) {
ysr@777 932 _par_last_scan_rs_times_ms[thread] = ms;
ysr@777 933 }
ysr@777 934
ysr@777 935 void reset_obj_copy_time(int thread) {
ysr@777 936 _par_last_obj_copy_times_ms[thread] = 0.0;
ysr@777 937 }
ysr@777 938
ysr@777 939 void reset_obj_copy_time() {
ysr@777 940 reset_obj_copy_time(0);
ysr@777 941 }
ysr@777 942
ysr@777 943 void record_obj_copy_time(int thread, double ms) {
ysr@777 944 _par_last_obj_copy_times_ms[thread] += ms;
ysr@777 945 }
ysr@777 946
tonyp@1966 947 void record_termination(int thread, double ms, size_t attempts) {
tonyp@1966 948 _par_last_termination_times_ms[thread] = ms;
tonyp@1966 949 _par_last_termination_attempts[thread] = (double) attempts;
ysr@777 950 }
ysr@777 951
tonyp@1966 952 void record_gc_worker_end_time(int worker_i, double ms) {
tonyp@1966 953 _par_last_gc_worker_end_times_ms[worker_i] = ms;
ysr@777 954 }
ysr@777 955
tonyp@1030 956 void record_pause_time_ms(double ms) {
ysr@777 957 _last_pause_time_ms = ms;
ysr@777 958 }
ysr@777 959
ysr@777 960 void record_clear_ct_time(double ms) {
ysr@777 961 _cur_clear_ct_time_ms = ms;
ysr@777 962 }
ysr@777 963
ysr@777 964 void record_par_time(double ms) {
ysr@777 965 _cur_collection_par_time_ms = ms;
ysr@777 966 }
ysr@777 967
ysr@777 968 void record_aux_start_time(int i) {
ysr@777 969 guarantee(i < _aux_num, "should be within range");
ysr@777 970 _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
ysr@777 971 }
ysr@777 972
ysr@777 973 void record_aux_end_time(int i) {
ysr@777 974 guarantee(i < _aux_num, "should be within range");
ysr@777 975 double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
ysr@777 976 _cur_aux_times_set[i] = true;
ysr@777 977 _cur_aux_times_ms[i] += ms;
ysr@777 978 }
ysr@777 979
johnc@1325 980 #ifndef PRODUCT
johnc@1325 981 void record_cc_clear_time(double ms) {
johnc@1325 982 if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
johnc@1325 983 _min_clear_cc_time_ms = ms;
johnc@1325 984 if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
johnc@1325 985 _max_clear_cc_time_ms = ms;
johnc@1325 986 _cur_clear_cc_time_ms = ms;
johnc@1325 987 _cum_clear_cc_time_ms += ms;
johnc@1325 988 _num_cc_clears++;
johnc@1325 989 }
johnc@1325 990 #endif
johnc@1325 991
ysr@777 992 // Record the fact that "bytes" bytes allocated in a region.
ysr@777 993 void record_before_bytes(size_t bytes);
ysr@777 994 void record_after_bytes(size_t bytes);
ysr@777 995
ysr@777 996 // Choose a new collection set. Marks the chosen regions as being
ysr@777 997 // "in_collection_set", and links them together. The head and number of
ysr@777 998 // the collection set are available via access methods.
tonyp@2062 999 virtual void choose_collection_set(double target_pause_time_ms) = 0;
ysr@777 1000
ysr@777 1001 // The head of the list (via "next_in_collection_set()") representing the
ysr@777 1002 // current collection set.
ysr@777 1003 HeapRegion* collection_set() { return _collection_set; }
ysr@777 1004
johnc@1829 1005 void clear_collection_set() { _collection_set = NULL; }
johnc@1829 1006
ysr@777 1007 // The number of elements in the current collection set.
ysr@777 1008 size_t collection_set_size() { return _collection_set_size; }
ysr@777 1009
ysr@777 1010 // Add "hr" to the CS.
ysr@777 1011 void add_to_collection_set(HeapRegion* hr);
ysr@777 1012
johnc@1829 1013 // Incremental CSet Support
johnc@1829 1014
johnc@1829 1015 // The head of the incrementally built collection set.
johnc@1829 1016 HeapRegion* inc_cset_head() { return _inc_cset_head; }
johnc@1829 1017
johnc@1829 1018 // The tail of the incrementally built collection set.
johnc@1829 1019 HeapRegion* inc_set_tail() { return _inc_cset_tail; }
johnc@1829 1020
johnc@1829 1021 // The number of elements in the incrementally built collection set.
johnc@1829 1022 size_t inc_cset_size() { return _inc_cset_size; }
johnc@1829 1023
johnc@1829 1024 // Initialize incremental collection set info.
johnc@1829 1025 void start_incremental_cset_building();
johnc@1829 1026
johnc@1829 1027 void clear_incremental_cset() {
johnc@1829 1028 _inc_cset_head = NULL;
johnc@1829 1029 _inc_cset_tail = NULL;
johnc@1829 1030 }
johnc@1829 1031
johnc@1829 1032 // Stop adding regions to the incremental collection set
johnc@1829 1033 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
johnc@1829 1034
johnc@1829 1035 // Add/remove information about hr to the aggregated information
johnc@1829 1036 // for the incrementally built collection set.
johnc@1829 1037 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
johnc@1829 1038 void remove_from_incremental_cset_info(HeapRegion* hr);
johnc@1829 1039
johnc@1829 1040 // Update information about hr in the aggregated information for
johnc@1829 1041 // the incrementally built collection set.
johnc@1829 1042 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
johnc@1829 1043
johnc@1829 1044 private:
johnc@1829 1045 // Update the incremental cset information when adding a region
johnc@1829 1046 // (should not be called directly).
johnc@1829 1047 void add_region_to_incremental_cset_common(HeapRegion* hr);
johnc@1829 1048
johnc@1829 1049 public:
johnc@1829 1050 // Add hr to the LHS of the incremental collection set.
johnc@1829 1051 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
johnc@1829 1052
johnc@1829 1053 // Add hr to the RHS of the incremental collection set.
johnc@1829 1054 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
johnc@1829 1055
johnc@1829 1056 #ifndef PRODUCT
johnc@1829 1057 void print_collection_set(HeapRegion* list_head, outputStream* st);
johnc@1829 1058 #endif // !PRODUCT
johnc@1829 1059
tonyp@1794 1060 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
tonyp@1794 1061 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
tonyp@1794 1062 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
tonyp@1794 1063
tonyp@1794 1064 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
tonyp@1794 1065 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
tonyp@1794 1066 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
tonyp@1794 1067
tonyp@2011 1068 // This sets the initiate_conc_mark_if_possible() flag to start a
tonyp@2011 1069 // new cycle, as long as we are not already in one. It's best if it
tonyp@2011 1070 // is called during a safepoint when the test whether a cycle is in
tonyp@2011 1071 // progress or not is stable.
tonyp@2011 1072 bool force_initial_mark_if_outside_cycle();
tonyp@2011 1073
tonyp@1794 1074 // This is called at the very beginning of an evacuation pause (it
tonyp@1794 1075 // has to be the first thing that the pause does). If
tonyp@1794 1076 // initiate_conc_mark_if_possible() is true, and the concurrent
tonyp@1794 1077 // marking thread has completed its work during the previous cycle,
tonyp@1794 1078 // it will set during_initial_mark_pause() to true so that the pause does
tonyp@1794 1079 // the initial-mark work and start a marking cycle.
tonyp@1794 1080 void decide_on_conc_mark_initiation();
ysr@777 1081
ysr@777 1082 // If an expansion would be appropriate, because recent GC overhead had
ysr@777 1083 // exceeded the desired limit, return an amount to expand by.
ysr@777 1084 virtual size_t expansion_amount();
ysr@777 1085
ysr@777 1086 // note start of mark thread
ysr@777 1087 void note_start_of_mark_thread();
ysr@777 1088
ysr@777 1089 // The marked bytes of region "r" have changed; reclassify its desirability
ysr@777 1090 // for marking. Also asserts that "r" is eligible for a CS.
ysr@777 1091 virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
ysr@777 1092
ysr@777 1093 #ifndef PRODUCT
ysr@777 1094 // Check any appropriate marked bytes info, asserting false if
ysr@777 1095 // something's wrong, else returning "true".
ysr@777 1096 virtual bool assertMarkedBytesDataOK() = 0;
ysr@777 1097 #endif
ysr@777 1098
ysr@777 1099 // Print tracing information.
ysr@777 1100 void print_tracing_info() const;
ysr@777 1101
ysr@777 1102 // Print stats on young survival ratio
ysr@777 1103 void print_yg_surv_rate_info() const;
ysr@777 1104
apetrusenko@980 1105 void finished_recalculating_age_indexes(bool is_survivors) {
apetrusenko@980 1106 if (is_survivors) {
apetrusenko@980 1107 _survivor_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1108 } else {
apetrusenko@980 1109 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1110 }
ysr@777 1111 // do that for any other surv rate groups
ysr@777 1112 }
ysr@777 1113
tonyp@2315 1114 bool is_young_list_full() {
tonyp@2315 1115 size_t young_list_length = _g1->young_list()->length();
tonyp@2315 1116 size_t young_list_max_length = _young_list_target_length;
tonyp@2315 1117 if (G1FixedEdenSize) {
tonyp@2315 1118 young_list_max_length -= _max_survivor_regions;
tonyp@2315 1119 }
tonyp@2315 1120
tonyp@2315 1121 return young_list_length >= young_list_max_length;
tonyp@2315 1122 }
tonyp@2315 1123 void update_region_num(bool young);
ysr@777 1124
ysr@777 1125 bool in_young_gc_mode() {
ysr@777 1126 return _in_young_gc_mode;
ysr@777 1127 }
ysr@777 1128 void set_in_young_gc_mode(bool in_young_gc_mode) {
ysr@777 1129 _in_young_gc_mode = in_young_gc_mode;
ysr@777 1130 }
ysr@777 1131
ysr@777 1132 bool full_young_gcs() {
ysr@777 1133 return _full_young_gcs;
ysr@777 1134 }
ysr@777 1135 void set_full_young_gcs(bool full_young_gcs) {
ysr@777 1136 _full_young_gcs = full_young_gcs;
ysr@777 1137 }
ysr@777 1138
ysr@777 1139 bool adaptive_young_list_length() {
ysr@777 1140 return _adaptive_young_list_length;
ysr@777 1141 }
ysr@777 1142 void set_adaptive_young_list_length(bool adaptive_young_list_length) {
ysr@777 1143 _adaptive_young_list_length = adaptive_young_list_length;
ysr@777 1144 }
ysr@777 1145
ysr@777 1146 inline double get_gc_eff_factor() {
ysr@777 1147 double ratio = _known_garbage_ratio;
ysr@777 1148
ysr@777 1149 double square = ratio * ratio;
ysr@777 1150 // square = square * square;
ysr@777 1151 double ret = square * 9.0 + 1.0;
ysr@777 1152 #if 0
ysr@777 1153 gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
ysr@777 1154 #endif // 0
ysr@777 1155 guarantee(0.0 <= ret && ret < 10.0, "invariant!");
ysr@777 1156 return ret;
ysr@777 1157 }
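// Worked example: with _known_garbage_ratio == 0.5 the factor is
// 0.5 * 0.5 * 9.0 + 1.0 = 3.25; a ratio of 0.0 yields 1.0 and ratios
// approaching 1.0 yield values approaching 10.0.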
ysr@777 1158
ysr@777 1159 //
ysr@777 1160 // Survivor regions policy.
ysr@777 1161 //
ysr@777 1162 protected:
ysr@777 1163
ysr@777 1164 // Current tenuring threshold, set to 0 if the collector reaches the
ysr@777 1165 // maximum number of survivor regions.
ysr@777 1166 int _tenuring_threshold;
ysr@777 1167
apetrusenko@980 1168 // The limit on the number of regions allocated for survivors.
apetrusenko@980 1169 size_t _max_survivor_regions;
apetrusenko@980 1170
apetrusenko@980 1171 // The number of survivor regions after a collection.
apetrusenko@980 1172 size_t _recorded_survivor_regions;
apetrusenko@980 1173 // List of survivor regions.
apetrusenko@980 1174 HeapRegion* _recorded_survivor_head;
apetrusenko@980 1175 HeapRegion* _recorded_survivor_tail;
apetrusenko@980 1176
apetrusenko@980 1177 ageTable _survivors_age_table;
apetrusenko@980 1178
ysr@777 1179 public:
ysr@777 1180
ysr@777 1181 inline GCAllocPurpose
ysr@777 1182 evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
ysr@777 1183 if (age < _tenuring_threshold && src_region->is_young()) {
ysr@777 1184 return GCAllocForSurvived;
ysr@777 1185 } else {
ysr@777 1186 return GCAllocForTenured;
ysr@777 1187 }
ysr@777 1188 }
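// For example, with a (hypothetical) _tenuring_threshold of 15, an object of
// age 3 being copied out of a young region goes to a survivor region
// (GCAllocForSurvived), whereas an object of age 15 or more, or any object
// from a non-young region, is tenured (GCAllocForTenured).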
ysr@777 1189
ysr@777 1190 inline bool track_object_age(GCAllocPurpose purpose) {
ysr@777 1191 return purpose == GCAllocForSurvived;
ysr@777 1192 }
ysr@777 1193
ysr@777 1194 inline GCAllocPurpose alternative_purpose(int purpose) {
ysr@777 1195 return GCAllocForTenured;
ysr@777 1196 }
ysr@777 1197
apetrusenko@980 1198 static const size_t REGIONS_UNLIMITED = ~(size_t)0;
apetrusenko@980 1199
apetrusenko@980 1200 size_t max_regions(int purpose);
ysr@777 1201
ysr@777 1202 // The limit on regions for a particular purpose is reached.
ysr@777 1203 void note_alloc_region_limit_reached(int purpose) {
ysr@777 1204 if (purpose == GCAllocForSurvived) {
ysr@777 1205 _tenuring_threshold = 0;
ysr@777 1206 }
ysr@777 1207 }
ysr@777 1208
ysr@777 1209 void note_start_adding_survivor_regions() {
ysr@777 1210 _survivor_surv_rate_group->start_adding_regions();
ysr@777 1211 }
ysr@777 1212
ysr@777 1213 void note_stop_adding_survivor_regions() {
ysr@777 1214 _survivor_surv_rate_group->stop_adding_regions();
ysr@777 1215 }
apetrusenko@980 1216
apetrusenko@980 1217 void record_survivor_regions(size_t regions,
apetrusenko@980 1218 HeapRegion* head,
apetrusenko@980 1219 HeapRegion* tail) {
apetrusenko@980 1220 _recorded_survivor_regions = regions;
apetrusenko@980 1221 _recorded_survivor_head = head;
apetrusenko@980 1222 _recorded_survivor_tail = tail;
apetrusenko@980 1223 }
apetrusenko@980 1224
tonyp@1273 1225 size_t recorded_survivor_regions() {
tonyp@1273 1226 return _recorded_survivor_regions;
tonyp@1273 1227 }
tonyp@1273 1228
apetrusenko@980 1229 void record_thread_age_table(ageTable* age_table)
apetrusenko@980 1230 {
apetrusenko@980 1231 _survivors_age_table.merge_par(age_table);
apetrusenko@980 1232 }
apetrusenko@980 1233
apetrusenko@980 1234 // Calculates survivor space parameters.
apetrusenko@980 1235 void calculate_survivors_policy();
apetrusenko@980 1236
ysr@777 1237 };
ysr@777 1238
ysr@777 1239 // This encapsulates a particular strategy for a g1 Collector.
ysr@777 1240 //
ysr@777 1241 // Start a concurrent mark when our heap size is n bytes
ysr@777 1242 // greater than our heap size was at the last concurrent
ysr@777 1243 // mark, where n is a function of the CMSTriggerRatio
ysr@777 1244 // and the MinHeapFreeRatio.
ysr@777 1245 //
ysr@777 1246 // Start a g1 collection pause when we have allocated the
ysr@777 1247 // average number of bytes currently being freed in
ysr@777 1248 // a collection, but only if it is at least one region
ysr@777 1249 // full
ysr@777 1250 //
ysr@777 1251 // Resize Heap based on desired
ysr@777 1252 // allocation space, where desired allocation space is
ysr@777 1253 // a function of survival rate and desired future to size.
ysr@777 1254 //
ysr@777 1255 // Choose collection set by first picking all older regions
ysr@777 1256 // which have a survival rate which beats our projected young
ysr@777 1257 // survival rate. Then fill out the number of needed regions
ysr@777 1258 // with young regions.
ysr@777 1259
ysr@777 1260 class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
ysr@777 1261 CollectionSetChooser* _collectionSetChooser;
ysr@777 1262 // If the estimate is less than desirable, resize if possible.
ysr@777 1263 void expand_if_possible(size_t numRegions);
ysr@777 1264
tonyp@2062 1265 virtual void choose_collection_set(double target_pause_time_ms);
ysr@777 1266 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 1267 size_t start_used);
ysr@777 1268 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 1269 size_t max_live_bytes);
ysr@777 1270 virtual void record_full_collection_end();
ysr@777 1271
ysr@777 1272 public:
ysr@777 1273 G1CollectorPolicy_BestRegionsFirst() {
ysr@777 1274 _collectionSetChooser = new CollectionSetChooser();
ysr@777 1275 }
tonyp@2062 1276 void record_collection_pause_end();
ysr@777 1277 // This is not needed any more, after the CSet choosing code was
ysr@777 1278 // changed to use the pause prediction work. But let's leave the
ysr@777 1279 // hook in just in case.
ysr@777 1280 void note_change_in_marked_bytes(HeapRegion* r) { }
ysr@777 1281 #ifndef PRODUCT
ysr@777 1282 bool assertMarkedBytesDataOK();
ysr@777 1283 #endif
ysr@777 1284 };
ysr@777 1285
ysr@777 1286 // This should move to some place more general...
ysr@777 1287
ysr@777 1288 // If we have "n" measurements, and we've kept track of their "sum" and the
ysr@777 1289 // "sum_of_squares" of the measurements, this returns the variance of the
ysr@777 1290 // sequence.
ysr@777 1291 inline double variance(int n, double sum_of_squares, double sum) {
ysr@777 1292 double n_d = (double)n;
ysr@777 1293 double avg = sum/n_d;
ysr@777 1294 return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
ysr@777 1295 }
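// Worked example: for the measurements {1.0, 2.0, 3.0} we have n == 3,
// sum == 6.0 and sum_of_squares == 14.0, so avg == 2.0 and
// variance == (14.0 - 2.0 * 2.0 * 6.0 + 3.0 * 2.0 * 2.0) / 3.0 == 2.0 / 3.0.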
ysr@777 1296
ysr@777 1297 // Local Variables: ***
ysr@777 1298 // c-indentation-style: gnu ***
ysr@777 1299 // End: ***
stefank@2314 1300
stefank@2314 1301 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
