src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

Thu, 07 Apr 2011 09:53:20 -0700

author
johnc
date
Thu, 07 Apr 2011 09:53:20 -0700
changeset 2781
e1162778c1c8
parent 2712
5c0b591e1074
child 2961
053d84a76d3d
permissions
-rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/collectionSetChooser.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1MMUTracker.hpp"
stefank@2314 30 #include "memory/collectorPolicy.hpp"
stefank@2314 31
ysr@777 32 // A G1CollectorPolicy makes policy decisions that determine the
ysr@777 33 // characteristics of the collector. Examples include:
ysr@777 34 // * choice of collection set.
ysr@777 35 // * when to collect.
ysr@777 36
ysr@777 37 class HeapRegion;
ysr@777 38 class CollectionSetChooser;
ysr@777 39
ysr@777 40 // Yes, this is a bit unpleasant... but it saves replicating the same thing
ysr@777 41 // over and over again and introducing subtle problems through small typos and
ysr@777 42 // cutting and pasting mistakes. The macro below introduces a number
ysr@777 43 // sequence into the following two classes and the methods that access it.
ysr@777 44
// Expands, inside a class body, to:
//   - a private NumberSeq member named _all_<name>_times_ms;
//   - a public record_<name>_time_ms(double) that appends one sample;
//   - a public get_<name>_seq() returning a pointer to the sequence.
// Comments are kept outside the macro: a '//' on a backslash-continued
// line would swallow the continuation and truncate the macro.
ysr@777 45 #define define_num_seq(name) \
ysr@777 46 private: \
ysr@777 47 NumberSeq _all_##name##_times_ms; \
ysr@777 48 public: \
ysr@777 49 void record_##name##_time_ms(double ms) { \
ysr@777 50 _all_##name##_times_ms.add(ms); \
ysr@777 51 } \
ysr@777 52 NumberSeq* get_##name##_seq() { \
ysr@777 53 return &_all_##name##_times_ms; \
ysr@777 54 }
ysr@777 55
ysr@777 56 class MainBodySummary;
ysr@777 57
// Top-level per-pause timing summary: tracks the "total" and "other"
// time sequences via define_num_seq. main_body_summary() returns NULL
// here; the Summary subclass below overrides it to expose the detailed
// per-phase view.
apetrusenko@984 58 class PauseSummary: public CHeapObj {
ysr@777 59 define_num_seq(total)
ysr@777 60 define_num_seq(other)
ysr@777 61
ysr@777 62 public:
ysr@777 63 virtual MainBodySummary* main_body_summary() { return NULL; }
ysr@777 64 };
ysr@777 65
// Detailed per-phase timing sequences for the main body of a GC pause.
// Each define_num_seq(x) generates record_x_time_ms()/get_x_seq().
// The trailing comments mark phases that are recorded only in parallel
// GC configurations, or that are optional.
apetrusenko@984 66 class MainBodySummary: public CHeapObj {
ysr@777 67 define_num_seq(satb_drain) // optional
ysr@777 68 define_num_seq(parallel) // parallel only
ysr@777 69 define_num_seq(ext_root_scan)
ysr@777 70 define_num_seq(mark_stack_scan)
ysr@777 71 define_num_seq(update_rs)
ysr@777 72 define_num_seq(scan_rs)
ysr@777 73 define_num_seq(obj_copy)
ysr@777 74 define_num_seq(termination) // parallel only
ysr@777 75 define_num_seq(parallel_other) // parallel only
ysr@777 76 define_num_seq(mark_closure)
ysr@777 77 define_num_seq(clear_ct) // parallel only
ysr@777 78 };
ysr@777 79
// Concrete summary combining the top-level pause totals with the
// detailed main-body phase sequences; main_body_summary() returns
// 'this' so callers holding a PauseSummary* can reach the phase data.
apetrusenko@1112 80 class Summary: public PauseSummary,
apetrusenko@1112 81 public MainBodySummary {
ysr@777 82 public:
ysr@777 83 virtual MainBodySummary* main_body_summary() { return this; }
ysr@777 84 };
ysr@777 85
ysr@777 86 class G1CollectorPolicy: public CollectorPolicy {
ysr@777 87 protected:
ysr@777 88 // The number of pauses during the execution.
ysr@777 89 long _n_pauses;
ysr@777 90
ysr@777 91 // either equal to the number of parallel threads, if ParallelGCThreads
ysr@777 92 // has been set, or 1 otherwise
ysr@777 93 int _parallel_gc_threads;
ysr@777 94
ysr@777 95 enum SomePrivateConstants {
tonyp@1377 96 NumPrevPausesForHeuristics = 10
ysr@777 97 };
ysr@777 98
ysr@777 99 G1MMUTracker* _mmu_tracker;
ysr@777 100
ysr@777 101 void initialize_flags();
ysr@777 102
// One-shot policy initialization: flag processing, then size
// computation, then perm gen setup (G1 uses a mark-sweep-compact
// perm generation).
ysr@777 103 void initialize_all() {
ysr@777 104 initialize_flags();
ysr@777 105 initialize_size_info();
ysr@777 106 initialize_perm_generation(PermGen::MarkSweepCompact);
ysr@777 107 }
ysr@777 108
// Default initial heap size used when none is specified (8 MB;
// M is the HotSpot megabyte constant).
ysr@777 109 virtual size_t default_init_heap_size() {
ysr@777 110 // Pick some reasonable default.
ysr@777 111 return 8*M;
ysr@777 112 }
ysr@777 113
ysr@777 114 double _cur_collection_start_sec;
ysr@777 115 size_t _cur_collection_pause_used_at_start_bytes;
ysr@777 116 size_t _cur_collection_pause_used_regions_at_start;
ysr@777 117 size_t _prev_collection_pause_used_at_end_bytes;
ysr@777 118 double _cur_collection_par_time_ms;
ysr@777 119 double _cur_satb_drain_time_ms;
ysr@777 120 double _cur_clear_ct_time_ms;
ysr@777 121 bool _satb_drain_time_set;
ysr@777 122
johnc@1325 123 #ifndef PRODUCT
johnc@1325 124 // Card Table Count Cache stats
johnc@1325 125 double _min_clear_cc_time_ms; // min
johnc@1325 126 double _max_clear_cc_time_ms; // max
johnc@1325 127 double _cur_clear_cc_time_ms; // clearing time during current pause
johnc@1325 128 double _cum_clear_cc_time_ms; // cumulative clearing time
johnc@1325 129 jlong _num_cc_clears; // number of times the card count cache has been cleared
johnc@1325 130 #endif
johnc@1325 131
ysr@777 132 double _cur_CH_strong_roots_end_sec;
ysr@777 133 double _cur_CH_strong_roots_dur_ms;
ysr@777 134 double _cur_G1_strong_roots_end_sec;
ysr@777 135 double _cur_G1_strong_roots_dur_ms;
ysr@777 136
ysr@777 137 // Statistics for recent GC pauses. See below for how indexed.
ysr@777 138 TruncatedSeq* _recent_CH_strong_roots_times_ms;
ysr@777 139 TruncatedSeq* _recent_G1_strong_roots_times_ms;
ysr@777 140 TruncatedSeq* _recent_evac_times_ms;
ysr@777 141 // These exclude marking times.
ysr@777 142 TruncatedSeq* _recent_pause_times_ms;
ysr@777 143 TruncatedSeq* _recent_gc_times_ms;
ysr@777 144
ysr@777 145 TruncatedSeq* _recent_CS_bytes_used_before;
ysr@777 146 TruncatedSeq* _recent_CS_bytes_surviving;
ysr@777 147
ysr@777 148 TruncatedSeq* _recent_rs_sizes;
ysr@777 149
ysr@777 150 TruncatedSeq* _concurrent_mark_init_times_ms;
ysr@777 151 TruncatedSeq* _concurrent_mark_remark_times_ms;
ysr@777 152 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
ysr@777 153
apetrusenko@1112 154 Summary* _summary;
ysr@777 155
ysr@777 156 NumberSeq* _all_pause_times_ms;
ysr@777 157 NumberSeq* _all_full_gc_times_ms;
ysr@777 158 double _stop_world_start;
ysr@777 159 NumberSeq* _all_stop_world_times_ms;
ysr@777 160 NumberSeq* _all_yield_times_ms;
ysr@777 161
ysr@777 162 size_t _region_num_young;
ysr@777 163 size_t _region_num_tenured;
ysr@777 164 size_t _prev_region_num_young;
ysr@777 165 size_t _prev_region_num_tenured;
ysr@777 166
ysr@777 167 NumberSeq* _all_mod_union_times_ms;
ysr@777 168
ysr@777 169 int _aux_num;
ysr@777 170 NumberSeq* _all_aux_times_ms;
ysr@777 171 double* _cur_aux_start_times_ms;
ysr@777 172 double* _cur_aux_times_ms;
ysr@777 173 bool* _cur_aux_times_set;
ysr@777 174
tonyp@1966 175 double* _par_last_gc_worker_start_times_ms;
ysr@777 176 double* _par_last_ext_root_scan_times_ms;
ysr@777 177 double* _par_last_mark_stack_scan_times_ms;
ysr@777 178 double* _par_last_update_rs_times_ms;
ysr@777 179 double* _par_last_update_rs_processed_buffers;
ysr@777 180 double* _par_last_scan_rs_times_ms;
ysr@777 181 double* _par_last_obj_copy_times_ms;
ysr@777 182 double* _par_last_termination_times_ms;
tonyp@1966 183 double* _par_last_termination_attempts;
tonyp@1966 184 double* _par_last_gc_worker_end_times_ms;
brutisso@2712 185 double* _par_last_gc_worker_times_ms;
ysr@777 186
ysr@777 187 // indicates that we are in young GC mode
ysr@777 188 bool _in_young_gc_mode;
ysr@777 189
ysr@777 190 // indicates whether we are in full young or partially young GC mode
ysr@777 191 bool _full_young_gcs;
ysr@777 192
ysr@777 193 // if true, then it tries to dynamically adjust the length of the
ysr@777 194 // young list
ysr@777 195 bool _adaptive_young_list_length;
ysr@777 196 size_t _young_list_min_length;
ysr@777 197 size_t _young_list_target_length;
ysr@777 198 size_t _young_list_fixed_length;
ysr@777 199
tonyp@2333 200 // The max number of regions we can extend the eden by while the GC
tonyp@2333 201 // locker is active. This should be >= _young_list_target_length;
tonyp@2333 202 size_t _young_list_max_length;
tonyp@2333 203
ysr@777 204 size_t _young_cset_length;
ysr@777 205 bool _last_young_gc_full;
ysr@777 206
ysr@777 207 unsigned _full_young_pause_num;
ysr@777 208 unsigned _partial_young_pause_num;
ysr@777 209
ysr@777 210 bool _during_marking;
ysr@777 211 bool _in_marking_window;
ysr@777 212 bool _in_marking_window_im;
ysr@777 213
ysr@777 214 SurvRateGroup* _short_lived_surv_rate_group;
ysr@777 215 SurvRateGroup* _survivor_surv_rate_group;
ysr@777 216 // add here any more surv rate groups
ysr@777 217
tonyp@1791 218 double _gc_overhead_perc;
tonyp@1791 219
ysr@777 220 bool during_marking() {
ysr@777 221 return _during_marking;
ysr@777 222 }
ysr@777 223
ysr@777 224 // <NEW PREDICTION>
ysr@777 225
ysr@777 226 private:
ysr@777 227 enum PredictionConstants {
ysr@777 228 TruncatedSeqLength = 10
ysr@777 229 };
ysr@777 230
ysr@777 231 TruncatedSeq* _alloc_rate_ms_seq;
ysr@777 232 double _prev_collection_pause_end_ms;
ysr@777 233
ysr@777 234 TruncatedSeq* _pending_card_diff_seq;
ysr@777 235 TruncatedSeq* _rs_length_diff_seq;
ysr@777 236 TruncatedSeq* _cost_per_card_ms_seq;
ysr@777 237 TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
ysr@777 238 TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
ysr@777 239 TruncatedSeq* _cost_per_entry_ms_seq;
ysr@777 240 TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
ysr@777 241 TruncatedSeq* _cost_per_byte_ms_seq;
ysr@777 242 TruncatedSeq* _constant_other_time_ms_seq;
ysr@777 243 TruncatedSeq* _young_other_cost_per_region_ms_seq;
ysr@777 244 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
ysr@777 245
ysr@777 246 TruncatedSeq* _pending_cards_seq;
ysr@777 247 TruncatedSeq* _scanned_cards_seq;
ysr@777 248 TruncatedSeq* _rs_lengths_seq;
ysr@777 249
ysr@777 250 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
ysr@777 251
ysr@777 252 TruncatedSeq* _young_gc_eff_seq;
ysr@777 253
ysr@777 254 TruncatedSeq* _max_conc_overhead_seq;
ysr@777 255
ysr@777 256 size_t _recorded_young_regions;
ysr@777 257 size_t _recorded_non_young_regions;
ysr@777 258 size_t _recorded_region_num;
ysr@777 259
ysr@777 260 size_t _free_regions_at_end_of_collection;
ysr@777 261
ysr@777 262 size_t _recorded_rs_lengths;
ysr@777 263 size_t _max_rs_lengths;
ysr@777 264
ysr@777 265 size_t _recorded_marked_bytes;
ysr@777 266 size_t _recorded_young_bytes;
ysr@777 267
ysr@777 268 size_t _predicted_pending_cards;
ysr@777 269 size_t _predicted_cards_scanned;
ysr@777 270 size_t _predicted_rs_lengths;
ysr@777 271 size_t _predicted_bytes_to_copy;
ysr@777 272
ysr@777 273 double _predicted_survival_ratio;
ysr@777 274 double _predicted_rs_update_time_ms;
ysr@777 275 double _predicted_rs_scan_time_ms;
ysr@777 276 double _predicted_object_copy_time_ms;
ysr@777 277 double _predicted_constant_other_time_ms;
ysr@777 278 double _predicted_young_other_time_ms;
ysr@777 279 double _predicted_non_young_other_time_ms;
ysr@777 280 double _predicted_pause_time_ms;
ysr@777 281
ysr@777 282 double _vtime_diff_ms;
ysr@777 283
ysr@777 284 double _recorded_young_free_cset_time_ms;
ysr@777 285 double _recorded_non_young_free_cset_time_ms;
ysr@777 286
ysr@777 287 double _sigma;
ysr@777 288 double _expensive_region_limit_ms;
ysr@777 289
ysr@777 290 size_t _rs_lengths_prediction;
ysr@777 291
ysr@777 292 size_t _known_garbage_bytes;
ysr@777 293 double _known_garbage_ratio;
ysr@777 294
// Accessor for the confidence multiplier used throughout the
// prediction code (set elsewhere; not visible in this chunk).
ysr@777 295 double sigma() {
ysr@777 296 return _sigma;
ysr@777 297 }
ysr@777 298
ysr@777 299 // A function that prevents us putting too much stock in small sample
ysr@777 300 // sets. Returns a number between 2.0 and 1.0, depending on the number
ysr@777 301 // of samples. 5 or more samples yields one; fewer scales linearly from
ysr@777 302 // 2.0 at 1 sample to 1.0 at 5.
// NOTE(review): the 1.0..2.0 range stated above holds only when
// sigma() == 1.0; the formula actually yields 1.0 + 2*sigma() at one
// sample — confirm whether the comment or the formula is the intent.
ysr@777 303 double confidence_factor(int samples) {
ysr@777 304 if (samples > 4) return 1.0;
ysr@777 305 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
ysr@777 306 }
ysr@777 307
// Pessimistic-low estimate: decaying average minus sigma() weighted
// decaying standard deviation (counterpart of get_new_prediction()).
ysr@777 308 double get_new_neg_prediction(TruncatedSeq* seq) {
ysr@777 309 return seq->davg() - sigma() * seq->dsd();
ysr@777 310 }
ysr@777 311
ysr@777 312 #ifndef PRODUCT
ysr@777 313 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
ysr@777 314 #endif // PRODUCT
ysr@777 315
iveresov@1546 316 void adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 317 double update_rs_processed_buffers,
iveresov@1546 318 double goal_ms);
iveresov@1546 319
ysr@777 320 protected:
ysr@777 321 double _pause_time_target_ms;
ysr@777 322 double _recorded_young_cset_choice_time_ms;
ysr@777 323 double _recorded_non_young_cset_choice_time_ms;
ysr@777 324 bool _within_target;
ysr@777 325 size_t _pending_cards;
ysr@777 326 size_t _max_pending_cards;
ysr@777 327
ysr@777 328 public:
ysr@777 329
// Attach a region to the short-lived (eden) survival-rate group so
// its survival statistics are tracked there.
ysr@777 330 void set_region_short_lived(HeapRegion* hr) {
ysr@777 331 hr->install_surv_rate_group(_short_lived_surv_rate_group);
ysr@777 332 }
ysr@777 333
// Attach a region to the survivor-space survival-rate group.
ysr@777 334 void set_region_survivors(HeapRegion* hr) {
ysr@777 335 hr->install_surv_rate_group(_survivor_surv_rate_group);
ysr@777 336 }
ysr@777 337
// Debug-only consistency check of region ages (defined elsewhere).
ysr@777 338 #ifndef PRODUCT
ysr@777 339 bool verify_young_ages();
ysr@777 340 #endif // PRODUCT
ysr@777 341
// Pessimistic-high estimate from a sample sequence: the larger of
// (davg + sigma()*dsd) and davg scaled up by the small-sample
// confidence factor, so few samples yield a wider safety margin.
ysr@777 342 double get_new_prediction(TruncatedSeq* seq) {
ysr@777 343 return MAX2(seq->davg() + sigma() * seq->dsd(),
ysr@777 344 seq->davg() * confidence_factor(seq->num()));
ysr@777 345 }
ysr@777 346
// Number of young regions in the current collection set.
ysr@777 347 size_t young_cset_length() {
ysr@777 348 return _young_cset_length;
ysr@777 349 }
ysr@777 350
// Record the observed maximum RSet length for this pause.
ysr@777 351 void record_max_rs_lengths(size_t rs_lengths) {
ysr@777 352 _max_rs_lengths = rs_lengths;
ysr@777 353 }
ysr@777 354
// Predicted shortfall of pending cards; get_new_neg_prediction() can
// go negative, so values below a small epsilon are clamped to 0
// before the (otherwise wrapping) cast to size_t.
ysr@777 355 size_t predict_pending_card_diff() {
ysr@777 356 double prediction = get_new_neg_prediction(_pending_card_diff_seq);
ysr@777 357 if (prediction < 0.00001)
ysr@777 358 return 0;
ysr@777 359 else
ysr@777 360 return (size_t) prediction;
ysr@777 361 }
ysr@777 362
// Predicted number of pending cards: the heap-wide maximum minus the
// predicted diff. If diff exceeds the maximum, the conservative full
// maximum is used instead — this also prevents size_t underflow.
ysr@777 363 size_t predict_pending_cards() {
ysr@777 364 size_t max_pending_card_num = _g1->max_pending_card_num();
ysr@777 365 size_t diff = predict_pending_card_diff();
ysr@777 366 size_t prediction;
ysr@777 367 if (diff > max_pending_card_num)
ysr@777 368 prediction = max_pending_card_num;
ysr@777 369 else
ysr@777 370 prediction = max_pending_card_num - diff;
ysr@777 371
ysr@777 372 return prediction;
ysr@777 373 }
ysr@777 374
// Predicted growth of RSet lengths between sampling and the pause.
ysr@777 375 size_t predict_rs_length_diff() {
ysr@777 376 return (size_t) get_new_prediction(_rs_length_diff_seq);
ysr@777 377 }
ysr@777 378
// Predicted allocation rate (units follow _alloc_rate_ms_seq).
ysr@777 379 double predict_alloc_rate_ms() {
ysr@777 380 return get_new_prediction(_alloc_rate_ms_seq);
ysr@777 381 }
ysr@777 382
// Predicted cost, in ms, of processing a single card.
ysr@777 383 double predict_cost_per_card_ms() {
ysr@777 384 return get_new_prediction(_cost_per_card_ms_seq);
ysr@777 385 }
ysr@777 386
// Predicted RSet-update time: cards times per-card cost.
ysr@777 387 double predict_rs_update_time_ms(size_t pending_cards) {
ysr@777 388 return (double) pending_cards * predict_cost_per_card_ms();
ysr@777 389 }
ysr@777 390
// Predicted cards-per-RSet-entry ratio during fully-young GCs.
ysr@777 391 double predict_fully_young_cards_per_entry_ratio() {
ysr@777 392 return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
ysr@777 393 }
ysr@777 394
// Partially-young ratio; with fewer than 2 samples of its own it
// falls back to the fully-young ratio as the best available proxy.
ysr@777 395 double predict_partially_young_cards_per_entry_ratio() {
ysr@777 396 if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
ysr@777 397 return predict_fully_young_cards_per_entry_ratio();
ysr@777 398 else
ysr@777 399 return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
ysr@777 400 }
ysr@777 401
// Cards expected for a young region's RSet of the given length.
ysr@777 402 size_t predict_young_card_num(size_t rs_length) {
ysr@777 403 return (size_t) ((double) rs_length *
ysr@777 404 predict_fully_young_cards_per_entry_ratio());
ysr@777 405 }
ysr@777 406
// Cards expected for a non-young region's RSet of the given length.
ysr@777 407 size_t predict_non_young_card_num(size_t rs_length) {
ysr@777 408 return (size_t) ((double) rs_length *
ysr@777 409 predict_partially_young_cards_per_entry_ratio());
ysr@777 410 }
ysr@777 411
// Predicted RSet scan time for card_num cards, dispatching on the
// current GC mode (fully young vs. partially young).
ysr@777 412 double predict_rs_scan_time_ms(size_t card_num) {
ysr@777 413 if (full_young_gcs())
ysr@777 414 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 415 else
ysr@777 416 return predict_partially_young_rs_scan_time_ms(card_num);
ysr@777 417 }
ysr@777 418
// Partially-young scan time; with fewer than 3 partial-mode samples
// the fully-young per-entry cost is used instead.
ysr@777 419 double predict_partially_young_rs_scan_time_ms(size_t card_num) {
ysr@777 420 if (_partially_young_cost_per_entry_ms_seq->num() < 3)
ysr@777 421 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
ysr@777 422 else
ysr@777 423 return (double) card_num *
ysr@777 424 get_new_prediction(_partially_young_cost_per_entry_ms_seq);
ysr@777 425 }
ysr@777 426
// Predicted copy time while concurrent marking is running. With too
// few during-CM samples (< 3) the normal per-byte cost is inflated by
// a fixed 10% fudge factor to account for marking overhead.
ysr@777 427 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
ysr@777 428 if (_cost_per_byte_ms_during_cm_seq->num() < 3)
ysr@777 429 return 1.1 * (double) bytes_to_copy *
ysr@777 430 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 431 else
ysr@777 432 return (double) bytes_to_copy *
ysr@777 433 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
ysr@777 434 }
ysr@777 435
// Predicted object copy time; routes to the during-CM variant when a
// marking window is open (but not during the initial-mark window).
ysr@777 436 double predict_object_copy_time_ms(size_t bytes_to_copy) {
ysr@777 437 if (_in_marking_window && !_in_marking_window_im)
ysr@777 438 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
ysr@777 439 else
ysr@777 440 return (double) bytes_to_copy *
ysr@777 441 get_new_prediction(_cost_per_byte_ms_seq);
ysr@777 442 }
ysr@777 443
// Predicted fixed (region-count-independent) pause overhead.
ysr@777 444 double predict_constant_other_time_ms() {
ysr@777 445 return get_new_prediction(_constant_other_time_ms_seq);
ysr@777 446 }
ysr@777 447
// Predicted per-region "other" overhead for young regions.
ysr@777 448 double predict_young_other_time_ms(size_t young_num) {
ysr@777 449 return
ysr@777 450 (double) young_num *
ysr@777 451 get_new_prediction(_young_other_cost_per_region_ms_seq);
ysr@777 452 }
ysr@777 453
// Predicted per-region "other" overhead for non-young regions.
ysr@777 454 double predict_non_young_other_time_ms(size_t non_young_num) {
ysr@777 455 return
ysr@777 456 (double) non_young_num *
ysr@777 457 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
ysr@777 458 }
ysr@777 459
ysr@777 460 void check_if_region_is_too_expensive(double predicted_time_ms);
ysr@777 461
ysr@777 462 double predict_young_collection_elapsed_time_ms(size_t adjustment);
ysr@777 463 double predict_base_elapsed_time_ms(size_t pending_cards);
ysr@777 464 double predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 465 size_t scanned_cards);
ysr@777 466 size_t predict_bytes_to_copy(HeapRegion* hr);
ysr@777 467 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
ysr@777 468
johnc@1829 469 // for use by: calculate_young_list_target_length(rs_length)
johnc@1829 470 bool predict_will_fit(size_t young_region_num,
johnc@1829 471 double base_time_ms,
johnc@1829 472 size_t init_free_regions,
johnc@1829 473 double target_pause_time_ms);
ysr@777 474
ysr@777 475 void start_recording_regions();
johnc@1829 476 void record_cset_region_info(HeapRegion* hr, bool young);
johnc@1829 477 void record_non_young_cset_region(HeapRegion* hr);
johnc@1829 478
johnc@1829 479 void set_recorded_young_regions(size_t n_regions);
johnc@1829 480 void set_recorded_young_bytes(size_t bytes);
johnc@1829 481 void set_recorded_rs_lengths(size_t rs_lengths);
johnc@1829 482 void set_predicted_bytes_to_copy(size_t bytes);
johnc@1829 483
ysr@777 484 void end_recording_regions();
ysr@777 485
// Record the virtual-time delta for the current pause.
ysr@777 486 void record_vtime_diff_ms(double vtime_diff_ms) {
ysr@777 487 _vtime_diff_ms = vtime_diff_ms;
ysr@777 488 }
ysr@777 489
// Record the time spent freeing the young part of the cset.
ysr@777 490 void record_young_free_cset_time_ms(double time_ms) {
ysr@777 491 _recorded_young_free_cset_time_ms = time_ms;
ysr@777 492 }
ysr@777 493
// Record the time spent freeing the non-young part of the cset.
ysr@777 494 void record_non_young_free_cset_time_ms(double time_ms) {
ysr@777 495 _recorded_non_young_free_cset_time_ms = time_ms;
ysr@777 496 }
ysr@777 497
// Conservative (low-side) prediction of young GC efficiency.
ysr@777 498 double predict_young_gc_eff() {
ysr@777 499 return get_new_neg_prediction(_young_gc_eff_seq);
ysr@777 500 }
ysr@777 501
// Predicted time to evacuate the survivor regions (defined elsewhere).
apetrusenko@980 502 double predict_survivor_regions_evac_time();
apetrusenko@980 503
ysr@777 504 // </NEW PREDICTION>
ysr@777 505
ysr@777 506 public:
// Called when the collection-set regions have been freed: tells every
// survival-rate group that all surviving words for this pause are in.
// Statistics are propagated only after a full-young GC outside a
// marking window.
ysr@777 507 void cset_regions_freed() {
ysr@777 508 bool propagate = _last_young_gc_full && !_in_marking_window;
ysr@777 509 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 510 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 511 // also call it on any more surv rate groups
ysr@777 512 }
ysr@777 513
// Set the known-garbage byte count and recompute its ratio against
// the current heap capacity.
ysr@777 514 void set_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 515 _known_garbage_bytes = known_garbage_bytes;
ysr@777 516 size_t heap_bytes = _g1->capacity();
ysr@777 517 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 518 }
ysr@777 519
// Subtract reclaimed garbage from the running count (guaranteed not
// to underflow) and refresh the ratio.
ysr@777 520 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
ysr@777 521 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
ysr@777 522
ysr@777 523 _known_garbage_bytes -= known_garbage_bytes;
ysr@777 524 size_t heap_bytes = _g1->capacity();
ysr@777 525 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
ysr@777 526 }
ysr@777 527
// Accessor for the minimum-mutator-utilization tracker.
ysr@777 528 G1MMUTracker* mmu_tracker() {
ysr@777 529 return _mmu_tracker;
ysr@777 530 }
ysr@777 531
// Maximum allowed GC pause, converted from the tracker's seconds
// to milliseconds.
tonyp@2011 532 double max_pause_time_ms() {
tonyp@2011 533 return _mmu_tracker->max_gc_time() * 1000.0;
tonyp@2011 534 }
tonyp@2011 535
// Predicted duration of the concurrent-mark initial phase.
ysr@777 536 double predict_init_time_ms() {
ysr@777 537 return get_new_prediction(_concurrent_mark_init_times_ms);
ysr@777 538 }
ysr@777 539
// Predicted duration of the remark pause.
ysr@777 540 double predict_remark_time_ms() {
ysr@777 541 return get_new_prediction(_concurrent_mark_remark_times_ms);
ysr@777 542 }
ysr@777 543
// Predicted duration of the cleanup pause.
ysr@777 544 double predict_cleanup_time_ms() {
ysr@777 545 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
ysr@777 546 }
ysr@777 547
ysr@777 548 // Returns an estimate of the survival rate of the region at yg-age
ysr@777 549 // "yg_age".
// Requires that the group has at least one sample for this age (the
// "BARF!" print is a debugging aid emitted just before the guarantee
// fails). The prediction is capped at 1.0 since a rate cannot exceed
// 100%.
apetrusenko@980 550 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
apetrusenko@980 551 TruncatedSeq* seq = surv_rate_group->get_seq(age);
ysr@777 552 if (seq->num() == 0)
ysr@777 553 gclog_or_tty->print("BARF! age is %d", age);
ysr@777 554 guarantee( seq->num() > 0, "invariant" );
ysr@777 555 double pred = get_new_prediction(seq);
ysr@777 556 if (pred > 1.0)
ysr@777 557 pred = 1.0;
ysr@777 558 return pred;
ysr@777 559 }
ysr@777 560
// Convenience overload using the short-lived (eden) group.
apetrusenko@980 561 double predict_yg_surv_rate(int age) {
apetrusenko@980 562 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
apetrusenko@980 563 }
apetrusenko@980 564
// Accumulated survival-rate prediction up to the given age, taken
// from the short-lived group.
ysr@777 565 double accum_yg_surv_rate_pred(int age) {
ysr@777 566 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
ysr@777 567 }
ysr@777 568
ysr@777 569 protected:
tonyp@1966 570 void print_stats(int level, const char* str, double value);
tonyp@1966 571 void print_stats(int level, const char* str, int value);
tonyp@1966 572
brutisso@2712 573 void print_par_stats(int level, const char* str, double* data);
brutisso@2712 574 void print_par_sizes(int level, const char* str, double* data);
ysr@777 575
ysr@777 576 void check_other_times(int level,
ysr@777 577 NumberSeq* other_times_ms,
ysr@777 578 NumberSeq* calc_other_times_ms) const;
ysr@777 579
ysr@777 580 void print_summary (PauseSummary* stats) const;
ysr@777 581
ysr@777 582 void print_summary (int level, const char* str, NumberSeq* seq) const;
ysr@777 583 void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
ysr@777 584
ysr@777 585 double avg_value (double* data);
ysr@777 586 double max_value (double* data);
ysr@777 587 double sum_of_values (double* data);
ysr@777 588 double max_sum (double* data1, double* data2);
ysr@777 589
ysr@777 590 int _last_satb_drain_processed_buffers;
ysr@777 591 int _last_update_rs_processed_buffers;
ysr@777 592 double _last_pause_time_ms;
ysr@777 593
ysr@777 594 size_t _bytes_in_to_space_before_gc;
ysr@777 595 size_t _bytes_in_to_space_after_gc;
// Bytes added to to-space by the GC: after-GC minus before-GC count.
// NOTE(review): assumes the after value is >= the before value —
// size_t subtraction would wrap otherwise; confirm against the code
// that sets these two fields.
ysr@777 596 size_t bytes_in_to_space_during_gc() {
ysr@777 597 return
ysr@777 598 _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
ysr@777 599 }
ysr@777 600 size_t _bytes_in_collection_set_before_gc;
ysr@777 601 // Used to count used bytes in CS.
ysr@777 602 friend class CountCSClosure;
ysr@777 603
ysr@777 604 // Statistics kept per GC stoppage, pause or full.
ysr@777 605 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
ysr@777 606
ysr@777 607 // We track markings.
ysr@777 608 int _num_markings;
ysr@777 609 double _mark_thread_startup_sec; // Time at startup of marking thread
ysr@777 610
ysr@777 611 // Add a new GC of the given duration and end time to the record.
ysr@777 612 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
ysr@777 613
ysr@777 614 // The head of the list (via "next_in_collection_set()") representing the
johnc@1829 615 // current collection set. Set from the incrementally built collection
johnc@1829 616 // set at the start of the pause.
ysr@777 617 HeapRegion* _collection_set;
johnc@1829 618
johnc@1829 619 // The number of regions in the collection set. Set from the incrementally
johnc@1829 620 // built collection set at the start of an evacuation pause.
ysr@777 621 size_t _collection_set_size;
johnc@1829 622
johnc@1829 623 // The number of bytes in the collection set before the pause. Set from
johnc@1829 624 // the incrementally built collection set at the start of an evacuation
johnc@1829 625 // pause.
ysr@777 626 size_t _collection_set_bytes_used_before;
ysr@777 627
johnc@1829 628 // The associated information that is maintained while the incremental
johnc@1829 629 // collection set is being built with young regions. Used to populate
johnc@1829 630 // the recorded info for the evacuation pause.
johnc@1829 631
johnc@1829 632 enum CSetBuildType {
johnc@1829 633 Active, // We are actively building the collection set
johnc@1829 634 Inactive // We are not actively building the collection set
johnc@1829 635 };
johnc@1829 636
johnc@1829 637 CSetBuildType _inc_cset_build_state;
johnc@1829 638
johnc@1829 639 // The head of the incrementally built collection set.
johnc@1829 640 HeapRegion* _inc_cset_head;
johnc@1829 641
johnc@1829 642 // The tail of the incrementally built collection set.
johnc@1829 643 HeapRegion* _inc_cset_tail;
johnc@1829 644
johnc@1829 645 // The number of regions in the incrementally built collection set.
johnc@1829 646 // Used to set _collection_set_size at the start of an evacuation
johnc@1829 647 // pause.
johnc@1829 648 size_t _inc_cset_size;
johnc@1829 649
johnc@1829 650 // Used as the index in the surviving young words structure
johnc@1829 651 // which tracks the amount of space, for each young region,
johnc@1829 652 // that survives the pause.
johnc@1829 653 size_t _inc_cset_young_index;
johnc@1829 654
johnc@1829 655 // The number of bytes in the incrementally built collection set.
johnc@1829 656 // Used to set _collection_set_bytes_used_before at the start of
johnc@1829 657 // an evacuation pause.
johnc@1829 658 size_t _inc_cset_bytes_used_before;
johnc@1829 659
johnc@1829 660 // Used to record the highest end of heap region in collection set
johnc@1829 661 HeapWord* _inc_cset_max_finger;
johnc@1829 662
johnc@1829 663 // The number of recorded used bytes in the young regions
johnc@1829 664 // of the collection set. This is the sum of the used() bytes
johnc@1829 665 // of retired young regions in the collection set.
johnc@1829 666 size_t _inc_cset_recorded_young_bytes;
johnc@1829 667
johnc@1829 668 // The RSet lengths recorded for regions in the collection set
johnc@1829 669 // (updated by the periodic sampling of the regions in the
johnc@1829 670 // young list/collection set).
johnc@1829 671 size_t _inc_cset_recorded_rs_lengths;
johnc@1829 672
johnc@1829 673 // The predicted elapsed time it will take to collect the regions
johnc@1829 674 // in the collection set (updated by the periodic sampling of the
johnc@1829 675 // regions in the young list/collection set).
johnc@1829 676 double _inc_cset_predicted_elapsed_time_ms;
johnc@1829 677
johnc@1829 678 // The predicted bytes to copy for the regions in the collection
johnc@1829 679 // set (updated by the periodic sampling of the regions in the
johnc@1829 680 // young list/collection set).
johnc@1829 681 size_t _inc_cset_predicted_bytes_to_copy;
johnc@1829 682
ysr@777 683 // Info about marking.
ysr@777 684 int _n_marks; // Sticky at 2, so we know when we've done at least 2.
ysr@777 685
ysr@777 686 // The number of collection pauses at the end of the last mark.
ysr@777 687 size_t _n_pauses_at_mark_end;
ysr@777 688
ysr@777 689 // Stash a pointer to the g1 heap.
ysr@777 690 G1CollectedHeap* _g1;
ysr@777 691
ysr@777 692 // The average time in ms per collection pause, averaged over recent pauses.
ysr@777 693 double recent_avg_time_for_pauses_ms();
ysr@777 694
ysr@777 695 // The average time in ms for processing CollectedHeap strong roots, per
ysr@777 696 // collection pause, averaged over recent pauses.
ysr@777 697 double recent_avg_time_for_CH_strong_ms();
ysr@777 698
ysr@777 699 // The average time in ms for processing the G1 remembered set, per
ysr@777 700 // pause, averaged over recent pauses.
ysr@777 701 double recent_avg_time_for_G1_strong_ms();
ysr@777 702
ysr@777 703 // The average time in ms for "evacuating followers", per pause, averaged
ysr@777 704 // over recent pauses.
ysr@777 705 double recent_avg_time_for_evac_ms();
ysr@777 706
ysr@777 707 // The number of "recent" GCs recorded in the number sequences
ysr@777 708 int number_of_recent_gcs();
ysr@777 709
ysr@777 710 // The average survival ratio, computed by the total number of bytes
ysr@777 711 // surviving / total number of bytes before collection over the last
ysr@777 712 // several recent pauses.
ysr@777 713 double recent_avg_survival_fraction();
ysr@777 714 // The survival fraction of the most recent pause; if there have been no
ysr@777 715 // pauses, returns 1.0.
ysr@777 716 double last_survival_fraction();
ysr@777 717
ysr@777 718 // Returns a "conservative" estimate of the recent survival rate, i.e.,
ysr@777 719 // one that may be higher than "recent_avg_survival_fraction".
ysr@777 720 // This is conservative in several ways:
ysr@777 721 // If there have been few pauses, it will assume a potential high
ysr@777 722 // variance, and err on the side of caution.
ysr@777 723 // It puts a lower bound (currently 0.1) on the value it will return.
ysr@777 724 // To try to detect phase changes, if the most recent pause ("latest") has a
ysr@777 725 // higher-than average ("avg") survival rate, it returns that rate.
ysr@777 726 // "work" version is a utility function; young is restricted to young regions.
ysr@777 727 double conservative_avg_survival_fraction_work(double avg,
ysr@777 728 double latest);
ysr@777 729
ysr@777 730 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 731 // surviving and the total number of bytes before collection, resp.,
ysr@777 732 // over the last several recent pauses
ysr@777 733 // Returns the survival rate for the category in the most recent pause.
ysr@777 734 // If there have been no pauses, returns 1.0.
ysr@777 735 double last_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 736 TruncatedSeq* before);
ysr@777 737
ysr@777 738 // The arguments are the two sequences that keep track of the number of bytes
ysr@777 739 // surviving and the total number of bytes before collection, resp.,
ysr@777 740 // over the last several recent pauses
ysr@777 741 // Returns the average survival ratio over the last several recent pauses
ysr@777 742 // If there have been no pauses, return 1.0
ysr@777 743 double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
ysr@777 744 TruncatedSeq* before);
ysr@777 745
ysr@777 746 double conservative_avg_survival_fraction() {
ysr@777 747 double avg = recent_avg_survival_fraction();
ysr@777 748 double latest = last_survival_fraction();
ysr@777 749 return conservative_avg_survival_fraction_work(avg, latest);
ysr@777 750 }
ysr@777 751
ysr@777 752 // The ratio of gc time to elapsed time, computed over recent pauses.
ysr@777 753 double _recent_avg_pause_time_ratio;
ysr@777 754
  // Read accessor for the cached gc-time / elapsed-time ratio
  // (see _recent_avg_pause_time_ratio above).
  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }
ysr@777 758
ysr@777 759 // Number of pauses between concurrent marking.
ysr@777 760 size_t _pauses_btwn_concurrent_mark;
ysr@777 761
ysr@777 762 size_t _n_marks_since_last_pause;
ysr@777 763
tonyp@1794 764 // At the end of a pause we check the heap occupancy and we decide
tonyp@1794 765 // whether we will start a marking cycle during the next pause. If
tonyp@1794 766 // we decide that we want to do that, we will set this parameter to
tonyp@1794 767 // true. So, this parameter will stay true between the end of a
tonyp@1794 768 // pause and the beginning of a subsequent pause (not necessarily
tonyp@1794 769 // the next one, see the comments on the next field) when we decide
tonyp@1794 770 // that we will indeed start a marking cycle and do the initial-mark
tonyp@1794 771 // work.
tonyp@1794 772 volatile bool _initiate_conc_mark_if_possible;
ysr@777 773
tonyp@1794 774 // If initiate_conc_mark_if_possible() is set at the beginning of a
tonyp@1794 775 // pause, it is a suggestion that the pause should start a marking
tonyp@1794 776 // cycle by doing the initial-mark work. However, it is possible
tonyp@1794 777 // that the concurrent marking thread is still finishing up the
tonyp@1794 778 // previous marking cycle (e.g., clearing the next marking
tonyp@1794 779 // bitmap). If that is the case we cannot start a new cycle and
tonyp@1794 780 // we'll have to wait for the concurrent marking thread to finish
tonyp@1794 781 // what it is doing. In this case we will postpone the marking cycle
tonyp@1794 782 // initiation decision for the next pause. When we eventually decide
tonyp@1794 783 // to start a cycle, we will set _during_initial_mark_pause which
tonyp@1794 784 // will stay true until the end of the initial-mark pause and it's
tonyp@1794 785 // the condition that indicates that a pause is doing the
tonyp@1794 786 // initial-mark work.
tonyp@1794 787 volatile bool _during_initial_mark_pause;
tonyp@1794 788
ysr@777 789 bool _should_revert_to_full_young_gcs;
ysr@777 790 bool _last_full_young_gc;
ysr@777 791
ysr@777 792 // This set of variables tracks the collector efficiency, in order to
ysr@777 793 // determine whether we should initiate a new marking.
ysr@777 794 double _cur_mark_stop_world_time_ms;
ysr@777 795 double _mark_init_start_sec;
ysr@777 796 double _mark_remark_start_sec;
ysr@777 797 double _mark_cleanup_start_sec;
ysr@777 798 double _mark_closure_time_ms;
ysr@777 799
ysr@777 800 void calculate_young_list_min_length();
johnc@1829 801 void calculate_young_list_target_length();
johnc@1829 802 void calculate_young_list_target_length(size_t rs_lengths);
ysr@777 803
ysr@777 804 public:
ysr@777 805
ysr@777 806 G1CollectorPolicy();
ysr@777 807
ysr@777 808 virtual G1CollectorPolicy* as_g1_policy() { return this; }
ysr@777 809
ysr@777 810 virtual CollectorPolicy::Name kind() {
ysr@777 811 return CollectorPolicy::G1CollectorPolicyKind;
ysr@777 812 }
ysr@777 813
ysr@777 814 void check_prediction_validity();
ysr@777 815
  // The number of bytes that were in the collection set before the
  // most recent GC (recorded at pause start).
  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  // The number of bytes copied to to-space during the last GC.
  size_t bytes_in_to_space() {
    return bytes_in_to_space_during_gc();
  }

  // The time stamp with which to tag GC allocation regions: one past
  // the number of pauses recorded so far.
  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }
ysr@777 827
ysr@777 828 protected:
ysr@777 829
ysr@777 830 // Count the number of bytes used in the CS.
ysr@777 831 void count_CS_bytes_used();
ysr@777 832
ysr@777 833 // Together these do the base cleanup-recording work. Subclasses might
ysr@777 834 // want to put something between them.
ysr@777 835 void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
ysr@777 836 size_t max_live_bytes);
ysr@777 837 void record_concurrent_mark_cleanup_end_work2();
ysr@777 838
ysr@777 839 public:
ysr@777 840
ysr@777 841 virtual void init();
ysr@777 842
apetrusenko@980 843 // Create jstat counters for the policy.
apetrusenko@980 844 virtual void initialize_gc_policy_counters();
apetrusenko@980 845
ysr@777 846 virtual HeapWord* mem_allocate_work(size_t size,
ysr@777 847 bool is_tlab,
ysr@777 848 bool* gc_overhead_limit_was_exceeded);
ysr@777 849
ysr@777 850 // This method controls how a collector handles one or more
ysr@777 851 // of its generations being fully allocated.
ysr@777 852 virtual HeapWord* satisfy_failed_allocation(size_t size,
ysr@777 853 bool is_tlab);
ysr@777 854
ysr@777 855 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
ysr@777 856
ysr@777 857 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
ysr@777 858
ysr@777 859 // The number of collection pauses so far.
ysr@777 860 long n_pauses() const { return _n_pauses; }
ysr@777 861
ysr@777 862 // Update the heuristic info to record a collection pause of the given
ysr@777 863 // start time, where the given number of bytes were used at the start.
ysr@777 864 // This may involve changing the desired size of a collection set.
ysr@777 865
ysr@777 866 virtual void record_stop_world_start();
ysr@777 867
ysr@777 868 virtual void record_collection_pause_start(double start_time_sec,
ysr@777 869 size_t start_used);
ysr@777 870
ysr@777 871 // Must currently be called while the world is stopped.
ysr@777 872 virtual void record_concurrent_mark_init_start();
ysr@777 873 virtual void record_concurrent_mark_init_end();
ysr@777 874 void record_concurrent_mark_init_end_pre(double
ysr@777 875 mark_init_elapsed_time_ms);
ysr@777 876
ysr@777 877 void record_mark_closure_time(double mark_closure_time_ms);
ysr@777 878
ysr@777 879 virtual void record_concurrent_mark_remark_start();
ysr@777 880 virtual void record_concurrent_mark_remark_end();
ysr@777 881
ysr@777 882 virtual void record_concurrent_mark_cleanup_start();
ysr@777 883 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
ysr@777 884 size_t max_live_bytes);
ysr@777 885 virtual void record_concurrent_mark_cleanup_completed();
ysr@777 886
ysr@777 887 virtual void record_concurrent_pause();
ysr@777 888 virtual void record_concurrent_pause_end();
ysr@777 889
ysr@777 890 virtual void record_collection_pause_end_CH_strong_roots();
ysr@777 891 virtual void record_collection_pause_end_G1_strong_roots();
ysr@777 892
tonyp@2062 893 virtual void record_collection_pause_end();
ysr@777 894
ysr@777 895 // Record the fact that a full collection occurred.
ysr@777 896 virtual void record_full_collection_start();
ysr@777 897 virtual void record_full_collection_end();
ysr@777 898
  // ----------------------------------------------------------------
  // Per-phase timing recorders, called during an evacuation pause to
  // stash the measured durations (in ms) for later reporting and for
  // feeding the pause-time prediction machinery. The _par_last_*
  // arrays are indexed by parallel GC worker/thread id.
  // ----------------------------------------------------------------

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  // Also flags that the SATB drain time has been set for this pause
  // (the value is only meaningful when _satb_drain_time_set is true).
  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set = true;
  }

  void record_satb_drain_processed_buffers (int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers (int thread,
                                           double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  // Object copy time accumulates across record_obj_copy_time() calls,
  // so it must be explicitly zeroed at the start of a pause.
  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  // NOTE: accumulates (+=), unlike the other recorders which assign.
  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  // Starts the timer for "aux" phase i; paired with
  // record_aux_end_time() below.
  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  // Accumulates the elapsed time for "aux" phase i since the matching
  // record_aux_start_time() call.
  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }
ysr@777 981
johnc@1325 982 #ifndef PRODUCT
johnc@1325 983 void record_cc_clear_time(double ms) {
johnc@1325 984 if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
johnc@1325 985 _min_clear_cc_time_ms = ms;
johnc@1325 986 if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
johnc@1325 987 _max_clear_cc_time_ms = ms;
johnc@1325 988 _cur_clear_cc_time_ms = ms;
johnc@1325 989 _cum_clear_cc_time_ms += ms;
johnc@1325 990 _num_cc_clears++;
johnc@1325 991 }
johnc@1325 992 #endif
johnc@1325 993
ysr@777 994 // Record the fact that "bytes" bytes allocated in a region.
ysr@777 995 void record_before_bytes(size_t bytes);
ysr@777 996 void record_after_bytes(size_t bytes);
ysr@777 997
ysr@777 998 // Choose a new collection set. Marks the chosen regions as being
ysr@777 999 // "in_collection_set", and links them together. The head and number of
ysr@777 1000 // the collection set are available via access methods.
tonyp@2062 1001 virtual void choose_collection_set(double target_pause_time_ms) = 0;
ysr@777 1002
ysr@777 1003 // The head of the list (via "next_in_collection_set()") representing the
ysr@777 1004 // current collection set.
ysr@777 1005 HeapRegion* collection_set() { return _collection_set; }
ysr@777 1006
johnc@1829 1007 void clear_collection_set() { _collection_set = NULL; }
johnc@1829 1008
ysr@777 1009 // The number of elements in the current collection set.
ysr@777 1010 size_t collection_set_size() { return _collection_set_size; }
ysr@777 1011
ysr@777 1012 // Add "hr" to the CS.
ysr@777 1013 void add_to_collection_set(HeapRegion* hr);
ysr@777 1014
johnc@1829 1015 // Incremental CSet Support
johnc@1829 1016
  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  // NOTE(review): name breaks the inc_cset_* convention used by the
  // sibling accessors ("inc_set_tail" vs "inc_cset_tail"); renaming
  // would break existing callers, so flagging only.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }
johnc@1829 1025
johnc@1829 1026 // Initialize incremental collection set info.
johnc@1829 1027 void start_incremental_cset_building();
johnc@1829 1028
johnc@1829 1029 void clear_incremental_cset() {
johnc@1829 1030 _inc_cset_head = NULL;
johnc@1829 1031 _inc_cset_tail = NULL;
johnc@1829 1032 }
johnc@1829 1033
johnc@1829 1034 // Stop adding regions to the incremental collection set
johnc@1829 1035 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
johnc@1829 1036
johnc@1829 1037 // Add/remove information about hr to the aggregated information
johnc@1829 1038 // for the incrementally built collection set.
johnc@1829 1039 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
johnc@1829 1040 void remove_from_incremental_cset_info(HeapRegion* hr);
johnc@1829 1041
johnc@1829 1042 // Update information about hr in the aggregated information for
johnc@1829 1043 // the incrementally built collection set.
johnc@1829 1044 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
johnc@1829 1045
johnc@1829 1046 private:
johnc@1829 1047 // Update the incremental cset information when adding a region
johnc@1829 1048 // (should not be called directly).
johnc@1829 1049 void add_region_to_incremental_cset_common(HeapRegion* hr);
johnc@1829 1050
johnc@1829 1051 public:
johnc@1829 1052 // Add hr to the LHS of the incremental collection set.
johnc@1829 1053 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
johnc@1829 1054
johnc@1829 1055 // Add hr to the RHS of the incremental collection set.
johnc@1829 1056 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
johnc@1829 1057
johnc@1829 1058 #ifndef PRODUCT
johnc@1829 1059 void print_collection_set(HeapRegion* list_head, outputStream* st);
johnc@1829 1060 #endif // !PRODUCT
johnc@1829 1061
tonyp@1794 1062 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
tonyp@1794 1063 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
tonyp@1794 1064 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
tonyp@1794 1065
tonyp@1794 1066 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
tonyp@1794 1067 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
tonyp@1794 1068 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
tonyp@1794 1069
tonyp@2011 1070 // This sets the initiate_conc_mark_if_possible() flag to start a
tonyp@2011 1071 // new cycle, as long as we are not already in one. It's best if it
tonyp@2011 1072 // is called during a safepoint when the test whether a cycle is in
tonyp@2011 1073 // progress or not is stable.
tonyp@2011 1074 bool force_initial_mark_if_outside_cycle();
tonyp@2011 1075
tonyp@1794 1076 // This is called at the very beginning of an evacuation pause (it
tonyp@1794 1077 // has to be the first thing that the pause does). If
tonyp@1794 1078 // initiate_conc_mark_if_possible() is true, and the concurrent
tonyp@1794 1079 // marking thread has completed its work during the previous cycle,
tonyp@1794 1080 // it will set during_initial_mark_pause() to true so that the pause does
tonyp@1794 1081 // the initial-mark work and start a marking cycle.
tonyp@1794 1082 void decide_on_conc_mark_initiation();
ysr@777 1083
ysr@777 1084 // If an expansion would be appropriate, because recent GC overhead had
ysr@777 1085 // exceeded the desired limit, return an amount to expand by.
ysr@777 1086 virtual size_t expansion_amount();
ysr@777 1087
ysr@777 1088 // note start of mark thread
ysr@777 1089 void note_start_of_mark_thread();
ysr@777 1090
ysr@777 1091 // The marked bytes of the "r" have changed; reclassify its desirability
ysr@777 1092 // for marking. Also asserts that "r" is eligible for a CS.
ysr@777 1093 virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
ysr@777 1094
ysr@777 1095 #ifndef PRODUCT
ysr@777 1096 // Check any appropriate marked bytes info, asserting false if
ysr@777 1097 // something's wrong, else returning "true".
ysr@777 1098 virtual bool assertMarkedBytesDataOK() = 0;
ysr@777 1099 #endif
ysr@777 1100
ysr@777 1101 // Print tracing information.
ysr@777 1102 void print_tracing_info() const;
ysr@777 1103
ysr@777 1104 // Print stats on young survival ratio
ysr@777 1105 void print_yg_surv_rate_info() const;
ysr@777 1106
apetrusenko@980 1107 void finished_recalculating_age_indexes(bool is_survivors) {
apetrusenko@980 1108 if (is_survivors) {
apetrusenko@980 1109 _survivor_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1110 } else {
apetrusenko@980 1111 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 1112 }
ysr@777 1113 // do that for any other surv rate groups
ysr@777 1114 }
ysr@777 1115
tonyp@2315 1116 bool is_young_list_full() {
tonyp@2315 1117 size_t young_list_length = _g1->young_list()->length();
tonyp@2333 1118 size_t young_list_target_length = _young_list_target_length;
tonyp@2333 1119 if (G1FixedEdenSize) {
tonyp@2333 1120 young_list_target_length -= _max_survivor_regions;
tonyp@2333 1121 }
tonyp@2333 1122 return young_list_length >= young_list_target_length;
tonyp@2333 1123 }
tonyp@2333 1124
tonyp@2333 1125 bool can_expand_young_list() {
tonyp@2333 1126 size_t young_list_length = _g1->young_list()->length();
tonyp@2333 1127 size_t young_list_max_length = _young_list_max_length;
tonyp@2315 1128 if (G1FixedEdenSize) {
tonyp@2315 1129 young_list_max_length -= _max_survivor_regions;
tonyp@2315 1130 }
tonyp@2333 1131 return young_list_length < young_list_max_length;
tonyp@2333 1132 }
tonyp@2315 1133
tonyp@2315 1134 void update_region_num(bool young);
ysr@777 1135
  // Whether the policy is operating in "young GC" mode.
  bool in_young_gc_mode() {
    return _in_young_gc_mode;
  }
  void set_in_young_gc_mode(bool in_young_gc_mode) {
    _in_young_gc_mode = in_young_gc_mode;
  }

  // Whether GCs are fully-young (presumably: collection sets consist
  // of young regions only, as opposed to partially-young GCs — TODO
  // confirm against the .cpp).
  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  // Whether the young list length is chosen adaptively (rather than
  // being fixed).
  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }
ysr@777 1156
ysr@777 1157 inline double get_gc_eff_factor() {
ysr@777 1158 double ratio = _known_garbage_ratio;
ysr@777 1159
ysr@777 1160 double square = ratio * ratio;
ysr@777 1161 // square = square * square;
ysr@777 1162 double ret = square * 9.0 + 1.0;
ysr@777 1163 #if 0
ysr@777 1164 gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
ysr@777 1165 #endif // 0
ysr@777 1166 guarantee(0.0 <= ret && ret < 10.0, "invariant!");
ysr@777 1167 return ret;
ysr@777 1168 }
ysr@777 1169
ysr@777 1170 //
ysr@777 1171 // Survivor regions policy.
ysr@777 1172 //
ysr@777 1173 protected:
ysr@777 1174
ysr@777 1175 // Current tenuring threshold, set to 0 if the collector reaches the
ysr@777 1176 // maximum number of survivor regions.
ysr@777 1177 int _tenuring_threshold;
ysr@777 1178
apetrusenko@980 1179 // The limit on the number of regions allocated for survivors.
apetrusenko@980 1180 size_t _max_survivor_regions;
apetrusenko@980 1181
apetrusenko@980 1182 // The number of survivor regions after a collection.
apetrusenko@980 1183 size_t _recorded_survivor_regions;
apetrusenko@980 1184 // List of survivor regions.
apetrusenko@980 1185 HeapRegion* _recorded_survivor_head;
apetrusenko@980 1186 HeapRegion* _recorded_survivor_tail;
apetrusenko@980 1187
apetrusenko@980 1188 ageTable _survivors_age_table;
apetrusenko@980 1189
ysr@777 1190 public:
ysr@777 1191
ysr@777 1192 inline GCAllocPurpose
ysr@777 1193 evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
ysr@777 1194 if (age < _tenuring_threshold && src_region->is_young()) {
ysr@777 1195 return GCAllocForSurvived;
ysr@777 1196 } else {
ysr@777 1197 return GCAllocForTenured;
ysr@777 1198 }
ysr@777 1199 }
ysr@777 1200
  // Object ages are only tracked for survivor-space allocations.
  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  // The fallback allocation purpose when the preferred one cannot be
  // satisfied: always tenured (the purpose argument is ignored).
  inline GCAllocPurpose alternative_purpose(int purpose) {
    return GCAllocForTenured;
  }
ysr@777 1208
apetrusenko@980 1209 static const size_t REGIONS_UNLIMITED = ~(size_t)0;
apetrusenko@980 1210
apetrusenko@980 1211 size_t max_regions(int purpose);
ysr@777 1212
ysr@777 1213 // The limit on regions for a particular purpose is reached.
ysr@777 1214 void note_alloc_region_limit_reached(int purpose) {
ysr@777 1215 if (purpose == GCAllocForSurvived) {
ysr@777 1216 _tenuring_threshold = 0;
ysr@777 1217 }
ysr@777 1218 }
ysr@777 1219
  // Notifies the survivor rate group that survivor regions are about
  // to be added (paired with note_stop_adding_survivor_regions()).
  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }
apetrusenko@980 1227
  // Records the survivor regions produced by the last collection:
  // their count and the head/tail of their list.
  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head = head;
    _recorded_survivor_tail = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  // Merges a per-thread age table into the policy's aggregate
  // survivor age table.
  void record_thread_age_table(ageTable* age_table)
  {
    _survivors_age_table.merge_par(age_table);
  }
apetrusenko@980 1244
tonyp@2333 1245 void calculate_max_gc_locker_expansion();
tonyp@2333 1246
apetrusenko@980 1247 // Calculates survivor space parameters.
apetrusenko@980 1248 void calculate_survivors_policy();
apetrusenko@980 1249
ysr@777 1250 };
ysr@777 1251
ysr@777 1252 // This encapsulates a particular strategy for a g1 Collector.
ysr@777 1253 //
ysr@777 1254 // Start a concurrent mark when our heap size is n bytes
ysr@777 1255 // greater than our heap size was at the last concurrent
ysr@777 1256 // mark. Where n is a function of the CMSTriggerRatio
ysr@777 1257 // and the MinHeapFreeRatio.
ysr@777 1258 //
ysr@777 1259 // Start a g1 collection pause when we have allocated the
ysr@777 1260 // average number of bytes currently being freed in
ysr@777 1261 // a collection, but only if it is at least one region
ysr@777 1262 // full
ysr@777 1263 //
ysr@777 1264 // Resize Heap based on desired
ysr@777 1265 // allocation space, where desired allocation space is
ysr@777 1266 // a function of survival rate and desired future to size.
ysr@777 1267 //
ysr@777 1268 // Choose collection set by first picking all older regions
ysr@777 1269 // which have a survival rate which beats our projected young
ysr@777 1270 // survival rate. Then fill out the number of needed regions
ysr@777 1271 // with young regions.
ysr@777 1272
class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  // Chooses old regions for the collection set, best (most
  // garbage-rich) first.
  // NOTE(review): allocated in the constructor and apparently never
  // deleted (no destructor visible) — presumably acceptable for a
  // policy object that lives for the VM's lifetime; confirm.
  CollectionSetChooser* _collectionSetChooser;
  // If the estimate is less than desired, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};
ysr@777 1298
ysr@777 1299 // This should move to some place more general...
ysr@777 1300
ysr@777 1301 // If we have "n" measurements, and we've kept track of their "sum" and the
ysr@777 1302 // "sum_of_squares" of the measurements, this returns the variance of the
ysr@777 1303 // sequence.
// Computes the population variance of "n" measurements from their
// running "sum" and "sum_of_squares":
//   var = (sum_sq - 2*avg*sum + n*avg^2) / n
// which is algebraically E[x^2] - (E[x])^2.
inline double variance(int n, double sum_of_squares, double sum) {
  double count = (double)n;
  double avg = sum / count;
  double corrected_sum_sq = sum_of_squares - 2.0 * avg * sum + count * avg * avg;
  return corrected_sum_sq / count;
}
ysr@777 1309
ysr@777 1310 // Local Variables: ***
ysr@777 1311 // c-indentation-style: gnu ***
ysr@777 1312 // End: ***
stefank@2314 1313
stefank@2314 1314 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

mercurial