src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257:e7d0505c8a30
parent:      7195:c02ec279b062
child:       7369:b840813adfcc
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) the virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks, which never touch most of that memory, so without the explicit zeroing the operating system would not actually commit those pages. The fix: if the initialization value of a data structure matches the default value of freshly committed memory (zero), do nothing.
Reviewed-by: jwilhelm, brutisso
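
The gist of the fix, as a minimal sketch (the helper below is hypothetical and not the actual patch): skip the explicit fill whenever the requested initialization value is zero, since freshly committed pages already read as zero and writing to them would force the operating system to back them with real memory.

#include <stddef.h>  // size_t
#include <string.h>  // memset

// Hypothetical helper sketching the idea: only touch the backing memory when
// the fill value differs from the zero contents of freshly committed pages.
static void initialize_aux_data(void* base, size_t size_in_bytes, int init_value) {
  if (init_value != 0) {
    memset(base, init_value, size_in_bytes);  // non-zero fill: pages must be written
  }
  // init_value == 0: leave the pages untouched so the OS does not have to
  // commit them until G1 actually uses the data structure.
}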

ysr@777 1 /*
johnc@4929 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/collectionSetChooser.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1MMUTracker.hpp"
stefank@2314 30 #include "memory/collectorPolicy.hpp"
stefank@2314 31
ysr@777 32 // A G1CollectorPolicy makes policy decisions that determine the
ysr@777 33 // characteristics of the collector. Examples include:
ysr@777 34 // * choice of collection set.
ysr@777 35 // * when to collect.
ysr@777 36
ysr@777 37 class HeapRegion;
ysr@777 38 class CollectionSetChooser;
brutisso@3923 39 class G1GCPhaseTimes;
ysr@777 40
brutisso@3812 41 // TraceGen0Time collects data on _both_ young and mixed evacuation pauses
brutisso@3812 42 // (the latter may contain non-young regions - i.e. regions that are
brutisso@3812 43 // technically in Gen1) while TraceGen1Time collects data about full GCs.
zgu@3900 44 class TraceGen0TimeData : public CHeapObj<mtGC> {
brutisso@3812 45 private:
brutisso@3812 46 unsigned _young_pause_num;
brutisso@3812 47 unsigned _mixed_pause_num;
ysr@777 48
brutisso@3812 49 NumberSeq _all_stop_world_times_ms;
brutisso@3812 50 NumberSeq _all_yield_times_ms;
ysr@777 51
brutisso@3812 52 NumberSeq _total;
brutisso@3812 53 NumberSeq _other;
brutisso@3812 54 NumberSeq _root_region_scan_wait;
brutisso@3812 55 NumberSeq _parallel;
brutisso@3812 56 NumberSeq _ext_root_scan;
brutisso@3812 57 NumberSeq _satb_filtering;
brutisso@3812 58 NumberSeq _update_rs;
brutisso@3812 59 NumberSeq _scan_rs;
brutisso@3812 60 NumberSeq _obj_copy;
brutisso@3812 61 NumberSeq _termination;
brutisso@3812 62 NumberSeq _parallel_other;
brutisso@3812 63 NumberSeq _clear_ct;
ysr@777 64
brutisso@3923 65 void print_summary(const char* str, const NumberSeq* seq) const;
brutisso@3923 66 void print_summary_sd(const char* str, const NumberSeq* seq) const;
ysr@777 67
ysr@777 68 public:
brutisso@3812 69 TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
brutisso@3812 70 void record_start_collection(double time_to_stop_the_world_ms);
brutisso@3812 71 void record_yield_time(double yield_time_ms);
brutisso@3923 72 void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
brutisso@3812 73 void increment_young_collection_count();
brutisso@3812 74 void increment_mixed_collection_count();
brutisso@3812 75 void print() const;
ysr@777 76 };
ysr@777 77
zgu@3900 78 class TraceGen1TimeData : public CHeapObj<mtGC> {
brutisso@3812 79 private:
brutisso@3812 80 NumberSeq _all_full_gc_times;
ysr@777 81
brutisso@3812 82 public:
brutisso@3812 83 void record_full_collection(double full_gc_time_ms);
brutisso@3812 84 void print() const;
ysr@777 85 };
ysr@777 86
brutisso@3358 87 // There are three command line options related to the young gen size:
brutisso@3358 88 // NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
brutisso@3358 89 // just a short form for NewSize==MaxNewSize). G1 will use its internal
brutisso@3358 90 // heuristics to calculate the actual young gen size, so these options
brutisso@3358 91 // basically only limit the range within which G1 can pick a young gen
brutisso@3358 92 // size. Also, these are general options taking byte sizes. G1 will
brutisso@3358 93 // internally work with a number of regions instead. So, some rounding
brutisso@3358 94 // will occur.
brutisso@3358 95 //
brutisso@3358 96 // line we should allow the young gen to be between G1NewSizePercent
johnc@4385 97 // line we should allow the young gen to be between G1NewSizePercent
johnc@4385 98 // and G1MaxNewSizePercent of the heap size. This means that every time
johnc@4385 99 // the heap size changes, the limits for the young gen size will be
johnc@4385 100 // recalculated.
brutisso@3358 101 //
brutisso@3358 102 // If only -XX:NewSize is set we should use the specified value as the
johnc@4385 103 // minimum size for young gen. Still using G1MaxNewSizePercent of the
johnc@4385 104 // heap as maximum.
brutisso@3358 105 //
brutisso@3358 106 // If only -XX:MaxNewSize is set we should use the specified value as the
johnc@4385 107 // maximum size for young gen. Still using G1NewSizePercent of the heap
johnc@4385 108 // as minimum.
brutisso@3358 109 //
brutisso@3358 110 // If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
brutisso@3358 111 // No updates when the heap size changes. There is a special case when
brutisso@3358 112 // NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
brutisso@3358 113 // different heuristic for calculating the collection set when we do mixed
brutisso@3358 114 // collection.
brutisso@3358 115 //
brutisso@3358 116 // If only -XX:NewRatio is set we should use the specified ratio of the heap
brutisso@3358 117 // as both min and max. This will be interpreted as "fixed" just like the
brutisso@3358 118 // NewSize==MaxNewSize case above. But we will update the min and max
brutisso@3358 119 // every time the heap size changes.
brutisso@3358 120 //
brutisso@3358 121 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
brutisso@3358 122 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
zgu@3900 123 class G1YoungGenSizer : public CHeapObj<mtGC> {
brutisso@3358 124 private:
brutisso@3358 125 enum SizerKind {
brutisso@3358 126 SizerDefaults,
brutisso@3358 127 SizerNewSizeOnly,
brutisso@3358 128 SizerMaxNewSizeOnly,
brutisso@3358 129 SizerMaxAndNewSize,
brutisso@3358 130 SizerNewRatio
brutisso@3358 131 };
brutisso@3358 132 SizerKind _sizer_kind;
tonyp@3713 133 uint _min_desired_young_length;
tonyp@3713 134 uint _max_desired_young_length;
brutisso@3358 135 bool _adaptive_size;
tonyp@3713 136 uint calculate_default_min_length(uint new_number_of_heap_regions);
tonyp@3713 137 uint calculate_default_max_length(uint new_number_of_heap_regions);
brutisso@3358 138
jwilhelm@6085 139 // Update the given values for minimum and maximum young gen length in regions
jwilhelm@6085 140 // given the number of heap regions depending on the kind of sizing algorithm.
jwilhelm@6085 141 void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);
jwilhelm@6085 142
brutisso@3358 143 public:
brutisso@3358 144 G1YoungGenSizer();
jwilhelm@6085 145 // Calculate the maximum length of the young gen given the number of regions
jwilhelm@6085 146 // depending on the sizing algorithm.
jwilhelm@6085 147 uint max_young_length(uint number_of_heap_regions);
jwilhelm@6085 148
tonyp@3713 149 void heap_size_changed(uint new_number_of_heap_regions);
tonyp@3713 150 uint min_desired_young_length() {
brutisso@3358 151 return _min_desired_young_length;
brutisso@3358 152 }
tonyp@3713 153 uint max_desired_young_length() {
brutisso@3358 154 return _max_desired_young_length;
brutisso@3358 155 }
brutisso@3358 156 bool adaptive_young_list_length() {
brutisso@3358 157 return _adaptive_size;
brutisso@3358 158 }
brutisso@3358 159 };
brutisso@3358 160
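// To make the flag handling above concrete, here is a sketch (an illustration
// only; the real implementation lives in g1CollectorPolicy.cpp) of how the
// sizer kinds could translate into the min/max bounds that get recalculated
// whenever the heap is resized:
//
//   void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions,
//                                                          uint* min_young_length,
//                                                          uint* max_young_length) {
//     switch (_sizer_kind) {
//       case SizerDefaults:
//         // Neither NewSize nor MaxNewSize given: both bounds follow
//         // G1NewSizePercent / G1MaxNewSizePercent of the current heap.
//         *min_young_length = calculate_default_min_length(number_of_heap_regions);
//         *max_young_length = calculate_default_max_length(number_of_heap_regions);
//         break;
//       case SizerNewSizeOnly:
//         // NewSize fixes the minimum; only the maximum tracks the heap size.
//         *max_young_length = calculate_default_max_length(number_of_heap_regions);
//         *max_young_length = MAX2(*min_young_length, *max_young_length);
//         break;
//       case SizerMaxNewSizeOnly:
//         // MaxNewSize fixes the maximum; only the minimum tracks the heap size.
//         *min_young_length = calculate_default_min_length(number_of_heap_regions);
//         *min_young_length = MIN2(*min_young_length, *max_young_length);
//         break;
//       case SizerMaxAndNewSize:
//         // Both bounds came from the command line: never update them.
//         break;
//       case SizerNewRatio:
//         // "Fixed" young gen of 1/(NewRatio+1) of the heap, but recalculated
//         // whenever the number of heap regions changes.
//         *min_young_length = number_of_heap_regions / (NewRatio + 1);
//         *max_young_length = *min_young_length;
//         break;
//       default:
//         ShouldNotReachHere();
//     }
//   }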
ysr@777 161 class G1CollectorPolicy: public CollectorPolicy {
tonyp@3209 162 private:
ysr@777 163 // either equal to the number of parallel threads, if ParallelGCThreads
ysr@777 164 // has been set, or 1 otherwise
ysr@777 165 int _parallel_gc_threads;
ysr@777 166
jmasa@3294 167 // The number of GC threads currently active.
jmasa@3294 168 uintx _no_of_gc_threads;
jmasa@3294 169
ysr@777 170 enum SomePrivateConstants {
tonyp@1377 171 NumPrevPausesForHeuristics = 10
ysr@777 172 };
ysr@777 173
ysr@777 174 G1MMUTracker* _mmu_tracker;
ysr@777 175
jwilhelm@6085 176 void initialize_alignments();
ysr@777 177 void initialize_flags();
ysr@777 178
tonyp@3209 179 CollectionSetChooser* _collectionSetChooser;
ysr@777 180
brutisso@3923 181 double _full_collection_start_sec;
tonyp@3713 182 uint _cur_collection_pause_used_regions_at_start;
johnc@1325 183
ysr@777 184 // These exclude marking times.
ysr@777 185 TruncatedSeq* _recent_gc_times_ms;
ysr@777 186
ysr@777 187 TruncatedSeq* _concurrent_mark_remark_times_ms;
ysr@777 188 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
ysr@777 189
brutisso@3812 190 TraceGen0TimeData _trace_gen0_time_data;
brutisso@3812 191 TraceGen1TimeData _trace_gen1_time_data;
ysr@777 192
ysr@777 193 double _stop_world_start;
ysr@777 194
tonyp@3337 195 // indicates whether we are in young or mixed GC mode
tonyp@3337 196 bool _gcs_are_young;
ysr@777 197
tonyp@3713 198 uint _young_list_target_length;
tonyp@3713 199 uint _young_list_fixed_length;
ysr@777 200
tonyp@2333 201 // The max number of regions we can extend the eden by while the GC
tonyp@2333 202 // locker is active. This should be >= _young_list_target_length.
tonyp@3713 203 uint _young_list_max_length;
tonyp@2333 204
tonyp@3337 205 bool _last_gc_was_young;
ysr@777 206
ysr@777 207 bool _during_marking;
ysr@777 208 bool _in_marking_window;
ysr@777 209 bool _in_marking_window_im;
ysr@777 210
ysr@777 211 SurvRateGroup* _short_lived_surv_rate_group;
ysr@777 212 SurvRateGroup* _survivor_surv_rate_group;
ysr@777 213 // add here any more surv rate groups
ysr@777 214
tonyp@1791 215 double _gc_overhead_perc;
tonyp@1791 216
tonyp@3119 217 double _reserve_factor;
tonyp@3713 218 uint _reserve_regions;
tonyp@3119 219
ysr@777 220 bool during_marking() {
ysr@777 221 return _during_marking;
ysr@777 222 }
ysr@777 223
ysr@777 224 enum PredictionConstants {
ysr@777 225 TruncatedSeqLength = 10
ysr@777 226 };
ysr@777 227
ysr@777 228 TruncatedSeq* _alloc_rate_ms_seq;
ysr@777 229 double _prev_collection_pause_end_ms;
ysr@777 230
ysr@777 231 TruncatedSeq* _rs_length_diff_seq;
ysr@777 232 TruncatedSeq* _cost_per_card_ms_seq;
tonyp@3337 233 TruncatedSeq* _young_cards_per_entry_ratio_seq;
tonyp@3337 234 TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
ysr@777 235 TruncatedSeq* _cost_per_entry_ms_seq;
tonyp@3337 236 TruncatedSeq* _mixed_cost_per_entry_ms_seq;
ysr@777 237 TruncatedSeq* _cost_per_byte_ms_seq;
ysr@777 238 TruncatedSeq* _constant_other_time_ms_seq;
ysr@777 239 TruncatedSeq* _young_other_cost_per_region_ms_seq;
ysr@777 240 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
ysr@777 241
ysr@777 242 TruncatedSeq* _pending_cards_seq;
ysr@777 243 TruncatedSeq* _rs_lengths_seq;
ysr@777 244
ysr@777 245 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
ysr@777 246
brutisso@3358 247 G1YoungGenSizer* _young_gen_sizer;
brutisso@3120 248
tonyp@3713 249 uint _eden_cset_region_length;
tonyp@3713 250 uint _survivor_cset_region_length;
tonyp@3713 251 uint _old_cset_region_length;
tonyp@3289 252
tonyp@3713 253 void init_cset_region_lengths(uint eden_cset_region_length,
tonyp@3713 254 uint survivor_cset_region_length);
tonyp@3289 255
tonyp@3713 256 uint eden_cset_region_length() { return _eden_cset_region_length; }
tonyp@3713 257 uint survivor_cset_region_length() { return _survivor_cset_region_length; }
tonyp@3713 258 uint old_cset_region_length() { return _old_cset_region_length; }
ysr@777 259
tonyp@3713 260 uint _free_regions_at_end_of_collection;
ysr@777 261
ysr@777 262 size_t _recorded_rs_lengths;
ysr@777 263 size_t _max_rs_lengths;
ysr@777 264 double _sigma;
ysr@777 265
ysr@777 266 size_t _rs_lengths_prediction;
ysr@777 267
tonyp@3539 268 double sigma() { return _sigma; }
ysr@777 269
ysr@777 270 // A function that prevents us putting too much stock in small sample
ysr@777 271 // sets. Returns a factor of at least 1.0 that depends on the number of
ysr@777 272 // samples: 5 or more samples yield 1.0; with fewer samples the factor
ysr@777 273 // grows linearly (scaled by sigma()) up to 1.0 + 2.0 * sigma() at 1 sample.
ysr@777 274 double confidence_factor(int samples) {
ysr@777 275 if (samples > 4) return 1.0;
ysr@777 276 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
ysr@777 277 }
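// For example, with sigma() == 0.5 (chosen here purely for illustration):
// samples = 1 gives 1.0 + 0.5 * 4 / 2.0 = 2.0, samples = 3 gives
// 1.0 + 0.5 * 2 / 2.0 = 1.5, and samples >= 5 gives 1.0.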
ysr@777 278
ysr@777 279 double get_new_neg_prediction(TruncatedSeq* seq) {
ysr@777 280 return seq->davg() - sigma() * seq->dsd();
ysr@777 281 }
ysr@777 282
ysr@777 283 #ifndef PRODUCT
ysr@777 284 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
ysr@777 285 #endif // PRODUCT
ysr@777 286
iveresov@1546 287 void adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 288 double update_rs_processed_buffers,
iveresov@1546 289 double goal_ms);
iveresov@1546 290
jmasa@3294 291 uintx no_of_gc_threads() { return _no_of_gc_threads; }
jmasa@3294 292 void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }
jmasa@3294 293
ysr@777 294 double _pause_time_target_ms;
brutisso@3923 295
ysr@777 296 size_t _pending_cards;
ysr@777 297
ysr@777 298 public:
jmasa@3294 299 // Accessors
ysr@777 300
tonyp@3289 301 void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
brutisso@7195 302 hr->set_eden();
ysr@777 303 hr->install_surv_rate_group(_short_lived_surv_rate_group);
tonyp@3289 304 hr->set_young_index_in_cset(young_index_in_cset);
ysr@777 305 }
ysr@777 306
tonyp@3289 307 void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
brutisso@7195 308 assert(hr->is_survivor(), "pre-condition");
ysr@777 309 hr->install_surv_rate_group(_survivor_surv_rate_group);
tonyp@3289 310 hr->set_young_index_in_cset(young_index_in_cset);
ysr@777 311 }
ysr@777 312
ysr@777 313 #ifndef PRODUCT
ysr@777 314 bool verify_young_ages();
ysr@777 315 #endif // PRODUCT
ysr@777 316
ysr@777 317 double get_new_prediction(TruncatedSeq* seq) {
ysr@777 318 return MAX2(seq->davg() + sigma() * seq->dsd(),
ysr@777 319 seq->davg() * confidence_factor(seq->num()));
ysr@777 320 }
ysr@777 321
ysr@777 322 void record_max_rs_lengths(size_t rs_lengths) {
ysr@777 323 _max_rs_lengths = rs_lengths;
ysr@777 324 }
ysr@777 325
ysr@777 326 size_t predict_rs_length_diff() {
ysr@777 327 return (size_t) get_new_prediction(_rs_length_diff_seq);
ysr@777 328 }
ysr@777 329
ysr@777 330 double predict_alloc_rate_ms() {
ysr@777 331 return get_new_prediction(_alloc_rate_ms_seq);
ysr@777 332 }
ysr@777 333
ysr@777 334 double predict_cost_per_card_ms() {
ysr@777 335 return get_new_prediction(_cost_per_card_ms_seq);
ysr@777 336 }
ysr@777 337
ysr@777 338 double predict_rs_update_time_ms(size_t pending_cards) {
ysr@777 339 return (double) pending_cards * predict_cost_per_card_ms();
ysr@777 340 }
ysr@777 341
tonyp@3337 342 double predict_young_cards_per_entry_ratio() {
tonyp@3337 343 return get_new_prediction(_young_cards_per_entry_ratio_seq);
ysr@777 344 }
ysr@777 345
tonyp@3337 346 double predict_mixed_cards_per_entry_ratio() {
tonyp@3337 347 if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
tonyp@3337 348 return predict_young_cards_per_entry_ratio();
tonyp@3337 349 } else {
tonyp@3337 350 return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
tonyp@3337 351 }
ysr@777 352 }
ysr@777 353
ysr@777 354 size_t predict_young_card_num(size_t rs_length) {
ysr@777 355 return (size_t) ((double) rs_length *
tonyp@3337 356 predict_young_cards_per_entry_ratio());
ysr@777 357 }
ysr@777 358
ysr@777 359 size_t predict_non_young_card_num(size_t rs_length) {
ysr@777 360 return (size_t) ((double) rs_length *
tonyp@3337 361 predict_mixed_cards_per_entry_ratio());
ysr@777 362 }
ysr@777 363
ysr@777 364 double predict_rs_scan_time_ms(size_t card_num) {
tonyp@3337 365 if (gcs_are_young()) {
ysr@777 366 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
tonyp@3337 367 } else {
tonyp@3337 368 return predict_mixed_rs_scan_time_ms(card_num);
tonyp@3337 369 }
ysr@777 370 }
ysr@777 371
tonyp@3337 372 double predict_mixed_rs_scan_time_ms(size_t card_num) {
tonyp@3337 373 if (_mixed_cost_per_entry_ms_seq->num() < 3) {
ysr@777 374 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
tonyp@3337 375 } else {
tonyp@3337 376 return (double) (card_num *
tonyp@3337 377 get_new_prediction(_mixed_cost_per_entry_ms_seq));
tonyp@3337 378 }
ysr@777 379 }
ysr@777 380
ysr@777 381 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
tonyp@3337 382 if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
tonyp@3337 383 return (1.1 * (double) bytes_to_copy) *
tonyp@3337 384 get_new_prediction(_cost_per_byte_ms_seq);
tonyp@3337 385 } else {
ysr@777 386 return (double) bytes_to_copy *
tonyp@3337 387 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
tonyp@3337 388 }
ysr@777 389 }
ysr@777 390
ysr@777 391 double predict_object_copy_time_ms(size_t bytes_to_copy) {
tonyp@3337 392 if (_in_marking_window && !_in_marking_window_im) {
ysr@777 393 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
tonyp@3337 394 } else {
ysr@777 395 return (double) bytes_to_copy *
tonyp@3337 396 get_new_prediction(_cost_per_byte_ms_seq);
tonyp@3337 397 }
ysr@777 398 }
ysr@777 399
ysr@777 400 double predict_constant_other_time_ms() {
ysr@777 401 return get_new_prediction(_constant_other_time_ms_seq);
ysr@777 402 }
ysr@777 403
ysr@777 404 double predict_young_other_time_ms(size_t young_num) {
tonyp@3337 405 return (double) young_num *
tonyp@3337 406 get_new_prediction(_young_other_cost_per_region_ms_seq);
ysr@777 407 }
ysr@777 408
ysr@777 409 double predict_non_young_other_time_ms(size_t non_young_num) {
tonyp@3337 410 return (double) non_young_num *
tonyp@3337 411 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
ysr@777 412 }
ysr@777 413
ysr@777 414 double predict_base_elapsed_time_ms(size_t pending_cards);
ysr@777 415 double predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 416 size_t scanned_cards);
ysr@777 417 size_t predict_bytes_to_copy(HeapRegion* hr);
johnc@3998 418 double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);
ysr@777 419
tonyp@3289 420 void set_recorded_rs_lengths(size_t rs_lengths);
johnc@1829 421
tonyp@3713 422 uint cset_region_length() { return young_cset_region_length() +
tonyp@3713 423 old_cset_region_length(); }
tonyp@3713 424 uint young_cset_region_length() { return eden_cset_region_length() +
tonyp@3713 425 survivor_cset_region_length(); }
ysr@777 426
apetrusenko@980 427 double predict_survivor_regions_evac_time();
apetrusenko@980 428
ysr@777 429 void cset_regions_freed() {
tonyp@3337 430 bool propagate = _last_gc_was_young && !_in_marking_window;
ysr@777 431 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 432 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 433 // also call it on any more surv rate groups
ysr@777 434 }
ysr@777 435
ysr@777 436 G1MMUTracker* mmu_tracker() {
ysr@777 437 return _mmu_tracker;
ysr@777 438 }
ysr@777 439
tonyp@2011 440 double max_pause_time_ms() {
tonyp@2011 441 return _mmu_tracker->max_gc_time() * 1000.0;
tonyp@2011 442 }
tonyp@2011 443
ysr@777 444 double predict_remark_time_ms() {
ysr@777 445 return get_new_prediction(_concurrent_mark_remark_times_ms);
ysr@777 446 }
ysr@777 447
ysr@777 448 double predict_cleanup_time_ms() {
ysr@777 449 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
ysr@777 450 }
ysr@777 451
ysr@777 452 // Returns an estimate of the survival rate of regions at the given
ysr@777 453 // young-gen age in the given survival rate group.
apetrusenko@980 454 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
apetrusenko@980 455 TruncatedSeq* seq = surv_rate_group->get_seq(age);
ysr@777 456 if (seq->num() == 0)
ysr@777 457 gclog_or_tty->print("BARF! age is %d", age);
ysr@777 458 guarantee( seq->num() > 0, "invariant" );
ysr@777 459 double pred = get_new_prediction(seq);
ysr@777 460 if (pred > 1.0)
ysr@777 461 pred = 1.0;
ysr@777 462 return pred;
ysr@777 463 }
ysr@777 464
apetrusenko@980 465 double predict_yg_surv_rate(int age) {
apetrusenko@980 466 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
apetrusenko@980 467 }
apetrusenko@980 468
ysr@777 469 double accum_yg_surv_rate_pred(int age) {
ysr@777 470 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
ysr@777 471 }
ysr@777 472
tonyp@3209 473 private:
ysr@777 474 // Statistics kept per GC stoppage, whether an evacuation pause or a full GC.
ysr@777 475 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
ysr@777 476
ysr@777 477 // Add a new GC of the given duration and end time to the record.
ysr@777 478 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
ysr@777 479
ysr@777 480 // The head of the list (via "next_in_collection_set()") representing the
johnc@1829 481 // current collection set. Set from the incrementally built collection
johnc@1829 482 // set at the start of the pause.
ysr@777 483 HeapRegion* _collection_set;
johnc@1829 484
johnc@1829 485 // The number of bytes in the collection set before the pause. Set from
johnc@1829 486 // the incrementally built collection set at the start of an evacuation
johnc@3998 487 // pause, and incremented in finalize_cset() when adding old regions
johnc@3998 488 // (if any) to the collection set.
ysr@777 489 size_t _collection_set_bytes_used_before;
ysr@777 490
johnc@3998 491 // The number of bytes copied during the GC.
johnc@3998 492 size_t _bytes_copied_during_gc;
johnc@3998 493
johnc@1829 494 // The associated information that is maintained while the incremental
johnc@1829 495 // collection set is being built with young regions. Used to populate
johnc@1829 496 // the recorded info for the evacuation pause.
johnc@1829 497
johnc@1829 498 enum CSetBuildType {
johnc@1829 499 Active, // We are actively building the collection set
johnc@1829 500 Inactive // We are not actively building the collection set
johnc@1829 501 };
johnc@1829 502
johnc@1829 503 CSetBuildType _inc_cset_build_state;
johnc@1829 504
johnc@1829 505 // The head of the incrementally built collection set.
johnc@1829 506 HeapRegion* _inc_cset_head;
johnc@1829 507
johnc@1829 508 // The tail of the incrementally built collection set.
johnc@1829 509 HeapRegion* _inc_cset_tail;
johnc@1829 510
johnc@1829 511 // The number of bytes in the incrementally built collection set.
johnc@1829 512 // Used to set _collection_set_bytes_used_before at the start of
johnc@1829 513 // an evacuation pause.
johnc@1829 514 size_t _inc_cset_bytes_used_before;
johnc@1829 515
johnc@1829 516 // Used to record the highest end of any heap region in the collection set.
johnc@1829 517 HeapWord* _inc_cset_max_finger;
johnc@1829 518
tonyp@3356 519 // The RSet lengths recorded for regions in the CSet. It is updated
tonyp@3356 520 // by the thread that adds a new region to the CSet. We assume that
tonyp@3356 521 // only one thread can be allocating a new CSet region (currently,
tonyp@3356 522 // it does so after taking the Heap_lock) hence no need to
tonyp@3356 523 // synchronize updates to this field.
johnc@1829 524 size_t _inc_cset_recorded_rs_lengths;
johnc@1829 525
tonyp@3356 526 // A concurrent refinement thread periodically samples the young
tonyp@3356 527 // region RSets and needs to update _inc_cset_recorded_rs_lengths as
tonyp@3356 528 // the RSets grow. Instead of having to synchronize updates to that
tonyp@3356 529 // field we accumulate them in this field and fold them into
tonyp@3356 530 // _inc_cset_recorded_rs_lengths at the start of a GC.
tonyp@3356 531 ssize_t _inc_cset_recorded_rs_lengths_diffs;
tonyp@3356 532
tonyp@3356 533 // The predicted elapsed time it will take to collect the regions in
tonyp@3356 534 // the CSet. This is updated by the thread that adds a new region to
tonyp@3356 535 // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
tonyp@3356 536 // MT-safety assumptions.
johnc@1829 537 double _inc_cset_predicted_elapsed_time_ms;
johnc@1829 538
tonyp@3356 539 // See the comment for _inc_cset_recorded_rs_lengths_diffs.
tonyp@3356 540 double _inc_cset_predicted_elapsed_time_ms_diffs;
tonyp@3356 541
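// As a rough sketch (an illustration, not a copy of the actual code in
// g1CollectorPolicy.cpp, which also guards against the ssize_t diff driving
// the size_t total negative), folding the *_diffs fields into the main
// fields at the start of a GC could look like this:
//
//   void G1CollectorPolicy::finalize_incremental_cset_building() {
//     assert(_inc_cset_build_state == Active, "Precondition");
//     // Runs in the VM thread at a safepoint, so plain reads and writes of
//     // these fields are safe here; no synchronization with the refinement
//     // thread's sampling is needed.
//     _inc_cset_recorded_rs_lengths       += _inc_cset_recorded_rs_lengths_diffs;
//     _inc_cset_predicted_elapsed_time_ms += _inc_cset_predicted_elapsed_time_ms_diffs;
//     _inc_cset_recorded_rs_lengths_diffs       = 0;
//     _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
//   }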
ysr@777 542 // Stash a pointer to the g1 heap.
ysr@777 543 G1CollectedHeap* _g1;
ysr@777 544
brutisso@3923 545 G1GCPhaseTimes* _phase_times;
brutisso@3923 546
ysr@777 547 // The ratio of gc time to elapsed time, computed over recent pauses.
ysr@777 548 double _recent_avg_pause_time_ratio;
ysr@777 549
ysr@777 550 double recent_avg_pause_time_ratio() {
ysr@777 551 return _recent_avg_pause_time_ratio;
ysr@777 552 }
ysr@777 553
tonyp@1794 554 // At the end of a pause we check the heap occupancy and we decide
tonyp@1794 555 // whether we will start a marking cycle during the next pause. If
tonyp@1794 556 // we decide that we want to do that, we will set this parameter to
tonyp@1794 557 // true. So, this parameter will stay true between the end of a
tonyp@1794 558 // pause and the beginning of a subsequent pause (not necessarily
tonyp@1794 559 // the next one, see the comments on the next field) when we decide
tonyp@1794 560 // that we will indeed start a marking cycle and do the initial-mark
tonyp@1794 561 // work.
tonyp@1794 562 volatile bool _initiate_conc_mark_if_possible;
ysr@777 563
tonyp@1794 564 // If initiate_conc_mark_if_possible() is set at the beginning of a
tonyp@1794 565 // pause, it is a suggestion that the pause should start a marking
tonyp@1794 566 // cycle by doing the initial-mark work. However, it is possible
tonyp@1794 567 // that the concurrent marking thread is still finishing up the
tonyp@1794 568 // previous marking cycle (e.g., clearing the next marking
tonyp@1794 569 // bitmap). If that is the case we cannot start a new cycle and
tonyp@1794 570 // we'll have to wait for the concurrent marking thread to finish
tonyp@1794 571 // what it is doing. In this case we will postpone the marking cycle
tonyp@1794 572 // initiation decision for the next pause. When we eventually decide
tonyp@1794 573 // to start a cycle, we will set _during_initial_mark_pause which
tonyp@1794 574 // will stay true until the end of the initial-mark pause and it's
tonyp@1794 575 // the condition that indicates that a pause is doing the
tonyp@1794 576 // initial-mark work.
tonyp@1794 577 volatile bool _during_initial_mark_pause;
tonyp@1794 578
tonyp@3337 579 bool _last_young_gc;
ysr@777 580
ysr@777 581 // This set of variables tracks the collector efficiency, in order to
ysr@777 582 // determine whether we should initiate a new marking.
ysr@777 583 double _cur_mark_stop_world_time_ms;
ysr@777 584 double _mark_remark_start_sec;
ysr@777 585 double _mark_cleanup_start_sec;
ysr@777 586
tonyp@3119 587 // Update the young list target length either by setting it to the
tonyp@3119 588 // desired fixed value or by calculating it using G1's pause
tonyp@3119 589 // prediction model. If no rs_lengths parameter is passed, predict
tonyp@3119 590 // the RS lengths using the prediction model, otherwise use the
tonyp@3119 591 // given rs_lengths as the prediction.
tonyp@3119 592 void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
tonyp@3119 593
tonyp@3119 594 // Calculate and return the minimum desired young list target
tonyp@3119 595 // length. This is the minimum desired young list length according
tonyp@3119 596 // to the user's inputs.
tonyp@3713 597 uint calculate_young_list_desired_min_length(uint base_min_length);
tonyp@3119 598
tonyp@3119 599 // Calculate and return the maximum desired young list target
tonyp@3119 600 // length. This is the maximum desired young list length according
tonyp@3119 601 // to the user's inputs.
tonyp@3713 602 uint calculate_young_list_desired_max_length();
tonyp@3119 603
tonyp@3119 604 // Calculate and return the maximum young list target length that
tonyp@3119 605 // can fit into the pause time goal. The parameters are: rs_lengths
tonyp@3119 606 // represent the prediction of how large the young RSet lengths will
tonyp@3119 607 // be, base_min_length is the already existing number of regions in
tonyp@3119 608 // the young list, min_length and max_length are the desired min and
tonyp@3119 609 // max young list length according to the user's inputs.
tonyp@3713 610 uint calculate_young_list_target_length(size_t rs_lengths,
tonyp@3713 611 uint base_min_length,
tonyp@3713 612 uint desired_min_length,
tonyp@3713 613 uint desired_max_length);
tonyp@3119 614
tonyp@3119 615 // Check whether a given young length (young_length) fits into the
tonyp@3119 616 // given target pause time and whether the prediction for the amount
tonyp@3119 617 // of objects to be copied for the given length will fit into the
tonyp@3119 618 // given free space (expressed by base_free_regions). It is used by
tonyp@3119 619 // calculate_young_list_target_length().
tonyp@3713 620 bool predict_will_fit(uint young_length, double base_time_ms,
tonyp@3713 621 uint base_free_regions, double target_pause_time_ms);
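// A sketch of the shape of this check (illustrative only; the real version
// in g1CollectorPolicy.cpp also makes sure enough free space remains for the
// objects copied out of the collection set):
//
//   bool G1CollectorPolicy::predict_will_fit(uint young_length, double base_time_ms,
//                                            uint base_free_regions,
//                                            double target_pause_time_ms) {
//     if (young_length >= base_free_regions) {
//       return false;  // not enough free regions for this young length
//     }
//     // Predict how many bytes will survive and how long evacuating the
//     // young regions will take on top of the fixed base cost.
//     double surv_rate_sum = accum_yg_surv_rate_pred((int) young_length - 1);
//     size_t bytes_to_copy = (size_t) (surv_rate_sum * (double) HeapRegion::GrainBytes);
//     double pause_time_ms = base_time_ms
//                          + predict_object_copy_time_ms(bytes_to_copy)
//                          + predict_young_other_time_ms(young_length);
//     return pause_time_ms <= target_pause_time_ms;
//   }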
ysr@777 622
johnc@4681 623 // Calculate the minimum number of old regions we'll add to the CSet
johnc@4681 624 // during a mixed GC.
johnc@4681 625 uint calc_min_old_cset_length();
johnc@4681 626
johnc@4681 627 // Calculate the maximum number of old regions we'll add to the CSet
johnc@4681 628 // during a mixed GC.
johnc@4681 629 uint calc_max_old_cset_length();
johnc@4681 630
johnc@4681 631 // Returns the given amount of uncollected reclaimable space
johnc@4681 632 // as a percentage of the current heap capacity.
johnc@4681 633 double reclaimable_bytes_perc(size_t reclaimable_bytes);
johnc@4681 634
ysr@777 635 public:
ysr@777 636
ysr@777 637 G1CollectorPolicy();
ysr@777 638
ysr@777 639 virtual G1CollectorPolicy* as_g1_policy() { return this; }
ysr@777 640
ysr@777 641 virtual CollectorPolicy::Name kind() {
ysr@777 642 return CollectorPolicy::G1CollectorPolicyKind;
ysr@777 643 }
ysr@777 644
brutisso@3923 645 G1GCPhaseTimes* phase_times() const { return _phase_times; }
brutisso@3923 646
tonyp@3119 647 // Check the current value of the young list RSet lengths and
tonyp@3119 648 // compare it against the last prediction. If the current value is
tonyp@3119 649 // higher, recalculate the young list target length prediction.
tonyp@3119 650 void revise_young_list_target_length_if_necessary();
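// A minimal sketch of that check (the sampled_rs_lengths() accessor on the
// young list is an assumption here; see g1CollectorPolicy.cpp for the real
// version):
//
//   void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
//     size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
//     if (rs_lengths > _rs_lengths_prediction) {
//       // The RSets grew beyond the prediction: redo the target length
//       // calculation, using the sampled value as the new prediction.
//       update_young_list_target_length(rs_lengths);
//     }
//   }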
ysr@777 651
brutisso@3120 652 // This should be called after the heap is resized.
tonyp@3713 653 void record_new_heap_size(uint new_number_of_regions);
tonyp@3119 654
tonyp@3209 655 void init();
ysr@777 656
apetrusenko@980 657 // Create jstat counters for the policy.
apetrusenko@980 658 virtual void initialize_gc_policy_counters();
apetrusenko@980 659
ysr@777 660 virtual HeapWord* mem_allocate_work(size_t size,
ysr@777 661 bool is_tlab,
ysr@777 662 bool* gc_overhead_limit_was_exceeded);
ysr@777 663
ysr@777 664 // This method controls how a collector handles one or more
ysr@777 665 // of its generations being fully allocated.
ysr@777 666 virtual HeapWord* satisfy_failed_allocation(size_t size,
ysr@777 667 bool is_tlab);
ysr@777 668
ysr@777 669 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
ysr@777 670
brutisso@3461 671 bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
brutisso@3456 672
johnc@4929 673 // Record the start and end of an evacuation pause.
johnc@4929 674 void record_collection_pause_start(double start_time_sec);
sla@5237 675 void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
ysr@777 676
johnc@4929 677 // Record the start and end of a full collection.
johnc@4929 678 void record_full_collection_start();
johnc@4929 679 void record_full_collection_end();
ysr@777 680
ysr@777 681 // Must currently be called while the world is stopped.
johnc@4929 682 void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
ysr@777 683
johnc@4929 684 // Record start and end of remark.
tonyp@3209 685 void record_concurrent_mark_remark_start();
tonyp@3209 686 void record_concurrent_mark_remark_end();
ysr@777 687
johnc@4929 688 // Record start, end, and completion of cleanup.
tonyp@3209 689 void record_concurrent_mark_cleanup_start();
jmasa@3294 690 void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
tonyp@3209 691 void record_concurrent_mark_cleanup_completed();
ysr@777 692
johnc@4929 693 // Records the information about the heap size for reporting in
johnc@4929 694 // print_detailed_heap_transition
johnc@5123 695 void record_heap_size_info_at_start(bool full);
ysr@777 696
johnc@4929 697 // Print heap sizing transition (with less and more detail).
tonyp@2961 698 void print_heap_transition();
johnc@5123 699 void print_detailed_heap_transition(bool full = false);
ysr@777 700
johnc@4929 701 void record_stop_world_start();
johnc@4929 702 void record_concurrent_pause();
ysr@777 703
tonyp@3028 704 // Record how much space we copied during a GC. This is typically
tonyp@3028 705 // called when a GC alloc region is being retired.
tonyp@3028 706 void record_bytes_copied_during_gc(size_t bytes) {
tonyp@3028 707 _bytes_copied_during_gc += bytes;
tonyp@3028 708 }
tonyp@3028 709
tonyp@3028 710 // The amount of space we copied during a GC.
tonyp@3028 711 size_t bytes_copied_during_gc() {
tonyp@3028 712 return _bytes_copied_during_gc;
tonyp@3028 713 }
ysr@777 714
brutisso@3675 715 // Determine whether there are candidate regions so that the
brutisso@3675 716 // next GC should be mixed. The two action strings are used
brutisso@3675 717 // in the ergo output when the method returns true or false.
tonyp@3539 718 bool next_gc_should_be_mixed(const char* true_action_str,
tonyp@3539 719 const char* false_action_str);
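// Sketch of the decision (illustrative; the candidate-region queries are left
// as placeholders -- see collectionSetChooser.hpp -- and the real version also
// emits ergo messages built from the two action strings):
//
//   bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
//                                                   const char* false_action_str) {
//     if (/* no candidate old regions left over from the last marking */) {
//       return false;
//     }
//     // Keep mixing only while the space still reclaimable in the candidate
//     // regions is a big enough fraction of the heap to be worth the effort.
//     size_t reclaimable_bytes = /* reclaimable bytes in the remaining candidates */;
//     return reclaimable_bytes_perc(reclaimable_bytes) > (double) G1HeapWastePercent;
//   }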
tonyp@3539 720
ysr@777 721 // Choose a new collection set. Marks the chosen regions as being
ysr@777 722 // "in_collection_set", and links them together. The head and number of
ysr@777 723 // the collection set are available via access methods.
sla@5237 724 void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
ysr@777 725
ysr@777 726 // The head of the list (via "next_in_collection_set()") representing the
ysr@777 727 // current collection set.
ysr@777 728 HeapRegion* collection_set() { return _collection_set; }
ysr@777 729
johnc@1829 730 void clear_collection_set() { _collection_set = NULL; }
johnc@1829 731
tonyp@3289 732 // Add old region "hr" to the CSet.
tonyp@3289 733 void add_old_region_to_cset(HeapRegion* hr);
ysr@777 734
johnc@1829 735 // Incremental CSet Support
johnc@1829 736
johnc@1829 737 // The head of the incrementally built collection set.
johnc@1829 738 HeapRegion* inc_cset_head() { return _inc_cset_head; }
johnc@1829 739
johnc@1829 740 // The tail of the incrementally built collection set.
johnc@1829 741 HeapRegion* inc_set_tail() { return _inc_cset_tail; }
johnc@1829 742
johnc@1829 743 // Initialize incremental collection set info.
johnc@1829 744 void start_incremental_cset_building();
johnc@1829 745
tonyp@3356 746 // Perform any final calculations on the incremental CSet fields
tonyp@3356 747 // before we can use them.
tonyp@3356 748 void finalize_incremental_cset_building();
tonyp@3356 749
johnc@1829 750 void clear_incremental_cset() {
johnc@1829 751 _inc_cset_head = NULL;
johnc@1829 752 _inc_cset_tail = NULL;
johnc@1829 753 }
johnc@1829 754
johnc@1829 755 // Stop adding regions to the incremental collection set
johnc@1829 756 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
johnc@1829 757
tonyp@3356 758 // Add information about hr to the aggregated information for the
tonyp@3356 759 // incrementally built collection set.
johnc@1829 760 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
johnc@1829 761
johnc@1829 762 // Update information about hr in the aggregated information for
johnc@1829 763 // the incrementally built collection set.
johnc@1829 764 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
johnc@1829 765
johnc@1829 766 private:
johnc@1829 767 // Update the incremental cset information when adding a region
johnc@1829 768 // (should not be called directly).
johnc@1829 769 void add_region_to_incremental_cset_common(HeapRegion* hr);
johnc@1829 770
johnc@1829 771 public:
johnc@1829 772 // Add hr to the LHS of the incremental collection set.
johnc@1829 773 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
johnc@1829 774
johnc@1829 775 // Add hr to the RHS of the incremental collection set.
johnc@1829 776 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
johnc@1829 777
johnc@1829 778 #ifndef PRODUCT
johnc@1829 779 void print_collection_set(HeapRegion* list_head, outputStream* st);
johnc@1829 780 #endif // !PRODUCT
johnc@1829 781
tonyp@1794 782 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
tonyp@1794 783 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
tonyp@1794 784 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
tonyp@1794 785
tonyp@1794 786 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
tonyp@1794 787 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
tonyp@1794 788 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
tonyp@1794 789
tonyp@2011 790 // This sets the initiate_conc_mark_if_possible() flag to start a
tonyp@2011 791 // new cycle, as long as we are not already in one. It's best if it
tonyp@2011 792 // is called during a safepoint when the test whether a cycle is in
tonyp@2011 793 // progress or not is stable.
tonyp@3114 794 bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
tonyp@2011 795
tonyp@1794 796 // This is called at the very beginning of an evacuation pause (it
tonyp@1794 797 // has to be the first thing that the pause does). If
tonyp@1794 798 // initiate_conc_mark_if_possible() is true, and the concurrent
tonyp@1794 799 // marking thread has completed its work during the previous cycle,
tonyp@1794 800 // it will set during_initial_mark_pause() so that the pause does
tonyp@1794 801 // the initial-mark work and start a marking cycle.
tonyp@1794 802 void decide_on_conc_mark_initiation();
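// A minimal sketch of that decision (cycle_already_in_progress() below is a
// placeholder for whatever the concurrent mark thread actually exposes; see
// g1CollectorPolicy.cpp for the real version):
//
//   void G1CollectorPolicy::decide_on_conc_mark_initiation() {
//     if (!initiate_conc_mark_if_possible()) {
//       return;                                // no marking cycle was requested
//     }
//     if (cycle_already_in_progress()) {
//       // The previous cycle has not finished cleaning up yet: leave the
//       // request pending and re-evaluate it at the next pause.
//       return;
//     }
//     set_during_initial_mark_pause();         // this pause does the initial-mark work
//     clear_initiate_conc_mark_if_possible();  // the request has been honored
//   }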
ysr@777 803
ysr@777 804 // If an expansion would be appropriate, because recent GC overhead had
ysr@777 805 // exceeded the desired limit, return an amount to expand by.
tonyp@3209 806 size_t expansion_amount();
ysr@777 807
ysr@777 808 // Print tracing information.
ysr@777 809 void print_tracing_info() const;
ysr@777 810
ysr@777 811 // Print stats on young survival ratio
ysr@777 812 void print_yg_surv_rate_info() const;
ysr@777 813
apetrusenko@980 814 void finished_recalculating_age_indexes(bool is_survivors) {
apetrusenko@980 815 if (is_survivors) {
apetrusenko@980 816 _survivor_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 817 } else {
apetrusenko@980 818 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 819 }
ysr@777 820 // do that for any other surv rate groups
ysr@777 821 }
ysr@777 822
brutisso@6376 823 size_t young_list_target_length() const { return _young_list_target_length; }
brutisso@6376 824
tonyp@2315 825 bool is_young_list_full() {
tonyp@3713 826 uint young_list_length = _g1->young_list()->length();
tonyp@3713 827 uint young_list_target_length = _young_list_target_length;
tonyp@2333 828 return young_list_length >= young_list_target_length;
tonyp@2333 829 }
tonyp@2333 830
tonyp@2333 831 bool can_expand_young_list() {
tonyp@3713 832 uint young_list_length = _g1->young_list()->length();
tonyp@3713 833 uint young_list_max_length = _young_list_max_length;
tonyp@2333 834 return young_list_length < young_list_max_length;
tonyp@2333 835 }
tonyp@2315 836
tonyp@3713 837 uint young_list_max_length() {
tonyp@3176 838 return _young_list_max_length;
tonyp@3176 839 }
tonyp@3176 840
tonyp@3337 841 bool gcs_are_young() {
tonyp@3337 842 return _gcs_are_young;
ysr@777 843 }
tonyp@3337 844 void set_gcs_are_young(bool gcs_are_young) {
tonyp@3337 845 _gcs_are_young = gcs_are_young;
ysr@777 846 }
ysr@777 847
ysr@777 848 bool adaptive_young_list_length() {
brutisso@3358 849 return _young_gen_sizer->adaptive_young_list_length();
ysr@777 850 }
ysr@777 851
tonyp@3209 852 private:
ysr@777 853 //
ysr@777 854 // Survivor regions policy.
ysr@777 855 //
ysr@777 856
ysr@777 857 // Current tenuring threshold, set to 0 if the collector reaches the
jwilhelm@4129 858 // maximum number of survivor regions.
jwilhelm@4129 859 uint _tenuring_threshold;
ysr@777 860
apetrusenko@980 861 // The limit on the number of regions allocated for survivors.
tonyp@3713 862 uint _max_survivor_regions;
apetrusenko@980 863
tonyp@2961 864 // For reporting purposes.
johnc@5123 865 // The value of _heap_used_bytes_before_gc is also used to calculate
johnc@5123 866 // the cost of copying.
johnc@5123 867
johnc@5123 868 size_t _eden_used_bytes_before_gc; // Eden occupancy before GC
johnc@5123 869 size_t _survivor_used_bytes_before_gc; // Survivor occupancy before GC
johnc@5123 870 size_t _heap_used_bytes_before_gc; // Heap occupancy before GC
johnc@5123 871 size_t _metaspace_used_bytes_before_gc; // Metaspace occupancy before GC
johnc@5123 872
johnc@5123 873 size_t _eden_capacity_bytes_before_gc; // Eden capacity before GC
johnc@5123 874 size_t _heap_capacity_bytes_before_gc; // Heap capacity before GC
tonyp@2961 875
jwilhelm@4129 876 // The number of survivor regions after a collection.
tonyp@3713 877 uint _recorded_survivor_regions;
apetrusenko@980 878 // List of survivor regions.
apetrusenko@980 879 HeapRegion* _recorded_survivor_head;
apetrusenko@980 880 HeapRegion* _recorded_survivor_tail;
apetrusenko@980 881
apetrusenko@980 882 ageTable _survivors_age_table;
apetrusenko@980 883
ysr@777 884 public:
sla@5237 885 uint tenuring_threshold() const { return _tenuring_threshold; }
ysr@777 886
ysr@777 887 inline GCAllocPurpose
jwilhelm@4129 888 evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
ysr@777 889 if (age < _tenuring_threshold && src_region->is_young()) {
ysr@777 890 return GCAllocForSurvived;
ysr@777 891 } else {
ysr@777 892 return GCAllocForTenured;
ysr@777 893 }
ysr@777 894 }
ysr@777 895
ysr@777 896 inline bool track_object_age(GCAllocPurpose purpose) {
ysr@777 897 return purpose == GCAllocForSurvived;
ysr@777 898 }
ysr@777 899
tonyp@3713 900 static const uint REGIONS_UNLIMITED = (uint) -1;
apetrusenko@980 901
tonyp@3713 902 uint max_regions(int purpose);
ysr@777 903
ysr@777 904 // Called when the limit on regions for a particular purpose has been reached.
ysr@777 905 void note_alloc_region_limit_reached(int purpose) {
ysr@777 906 if (purpose == GCAllocForSurvived) {
ysr@777 907 _tenuring_threshold = 0;
ysr@777 908 }
ysr@777 909 }
ysr@777 910
ysr@777 911 void note_start_adding_survivor_regions() {
ysr@777 912 _survivor_surv_rate_group->start_adding_regions();
ysr@777 913 }
ysr@777 914
ysr@777 915 void note_stop_adding_survivor_regions() {
ysr@777 916 _survivor_surv_rate_group->stop_adding_regions();
ysr@777 917 }
apetrusenko@980 918
tonyp@3713 919 void record_survivor_regions(uint regions,
apetrusenko@980 920 HeapRegion* head,
apetrusenko@980 921 HeapRegion* tail) {
apetrusenko@980 922 _recorded_survivor_regions = regions;
apetrusenko@980 923 _recorded_survivor_head = head;
apetrusenko@980 924 _recorded_survivor_tail = tail;
apetrusenko@980 925 }
apetrusenko@980 926
tonyp@3713 927 uint recorded_survivor_regions() {
tonyp@1273 928 return _recorded_survivor_regions;
tonyp@1273 929 }
tonyp@1273 930
tonyp@3713 931 void record_thread_age_table(ageTable* age_table) {
apetrusenko@980 932 _survivors_age_table.merge_par(age_table);
apetrusenko@980 933 }
apetrusenko@980 934
tonyp@3119 935 void update_max_gc_locker_expansion();
tonyp@2333 936
apetrusenko@980 937 // Calculates survivor space parameters.
tonyp@3119 938 void update_survivors_policy();
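// A sketch of what these two updates could look like (illustrative; see
// g1CollectorPolicy.cpp for the real version):
//
//   void G1CollectorPolicy::update_survivors_policy() {
//     // Cap survivor space at 1/SurvivorRatio of the young list target length.
//     double max_survivor_regions_d =
//       (double) _young_list_target_length / (double) SurvivorRatio;
//     _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
//
//     // Pick a tenuring threshold so the predicted survivors fit in that cap.
//     _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
//         HeapRegion::GrainWords * _max_survivor_regions);
//   }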
apetrusenko@980 939
jwilhelm@6085 940 virtual void post_heap_initialize();
ysr@777 941 };
ysr@777 942
ysr@777 943 // This should move to some place more general...
ysr@777 944
ysr@777 945 // If we have "n" measurements, and we've kept track of their "sum" and the
ysr@777 946 // "sum_of_squares" of the measurements, this returns the variance of the
ysr@777 947 // sequence.
ysr@777 948 inline double variance(int n, double sum_of_squares, double sum) {
ysr@777 949 double n_d = (double)n;
ysr@777 950 double avg = sum/n_d;
ysr@777 951 return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
ysr@777 952 }
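// Expanding avg = sum / n_d shows that this is the usual population variance:
//   (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d
//     = sum_of_squares / n_d - 2.0 * avg * avg + avg * avg
//     = sum_of_squares / n_d - avg * avg
// i.e. the mean of the squares minus the square of the mean.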
ysr@777 953
stefank@2314 954 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
