src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author:      johnc
date:        Tue, 21 Aug 2012 14:10:39 -0700
changeset:   3998:7383557659bd
parent:      3924:3a431b605145
child:       4015:bb3f6194fedb
permissions: -rw-r--r--

7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso
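
The GC-efficiency change described in the summary can be illustrated with a
small standalone sketch. Everything below is hypothetical: the struct names,
helpers and cost coefficients are made up for illustration and are not the
patch itself. The point is only the shape of the calculation: a non-young
region's collection time is predicted from the bytes that are actually live
in it, and its GC efficiency is the number of reclaimable bytes per predicted
millisecond, so mostly-dead regions rank as much cheaper to collect.

#include <cstddef>
#include <cstdio>

// Per-region inputs (hypothetical stand-ins for HeapRegion data).
struct RegionStats {
  size_t region_size_bytes;   // capacity of the heap region
  size_t live_bytes;          // bytes found live by the last marking
  size_t rs_length;           // remembered set length (cards to scan)
};

// Cost coefficients of the kind G1 derives from historical (mixed GC) data;
// the values used in main() below are invented for illustration only.
struct CostModel {
  double cost_per_card_ms;    // RSet scanning cost per card
  double cost_per_byte_ms;    // object copying cost per live byte
};

static double predict_region_time_ms(const RegionStats& r, const CostModel& c) {
  // Copy cost is charged for the live bytes only, not the full region size.
  return r.rs_length * c.cost_per_card_ms + r.live_bytes * c.cost_per_byte_ms;
}

static double gc_efficiency(const RegionStats& r, const CostModel& c) {
  double reclaimable = (double)(r.region_size_bytes - r.live_bytes);
  return reclaimable / predict_region_time_ms(r, c);
}

int main() {
  CostModel c = { 0.001, 0.00001 };
  RegionStats mostly_dead = { 1024 * 1024,  64 * 1024, 500 };
  RegionStats mostly_live = { 1024 * 1024, 900 * 1024, 500 };
  std::printf("mostly dead: %.0f reclaimable bytes/ms\n", gc_efficiency(mostly_dead, c));
  std::printf("mostly live: %.0f reclaimable bytes/ms\n", gc_efficiency(mostly_live, c));
  return 0;
}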

ysr@777 1 /*
tonyp@3416 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/collectionSetChooser.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1MMUTracker.hpp"
stefank@2314 30 #include "memory/collectorPolicy.hpp"
stefank@2314 31
ysr@777 32 // A G1CollectorPolicy makes policy decisions that determine the
ysr@777 33 // characteristics of the collector. Examples include:
ysr@777 34 // * choice of collection set.
ysr@777 35 // * when to collect.
ysr@777 36
ysr@777 37 class HeapRegion;
ysr@777 38 class CollectionSetChooser;
brutisso@3923 39 class G1GCPhaseTimes;
ysr@777 40
brutisso@3812 41 // TraceGen0Time collects data on _both_ young and mixed evacuation pauses
brutisso@3812 42 // (the latter may contain non-young regions - i.e. regions that are
brutisso@3812 43 // technically in Gen1) while TraceGen1Time collects data about full GCs.
zgu@3900 44 class TraceGen0TimeData : public CHeapObj<mtGC> {
brutisso@3812 45 private:
brutisso@3812 46 unsigned _young_pause_num;
brutisso@3812 47 unsigned _mixed_pause_num;
ysr@777 48
brutisso@3812 49 NumberSeq _all_stop_world_times_ms;
brutisso@3812 50 NumberSeq _all_yield_times_ms;
ysr@777 51
brutisso@3812 52 NumberSeq _total;
brutisso@3812 53 NumberSeq _other;
brutisso@3812 54 NumberSeq _root_region_scan_wait;
brutisso@3812 55 NumberSeq _parallel;
brutisso@3812 56 NumberSeq _ext_root_scan;
brutisso@3812 57 NumberSeq _satb_filtering;
brutisso@3812 58 NumberSeq _update_rs;
brutisso@3812 59 NumberSeq _scan_rs;
brutisso@3812 60 NumberSeq _obj_copy;
brutisso@3812 61 NumberSeq _termination;
brutisso@3812 62 NumberSeq _parallel_other;
brutisso@3812 63 NumberSeq _clear_ct;
ysr@777 64
brutisso@3923 65 void print_summary(const char* str, const NumberSeq* seq) const;
brutisso@3923 66 void print_summary_sd(const char* str, const NumberSeq* seq) const;
ysr@777 67
ysr@777 68 public:
brutisso@3812 69 TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
brutisso@3812 70 void record_start_collection(double time_to_stop_the_world_ms);
brutisso@3812 71 void record_yield_time(double yield_time_ms);
brutisso@3923 72 void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
brutisso@3812 73 void increment_young_collection_count();
brutisso@3812 74 void increment_mixed_collection_count();
brutisso@3812 75 void print() const;
ysr@777 76 };
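
// A plausible call order for TraceGen0TimeData, inferred from the method
// names above (the actual call sites live in g1CollectorPolicy.cpp):
//   record_start_collection(<time to reach the safepoint>);
//   record_yield_time(<yield time>);                 // possibly repeated
//   ... the evacuation pause runs ...
//   record_end_collection(<pause time>, <phase times>);
//   increment_young_collection_count();              // or the mixed variant
// print() later emits a summary of the accumulated sequences when
// TraceGen0Time is enabled.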
ysr@777 77
zgu@3900 78 class TraceGen1TimeData : public CHeapObj<mtGC> {
brutisso@3812 79 private:
brutisso@3812 80 NumberSeq _all_full_gc_times;
ysr@777 81
brutisso@3812 82 public:
brutisso@3812 83 void record_full_collection(double full_gc_time_ms);
brutisso@3812 84 void print() const;
ysr@777 85 };
ysr@777 86
brutisso@3358 87 // There are three command line options related to the young gen size:
brutisso@3358 88 // NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
brutisso@3358 89 // just a short form for NewSize==MaxNewSize). G1 will use its internal
brutisso@3358 90 // heuristics to calculate the actual young gen size, so these options
brutisso@3358 91 // basically only limit the range within which G1 can pick a young gen
brutisso@3358 92 // size. Also, these are general options taking byte sizes. G1 will
brutisso@3358 93 // internally work with a number of regions instead. So, some rounding
brutisso@3358 94 // will occur.
brutisso@3358 95 //
brutisso@3358 96 // If nothing related to the young gen size is set on the command
brutisso@3358 97 // line we should allow the young gen to be between
brutisso@3358 98 // G1DefaultMinNewGenPercent and G1DefaultMaxNewGenPercent of the
brutisso@3358 99 // heap size. This means that every time the heap size changes the
brutisso@3358 100 // limits for the young gen size will be updated.
brutisso@3358 101 //
brutisso@3358 102 // If only -XX:NewSize is set we should use the specified value as the
brutisso@3358 103 // minimum size for young gen. Still using G1DefaultMaxNewGenPercent
brutisso@3358 104 // of the heap as maximum.
brutisso@3358 105 //
brutisso@3358 106 // If only -XX:MaxNewSize is set we should use the specified value as the
brutisso@3358 107 // maximum size for young gen. Still using G1DefaultMinNewGenPercent
brutisso@3358 108 // of the heap as minimum.
brutisso@3358 109 //
brutisso@3358 110 // If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
brutisso@3358 111 // No updates when the heap size changes. There is a special case when
brutisso@3358 112 // NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
brutisso@3358 113 // different heuristic for calculating the collection set when we do mixed
brutisso@3358 114 // collection.
brutisso@3358 115 //
brutisso@3358 116 // If only -XX:NewRatio is set we should use the specified ratio of the heap
brutisso@3358 117 // as both min and max. This will be interpreted as "fixed" just like the
brutisso@3358 118 // NewSize==MaxNewSize case above. But we will update the min and max
brutisso@3358 119 // every time the heap size changes.
brutisso@3358 120 //
brutisso@3358 121 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
brutisso@3358 122 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
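//
// As an illustrative summary (assumed from the description above; the actual
// selection is presumably made in the G1YoungGenSizer constructor), the
// combinations map onto the sizer modes declared below as follows:
//
//   flags set on the command line            resulting mode
//   -------------------------------------    -------------------
//   none of NewSize/MaxNewSize/NewRatio  ->  SizerDefaults
//   NewSize only                         ->  SizerNewSizeOnly
//   MaxNewSize only                      ->  SizerMaxNewSizeOnly
//   NewSize and MaxNewSize (or -Xmn)     ->  SizerMaxAndNewSize
//   NewRatio only                        ->  SizerNewRatio
//   NewRatio plus NewSize/MaxNewSize     ->  NewRatio ignored (warning printed)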
zgu@3900 123 class G1YoungGenSizer : public CHeapObj<mtGC> {
brutisso@3358 124 private:
brutisso@3358 125 enum SizerKind {
brutisso@3358 126 SizerDefaults,
brutisso@3358 127 SizerNewSizeOnly,
brutisso@3358 128 SizerMaxNewSizeOnly,
brutisso@3358 129 SizerMaxAndNewSize,
brutisso@3358 130 SizerNewRatio
brutisso@3358 131 };
brutisso@3358 132 SizerKind _sizer_kind;
tonyp@3713 133 uint _min_desired_young_length;
tonyp@3713 134 uint _max_desired_young_length;
brutisso@3358 135 bool _adaptive_size;
tonyp@3713 136 uint calculate_default_min_length(uint new_number_of_heap_regions);
tonyp@3713 137 uint calculate_default_max_length(uint new_number_of_heap_regions);
brutisso@3358 138
brutisso@3358 139 public:
brutisso@3358 140 G1YoungGenSizer();
tonyp@3713 141 void heap_size_changed(uint new_number_of_heap_regions);
tonyp@3713 142 uint min_desired_young_length() {
brutisso@3358 143 return _min_desired_young_length;
brutisso@3358 144 }
tonyp@3713 145 uint max_desired_young_length() {
brutisso@3358 146 return _max_desired_young_length;
brutisso@3358 147 }
brutisso@3358 148 bool adaptive_young_list_length() {
brutisso@3358 149 return _adaptive_size;
brutisso@3358 150 }
brutisso@3358 151 };
brutisso@3358 152
ysr@777 153 class G1CollectorPolicy: public CollectorPolicy {
tonyp@3209 154 private:
ysr@777 155 // either equal to the number of parallel threads, if ParallelGCThreads
ysr@777 156 // has been set, or 1 otherwise
ysr@777 157 int _parallel_gc_threads;
ysr@777 158
jmasa@3294 159 // The number of GC threads currently active.
jmasa@3294 160 uintx _no_of_gc_threads;
jmasa@3294 161
ysr@777 162 enum SomePrivateConstants {
tonyp@1377 163 NumPrevPausesForHeuristics = 10
ysr@777 164 };
ysr@777 165
ysr@777 166 G1MMUTracker* _mmu_tracker;
ysr@777 167
ysr@777 168 void initialize_flags();
ysr@777 169
ysr@777 170 void initialize_all() {
ysr@777 171 initialize_flags();
ysr@777 172 initialize_size_info();
ysr@777 173 initialize_perm_generation(PermGen::MarkSweepCompact);
ysr@777 174 }
ysr@777 175
tonyp@3209 176 CollectionSetChooser* _collectionSetChooser;
ysr@777 177
brutisso@3923 178 double _full_collection_start_sec;
ysr@777 179 size_t _cur_collection_pause_used_at_start_bytes;
tonyp@3713 180 uint _cur_collection_pause_used_regions_at_start;
johnc@1325 181
ysr@777 182 // These exclude marking times.
ysr@777 183 TruncatedSeq* _recent_gc_times_ms;
ysr@777 184
ysr@777 185 TruncatedSeq* _concurrent_mark_remark_times_ms;
ysr@777 186 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
ysr@777 187
brutisso@3812 188 TraceGen0TimeData _trace_gen0_time_data;
brutisso@3812 189 TraceGen1TimeData _trace_gen1_time_data;
ysr@777 190
ysr@777 191 double _stop_world_start;
ysr@777 192
tonyp@3337 193 // indicates whether we are in young or mixed GC mode
tonyp@3337 194 bool _gcs_are_young;
ysr@777 195
tonyp@3713 196 uint _young_list_target_length;
tonyp@3713 197 uint _young_list_fixed_length;
brutisso@3120 198 size_t _prev_eden_capacity; // used for logging
ysr@777 199
tonyp@2333 200 // The max number of regions we can extend the eden by while the GC
tonyp@2333 201 // locker is active. This should be >= _young_list_target_length;
tonyp@3713 202 uint _young_list_max_length;
tonyp@2333 203
tonyp@3337 204 bool _last_gc_was_young;
ysr@777 205
ysr@777 206 bool _during_marking;
ysr@777 207 bool _in_marking_window;
ysr@777 208 bool _in_marking_window_im;
ysr@777 209
ysr@777 210 SurvRateGroup* _short_lived_surv_rate_group;
ysr@777 211 SurvRateGroup* _survivor_surv_rate_group;
ysr@777 212 // add here any more surv rate groups
ysr@777 213
tonyp@1791 214 double _gc_overhead_perc;
tonyp@1791 215
tonyp@3119 216 double _reserve_factor;
tonyp@3713 217 uint _reserve_regions;
tonyp@3119 218
ysr@777 219 bool during_marking() {
ysr@777 220 return _during_marking;
ysr@777 221 }
ysr@777 222
ysr@777 223 private:
ysr@777 224 enum PredictionConstants {
ysr@777 225 TruncatedSeqLength = 10
ysr@777 226 };
ysr@777 227
ysr@777 228 TruncatedSeq* _alloc_rate_ms_seq;
ysr@777 229 double _prev_collection_pause_end_ms;
ysr@777 230
ysr@777 231 TruncatedSeq* _rs_length_diff_seq;
ysr@777 232 TruncatedSeq* _cost_per_card_ms_seq;
tonyp@3337 233 TruncatedSeq* _young_cards_per_entry_ratio_seq;
tonyp@3337 234 TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
ysr@777 235 TruncatedSeq* _cost_per_entry_ms_seq;
tonyp@3337 236 TruncatedSeq* _mixed_cost_per_entry_ms_seq;
ysr@777 237 TruncatedSeq* _cost_per_byte_ms_seq;
ysr@777 238 TruncatedSeq* _constant_other_time_ms_seq;
ysr@777 239 TruncatedSeq* _young_other_cost_per_region_ms_seq;
ysr@777 240 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
ysr@777 241
ysr@777 242 TruncatedSeq* _pending_cards_seq;
ysr@777 243 TruncatedSeq* _rs_lengths_seq;
ysr@777 244
ysr@777 245 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
ysr@777 246
brutisso@3358 247 G1YoungGenSizer* _young_gen_sizer;
brutisso@3120 248
tonyp@3713 249 uint _eden_cset_region_length;
tonyp@3713 250 uint _survivor_cset_region_length;
tonyp@3713 251 uint _old_cset_region_length;
tonyp@3289 252
tonyp@3713 253 void init_cset_region_lengths(uint eden_cset_region_length,
tonyp@3713 254 uint survivor_cset_region_length);
tonyp@3289 255
tonyp@3713 256 uint eden_cset_region_length() { return _eden_cset_region_length; }
tonyp@3713 257 uint survivor_cset_region_length() { return _survivor_cset_region_length; }
tonyp@3713 258 uint old_cset_region_length() { return _old_cset_region_length; }
ysr@777 259
tonyp@3713 260 uint _free_regions_at_end_of_collection;
ysr@777 261
ysr@777 262 size_t _recorded_rs_lengths;
ysr@777 263 size_t _max_rs_lengths;
ysr@777 264 double _sigma;
ysr@777 265
ysr@777 266 size_t _rs_lengths_prediction;
ysr@777 267
tonyp@3539 268 double sigma() { return _sigma; }
ysr@777 269
ysr@777 270 // A function that prevents us from putting too much stock in small sample
ysr@777 271 // sets. Returns a number between 1.0 and 2.0, depending on the number
ysr@777 272 // of samples: 5 or more samples yield 1.0; fewer scale linearly from
ysr@777 273 // 2.0 at 1 sample to 1.0 at 5.
ysr@777 274 double confidence_factor(int samples) {
ysr@777 275 if (samples > 4) return 1.0;
ysr@777 276 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
ysr@777 277 }
ysr@777 278
ysr@777 279 double get_new_neg_prediction(TruncatedSeq* seq) {
ysr@777 280 return seq->davg() - sigma() * seq->dsd();
ysr@777 281 }
ysr@777 282
ysr@777 283 #ifndef PRODUCT
ysr@777 284 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
ysr@777 285 #endif // PRODUCT
ysr@777 286
iveresov@1546 287 void adjust_concurrent_refinement(double update_rs_time,
iveresov@1546 288 double update_rs_processed_buffers,
iveresov@1546 289 double goal_ms);
iveresov@1546 290
jmasa@3294 291 uintx no_of_gc_threads() { return _no_of_gc_threads; }
jmasa@3294 292 void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }
jmasa@3294 293
ysr@777 294 double _pause_time_target_ms;
brutisso@3923 295
ysr@777 296 size_t _pending_cards;
ysr@777 297
ysr@777 298 public:
jmasa@3294 299 // Accessors
ysr@777 300
tonyp@3289 301 void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
tonyp@3289 302 hr->set_young();
ysr@777 303 hr->install_surv_rate_group(_short_lived_surv_rate_group);
tonyp@3289 304 hr->set_young_index_in_cset(young_index_in_cset);
ysr@777 305 }
ysr@777 306
tonyp@3289 307 void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
tonyp@3289 308 assert(hr->is_young() && hr->is_survivor(), "pre-condition");
ysr@777 309 hr->install_surv_rate_group(_survivor_surv_rate_group);
tonyp@3289 310 hr->set_young_index_in_cset(young_index_in_cset);
ysr@777 311 }
ysr@777 312
ysr@777 313 #ifndef PRODUCT
ysr@777 314 bool verify_young_ages();
ysr@777 315 #endif // PRODUCT
ysr@777 316
ysr@777 317 double get_new_prediction(TruncatedSeq* seq) {
ysr@777 318 return MAX2(seq->davg() + sigma() * seq->dsd(),
ysr@777 319 seq->davg() * confidence_factor(seq->num()));
ysr@777 320 }
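
// Worked example (illustrative numbers only): with seq->davg() == 10.0 ms,
// seq->dsd() == 2.0 ms, sigma() == 0.5 and seq->num() == 3,
//   confidence_factor(3)    == 1.0 + 0.5 * (5 - 3) / 2.0 == 1.5
//   get_new_prediction(seq) == MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.5)
//                           == MAX2(11.0, 15.0) == 15.0 ms
// i.e. predictions based on only a few samples are padded more aggressively
// than those based on a well-populated sequence (num() >= 5).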
ysr@777 321
ysr@777 322 void record_max_rs_lengths(size_t rs_lengths) {
ysr@777 323 _max_rs_lengths = rs_lengths;
ysr@777 324 }
ysr@777 325
ysr@777 326 size_t predict_rs_length_diff() {
ysr@777 327 return (size_t) get_new_prediction(_rs_length_diff_seq);
ysr@777 328 }
ysr@777 329
ysr@777 330 double predict_alloc_rate_ms() {
ysr@777 331 return get_new_prediction(_alloc_rate_ms_seq);
ysr@777 332 }
ysr@777 333
ysr@777 334 double predict_cost_per_card_ms() {
ysr@777 335 return get_new_prediction(_cost_per_card_ms_seq);
ysr@777 336 }
ysr@777 337
ysr@777 338 double predict_rs_update_time_ms(size_t pending_cards) {
ysr@777 339 return (double) pending_cards * predict_cost_per_card_ms();
ysr@777 340 }
ysr@777 341
tonyp@3337 342 double predict_young_cards_per_entry_ratio() {
tonyp@3337 343 return get_new_prediction(_young_cards_per_entry_ratio_seq);
ysr@777 344 }
ysr@777 345
tonyp@3337 346 double predict_mixed_cards_per_entry_ratio() {
tonyp@3337 347 if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
tonyp@3337 348 return predict_young_cards_per_entry_ratio();
tonyp@3337 349 } else {
tonyp@3337 350 return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
tonyp@3337 351 }
ysr@777 352 }
ysr@777 353
ysr@777 354 size_t predict_young_card_num(size_t rs_length) {
ysr@777 355 return (size_t) ((double) rs_length *
tonyp@3337 356 predict_young_cards_per_entry_ratio());
ysr@777 357 }
ysr@777 358
ysr@777 359 size_t predict_non_young_card_num(size_t rs_length) {
ysr@777 360 return (size_t) ((double) rs_length *
tonyp@3337 361 predict_mixed_cards_per_entry_ratio());
ysr@777 362 }
ysr@777 363
ysr@777 364 double predict_rs_scan_time_ms(size_t card_num) {
tonyp@3337 365 if (gcs_are_young()) {
ysr@777 366 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
tonyp@3337 367 } else {
tonyp@3337 368 return predict_mixed_rs_scan_time_ms(card_num);
tonyp@3337 369 }
ysr@777 370 }
ysr@777 371
tonyp@3337 372 double predict_mixed_rs_scan_time_ms(size_t card_num) {
tonyp@3337 373 if (_mixed_cost_per_entry_ms_seq->num() < 3) {
ysr@777 374 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
tonyp@3337 375 } else {
tonyp@3337 376 return (double) (card_num *
tonyp@3337 377 get_new_prediction(_mixed_cost_per_entry_ms_seq));
tonyp@3337 378 }
ysr@777 379 }
ysr@777 380
ysr@777 381 double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
tonyp@3337 382 if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
tonyp@3337 383 return (1.1 * (double) bytes_to_copy) *
tonyp@3337 384 get_new_prediction(_cost_per_byte_ms_seq);
tonyp@3337 385 } else {
ysr@777 386 return (double) bytes_to_copy *
tonyp@3337 387 get_new_prediction(_cost_per_byte_ms_during_cm_seq);
tonyp@3337 388 }
ysr@777 389 }
ysr@777 390
ysr@777 391 double predict_object_copy_time_ms(size_t bytes_to_copy) {
tonyp@3337 392 if (_in_marking_window && !_in_marking_window_im) {
ysr@777 393 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
tonyp@3337 394 } else {
ysr@777 395 return (double) bytes_to_copy *
tonyp@3337 396 get_new_prediction(_cost_per_byte_ms_seq);
tonyp@3337 397 }
ysr@777 398 }
ysr@777 399
ysr@777 400 double predict_constant_other_time_ms() {
ysr@777 401 return get_new_prediction(_constant_other_time_ms_seq);
ysr@777 402 }
ysr@777 403
ysr@777 404 double predict_young_other_time_ms(size_t young_num) {
tonyp@3337 405 return (double) young_num *
tonyp@3337 406 get_new_prediction(_young_other_cost_per_region_ms_seq);
ysr@777 407 }
ysr@777 408
ysr@777 409 double predict_non_young_other_time_ms(size_t non_young_num) {
tonyp@3337 410 return (double) non_young_num *
tonyp@3337 411 get_new_prediction(_non_young_other_cost_per_region_ms_seq);
ysr@777 412 }
ysr@777 413
ysr@777 414 double predict_base_elapsed_time_ms(size_t pending_cards);
ysr@777 415 double predict_base_elapsed_time_ms(size_t pending_cards,
ysr@777 416 size_t scanned_cards);
ysr@777 417 size_t predict_bytes_to_copy(HeapRegion* hr);
johnc@3998 418 double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);
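
// How the predictors above presumably combine (the actual composition lives
// in g1CollectorPolicy.cpp): the base cost of a pause is the predicted RSet
// update time for the pending cards plus the constant "other" time, and each
// candidate region then contributes its predicted RSet scan time (from its
// RSet length), its predicted object copy time (from its predicted bytes to
// copy) and the per-region "other" cost for its young/non-young class.
// predict_region_elapsed_time_ms(hr, for_young_gc) bundles that per-region
// contribution for a single region.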
ysr@777 419
tonyp@3289 420 void set_recorded_rs_lengths(size_t rs_lengths);
johnc@1829 421
tonyp@3713 422 uint cset_region_length() { return young_cset_region_length() +
tonyp@3713 423 old_cset_region_length(); }
tonyp@3713 424 uint young_cset_region_length() { return eden_cset_region_length() +
tonyp@3713 425 survivor_cset_region_length(); }
ysr@777 426
apetrusenko@980 427 double predict_survivor_regions_evac_time();
apetrusenko@980 428
ysr@777 429 void cset_regions_freed() {
tonyp@3337 430 bool propagate = _last_gc_was_young && !_in_marking_window;
ysr@777 431 _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 432 _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
ysr@777 433 // also call it on any more surv rate groups
ysr@777 434 }
ysr@777 435
ysr@777 436 G1MMUTracker* mmu_tracker() {
ysr@777 437 return _mmu_tracker;
ysr@777 438 }
ysr@777 439
tonyp@2011 440 double max_pause_time_ms() {
tonyp@2011 441 return _mmu_tracker->max_gc_time() * 1000.0;
tonyp@2011 442 }
tonyp@2011 443
ysr@777 444 double predict_remark_time_ms() {
ysr@777 445 return get_new_prediction(_concurrent_mark_remark_times_ms);
ysr@777 446 }
ysr@777 447
ysr@777 448 double predict_cleanup_time_ms() {
ysr@777 449 return get_new_prediction(_concurrent_mark_cleanup_times_ms);
ysr@777 450 }
ysr@777 451
ysr@777 452 // Returns an estimate of the survival rate of a region at the given
ysr@777 453 // young-gen age "age".
apetrusenko@980 454 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
apetrusenko@980 455 TruncatedSeq* seq = surv_rate_group->get_seq(age);
ysr@777 456 if (seq->num() == 0)
ysr@777 457 gclog_or_tty->print("BARF! age is %d", age);
ysr@777 458 guarantee( seq->num() > 0, "invariant" );
ysr@777 459 double pred = get_new_prediction(seq);
ysr@777 460 if (pred > 1.0)
ysr@777 461 pred = 1.0;
ysr@777 462 return pred;
ysr@777 463 }
ysr@777 464
apetrusenko@980 465 double predict_yg_surv_rate(int age) {
apetrusenko@980 466 return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
apetrusenko@980 467 }
apetrusenko@980 468
ysr@777 469 double accum_yg_surv_rate_pred(int age) {
ysr@777 470 return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
ysr@777 471 }
ysr@777 472
tonyp@3209 473 private:
ysr@777 474 // Statistics kept per GC stoppage, pause or full.
ysr@777 475 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
ysr@777 476
ysr@777 477 // Add a new GC of the given duration and end time to the record.
ysr@777 478 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
ysr@777 479
ysr@777 480 // The head of the list (via "next_in_collection_set()") representing the
johnc@1829 481 // current collection set. Set from the incrementally built collection
johnc@1829 482 // set at the start of the pause.
ysr@777 483 HeapRegion* _collection_set;
johnc@1829 484
johnc@1829 485 // The number of bytes in the collection set before the pause. Set from
johnc@1829 486 // the incrementally built collection set at the start of an evacuation
johnc@3998 487 // pause, and incremented in finalize_cset() when adding old regions
johnc@3998 488 // (if any) to the collection set.
ysr@777 489 size_t _collection_set_bytes_used_before;
ysr@777 490
johnc@3998 491 // The number of bytes copied during the GC.
johnc@3998 492 size_t _bytes_copied_during_gc;
johnc@3998 493
johnc@1829 494 // The associated information that is maintained while the incremental
johnc@1829 495 // collection set is being built with young regions. Used to populate
johnc@1829 496 // the recorded info for the evacuation pause.
johnc@1829 497
johnc@1829 498 enum CSetBuildType {
johnc@1829 499 Active, // We are actively building the collection set
johnc@1829 500 Inactive // We are not actively building the collection set
johnc@1829 501 };
johnc@1829 502
johnc@1829 503 CSetBuildType _inc_cset_build_state;
johnc@1829 504
johnc@1829 505 // The head of the incrementally built collection set.
johnc@1829 506 HeapRegion* _inc_cset_head;
johnc@1829 507
johnc@1829 508 // The tail of the incrementally built collection set.
johnc@1829 509 HeapRegion* _inc_cset_tail;
johnc@1829 510
johnc@1829 511 // The number of bytes in the incrementally built collection set.
johnc@1829 512 // Used to set _collection_set_bytes_used_before at the start of
johnc@1829 513 // an evacuation pause.
johnc@1829 514 size_t _inc_cset_bytes_used_before;
johnc@1829 515
johnc@1829 516 // Used to record the highest end of any heap region in the collection set.
johnc@1829 517 HeapWord* _inc_cset_max_finger;
johnc@1829 518
tonyp@3356 519 // The RSet lengths recorded for regions in the CSet. It is updated
tonyp@3356 520 // by the thread that adds a new region to the CSet. We assume that
tonyp@3356 521 // only one thread can be allocating a new CSet region (currently,
tonyp@3356 522 // it does so after taking the Heap_lock) hence no need to
tonyp@3356 523 // synchronize updates to this field.
johnc@1829 524 size_t _inc_cset_recorded_rs_lengths;
johnc@1829 525
tonyp@3356 526 // A concurrent refinement thread periodically samples the young
tonyp@3356 527 // region RSets and needs to update _inc_cset_recorded_rs_lengths as
tonyp@3356 528 // the RSets grow. Instead of having to synchronize updates to that
tonyp@3356 529 // field we accumulate them in this field and add them to
tonyp@3356 530 // _inc_cset_recorded_rs_lengths at the start of a GC.
tonyp@3356 531 ssize_t _inc_cset_recorded_rs_lengths_diffs;
tonyp@3356 532
tonyp@3356 533 // The predicted elapsed time it will take to collect the regions in
tonyp@3356 534 // the CSet. This is updated by the thread that adds a new region to
tonyp@3356 535 // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
tonyp@3356 536 // MT-safety assumptions.
johnc@1829 537 double _inc_cset_predicted_elapsed_time_ms;
johnc@1829 538
tonyp@3356 539 // See the comment for _inc_cset_recorded_rs_lengths_diffs.
tonyp@3356 540 double _inc_cset_predicted_elapsed_time_ms_diffs;
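
// Presumed flow for the two *_diffs fields above (based on the comments):
// the concurrent refinement thread's sampling updates only the diff fields,
// avoiding any locking, and finalize_incremental_cset_building() folds the
// accumulated diffs into _inc_cset_recorded_rs_lengths and
// _inc_cset_predicted_elapsed_time_ms at the start of a GC, after which the
// diffs accumulate again from zero.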
tonyp@3356 541
ysr@777 542 // Stash a pointer to the g1 heap.
ysr@777 543 G1CollectedHeap* _g1;
ysr@777 544
brutisso@3923 545 G1GCPhaseTimes* _phase_times;
brutisso@3923 546
ysr@777 547 // The ratio of gc time to elapsed time, computed over recent pauses.
ysr@777 548 double _recent_avg_pause_time_ratio;
ysr@777 549
ysr@777 550 double recent_avg_pause_time_ratio() {
ysr@777 551 return _recent_avg_pause_time_ratio;
ysr@777 552 }
ysr@777 553
tonyp@1794 554 // At the end of a pause we check the heap occupancy and we decide
tonyp@1794 555 // whether we will start a marking cycle during the next pause. If
tonyp@1794 556 // we decide that we want to do that, we will set this parameter to
tonyp@1794 557 // true. So, this parameter will stay true between the end of a
tonyp@1794 558 // pause and the beginning of a subsequent pause (not necessarily
tonyp@1794 559 // the next one, see the comments on the next field) when we decide
tonyp@1794 560 // that we will indeed start a marking cycle and do the initial-mark
tonyp@1794 561 // work.
tonyp@1794 562 volatile bool _initiate_conc_mark_if_possible;
ysr@777 563
tonyp@1794 564 // If initiate_conc_mark_if_possible() is set at the beginning of a
tonyp@1794 565 // pause, it is a suggestion that the pause should start a marking
tonyp@1794 566 // cycle by doing the initial-mark work. However, it is possible
tonyp@1794 567 // that the concurrent marking thread is still finishing up the
tonyp@1794 568 // previous marking cycle (e.g., clearing the next marking
tonyp@1794 569 // bitmap). If that is the case we cannot start a new cycle and
tonyp@1794 570 // we'll have to wait for the concurrent marking thread to finish
tonyp@1794 571 // what it is doing. In this case we will postpone the marking cycle
tonyp@1794 572 // initiation decision for the next pause. When we eventually decide
tonyp@1794 573 // to start a cycle, we will set _during_initial_mark_pause which
tonyp@1794 574 // will stay true until the end of the initial-mark pause and it's
tonyp@1794 575 // the condition that indicates that a pause is doing the
tonyp@1794 576 // initial-mark work.
tonyp@1794 577 volatile bool _during_initial_mark_pause;
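
// Illustrative sequence for the two flags above:
//   end of pause N:   heap occupancy crosses the marking threshold
//                     -> set_initiate_conc_mark_if_possible()
//   start of pause M: decide_on_conc_mark_initiation() checks whether the
//                     concurrent mark thread has finished the previous cycle:
//                     if it has, set_during_initial_mark_pause() and this
//                     pause does the initial-mark work; if it has not, the
//                     flag stays set and is re-evaluated at the next pause.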
tonyp@1794 578
tonyp@3337 579 bool _last_young_gc;
ysr@777 580
ysr@777 581 // This set of variables tracks the collector efficiency, in order to
ysr@777 582 // determine whether we should initiate a new marking.
ysr@777 583 double _cur_mark_stop_world_time_ms;
ysr@777 584 double _mark_remark_start_sec;
ysr@777 585 double _mark_cleanup_start_sec;
ysr@777 586
tonyp@3119 587 // Update the young list target length either by setting it to the
tonyp@3119 588 // desired fixed value or by calculating it using G1's pause
tonyp@3119 589 // prediction model. If no rs_lengths parameter is passed, predict
tonyp@3119 590 // the RS lengths using the prediction model, otherwise use the
tonyp@3119 591 // given rs_lengths as the prediction.
tonyp@3119 592 void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
tonyp@3119 593
tonyp@3119 594 // Calculate and return the minimum desired young list target
tonyp@3119 595 // length. This is the minimum desired young list length according
tonyp@3119 596 // to the user's inputs.
tonyp@3713 597 uint calculate_young_list_desired_min_length(uint base_min_length);
tonyp@3119 598
tonyp@3119 599 // Calculate and return the maximum desired young list target
tonyp@3119 600 // length. This is the maximum desired young list length according
tonyp@3119 601 // to the user's inputs.
tonyp@3713 602 uint calculate_young_list_desired_max_length();
tonyp@3119 603
tonyp@3119 604 // Calculate and return the maximum young list target length that
tonyp@3119 605 // can fit into the pause time goal. The parameters are: rs_lengths
tonyp@3119 606 // represents the prediction of how large the young RSet lengths will
tonyp@3119 607 // be, base_min_length is the already existing number of regions in
tonyp@3119 608 // the young list, and desired_min_length and desired_max_length are the
tonyp@3119 609 // desired min and max young list lengths according to the user's inputs.
tonyp@3713 610 uint calculate_young_list_target_length(size_t rs_lengths,
tonyp@3713 611 uint base_min_length,
tonyp@3713 612 uint desired_min_length,
tonyp@3713 613 uint desired_max_length);
tonyp@3119 614
tonyp@3119 615 // Check whether a given young length (young_length) fits into the
tonyp@3119 616 // given target pause time and whether the prediction for the amount
tonyp@3119 617 // of objects to be copied for the given length will fit into the
tonyp@3119 618 // given free space (expressed by base_free_regions). It is used by
tonyp@3119 619 // calculate_young_list_target_length().
tonyp@3713 620 bool predict_will_fit(uint young_length, double base_time_ms,
tonyp@3713 621 uint base_free_regions, double target_pause_time_ms);
ysr@777 622
ysr@777 623 public:
ysr@777 624
ysr@777 625 G1CollectorPolicy();
ysr@777 626
ysr@777 627 virtual G1CollectorPolicy* as_g1_policy() { return this; }
ysr@777 628
ysr@777 629 virtual CollectorPolicy::Name kind() {
ysr@777 630 return CollectorPolicy::G1CollectorPolicyKind;
ysr@777 631 }
ysr@777 632
brutisso@3923 633 G1GCPhaseTimes* phase_times() const { return _phase_times; }
brutisso@3923 634
tonyp@3119 635 // Check the current value of the young list RSet lengths and
tonyp@3119 636 // compare it against the last prediction. If the current value is
tonyp@3119 637 // higher, recalculate the young list target length prediction.
tonyp@3119 638 void revise_young_list_target_length_if_necessary();
ysr@777 639
brutisso@3120 640 // This should be called after the heap is resized.
tonyp@3713 641 void record_new_heap_size(uint new_number_of_regions);
tonyp@3119 642
tonyp@3209 643 void init();
ysr@777 644
apetrusenko@980 645 // Create jstat counters for the policy.
apetrusenko@980 646 virtual void initialize_gc_policy_counters();
apetrusenko@980 647
ysr@777 648 virtual HeapWord* mem_allocate_work(size_t size,
ysr@777 649 bool is_tlab,
ysr@777 650 bool* gc_overhead_limit_was_exceeded);
ysr@777 651
ysr@777 652 // This method controls how a collector handles one or more
ysr@777 653 // of its generations being fully allocated.
ysr@777 654 virtual HeapWord* satisfy_failed_allocation(size_t size,
ysr@777 655 bool is_tlab);
ysr@777 656
ysr@777 657 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
ysr@777 658
ysr@777 659 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
ysr@777 660
brutisso@3461 661 bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
brutisso@3456 662
ysr@777 663 // Update the heuristic info to record a collection pause of the given
ysr@777 664 // start time, where the given number of bytes were used at the start.
ysr@777 665 // This may involve changing the desired size of a collection set.
ysr@777 666
tonyp@3209 667 void record_stop_world_start();
ysr@777 668
tonyp@3209 669 void record_collection_pause_start(double start_time_sec, size_t start_used);
ysr@777 670
ysr@777 671 // Must currently be called while the world is stopped.
brutisso@3065 672 void record_concurrent_mark_init_end(double
ysr@777 673 mark_init_elapsed_time_ms);
ysr@777 674
tonyp@3209 675 void record_concurrent_mark_remark_start();
tonyp@3209 676 void record_concurrent_mark_remark_end();
ysr@777 677
tonyp@3209 678 void record_concurrent_mark_cleanup_start();
jmasa@3294 679 void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
tonyp@3209 680 void record_concurrent_mark_cleanup_completed();
ysr@777 681
tonyp@3209 682 void record_concurrent_pause();
ysr@777 683
brutisso@3923 684 void record_collection_pause_end(double pause_time);
tonyp@2961 685 void print_heap_transition();
ysr@777 686
ysr@777 687 // Record the fact that a full collection occurred.
tonyp@3209 688 void record_full_collection_start();
tonyp@3209 689 void record_full_collection_end();
ysr@777 690
tonyp@3028 691 // Record how much space we copied during a GC. This is typically
tonyp@3028 692 // called when a GC alloc region is being retired.
tonyp@3028 693 void record_bytes_copied_during_gc(size_t bytes) {
tonyp@3028 694 _bytes_copied_during_gc += bytes;
tonyp@3028 695 }
tonyp@3028 696
tonyp@3028 697 // The amount of space we copied during a GC.
tonyp@3028 698 size_t bytes_copied_during_gc() {
tonyp@3028 699 return _bytes_copied_during_gc;
tonyp@3028 700 }
ysr@777 701
brutisso@3675 702 // Determine whether there are candidate regions so that the
brutisso@3675 703 // next GC should be mixed. The two action strings are used
brutisso@3675 704 // in the ergo output when the method returns true or false.
tonyp@3539 705 bool next_gc_should_be_mixed(const char* true_action_str,
tonyp@3539 706 const char* false_action_str);
tonyp@3539 707
ysr@777 708 // Choose a new collection set. Marks the chosen regions as being
ysr@777 709 // "in_collection_set", and links them together. The head and number of
ysr@777 710 // the collection set are available via access methods.
tonyp@3539 711 void finalize_cset(double target_pause_time_ms);
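
// Presumed use of the two methods above: at the end of a collection,
// next_gc_should_be_mixed() decides, from the remaining candidate old
// regions, whether the following pause should be mixed; finalize_cset()
// then turns the incrementally built young collection set, plus old
// candidate regions when doing a mixed GC, into the actual collection set,
// stopping once the predicted pause time reaches target_pause_time_ms.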
ysr@777 712
ysr@777 713 // The head of the list (via "next_in_collection_set()") representing the
ysr@777 714 // current collection set.
ysr@777 715 HeapRegion* collection_set() { return _collection_set; }
ysr@777 716
johnc@1829 717 void clear_collection_set() { _collection_set = NULL; }
johnc@1829 718
tonyp@3289 719 // Add old region "hr" to the CSet.
tonyp@3289 720 void add_old_region_to_cset(HeapRegion* hr);
ysr@777 721
johnc@1829 722 // Incremental CSet Support
johnc@1829 723
johnc@1829 724 // The head of the incrementally built collection set.
johnc@1829 725 HeapRegion* inc_cset_head() { return _inc_cset_head; }
johnc@1829 726
johnc@1829 727 // The tail of the incrementally built collection set.
johnc@1829 728 HeapRegion* inc_set_tail() { return _inc_cset_tail; }
johnc@1829 729
johnc@1829 730 // Initialize incremental collection set info.
johnc@1829 731 void start_incremental_cset_building();
johnc@1829 732
tonyp@3356 733 // Perform any final calculations on the incremental CSet fields
tonyp@3356 734 // before we can use them.
tonyp@3356 735 void finalize_incremental_cset_building();
tonyp@3356 736
johnc@1829 737 void clear_incremental_cset() {
johnc@1829 738 _inc_cset_head = NULL;
johnc@1829 739 _inc_cset_tail = NULL;
johnc@1829 740 }
johnc@1829 741
johnc@1829 742 // Stop adding regions to the incremental collection set
johnc@1829 743 void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
johnc@1829 744
tonyp@3356 745 // Add information about hr to the aggregated information for the
tonyp@3356 746 // incrementally built collection set.
johnc@1829 747 void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
johnc@1829 748
johnc@1829 749 // Update information about hr in the aggregated information for
johnc@1829 750 // the incrementally built collection set.
johnc@1829 751 void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
johnc@1829 752
johnc@1829 753 private:
johnc@1829 754 // Update the incremental cset information when adding a region
johnc@1829 755 // (should not be called directly).
johnc@1829 756 void add_region_to_incremental_cset_common(HeapRegion* hr);
johnc@1829 757
johnc@1829 758 public:
johnc@1829 759 // Add hr to the LHS of the incremental collection set.
johnc@1829 760 void add_region_to_incremental_cset_lhs(HeapRegion* hr);
johnc@1829 761
johnc@1829 762 // Add hr to the RHS of the incremental collection set.
johnc@1829 763 void add_region_to_incremental_cset_rhs(HeapRegion* hr);
johnc@1829 764
johnc@1829 765 #ifndef PRODUCT
johnc@1829 766 void print_collection_set(HeapRegion* list_head, outputStream* st);
johnc@1829 767 #endif // !PRODUCT
johnc@1829 768
tonyp@1794 769 bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
tonyp@1794 770 void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
tonyp@1794 771 void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
tonyp@1794 772
tonyp@1794 773 bool during_initial_mark_pause() { return _during_initial_mark_pause; }
tonyp@1794 774 void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
tonyp@1794 775 void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
tonyp@1794 776
tonyp@2011 777 // This sets the initiate_conc_mark_if_possible() flag to start a
tonyp@2011 778 // new cycle, as long as we are not already in one. It's best if it
tonyp@2011 779 // is called during a safepoint when the test whether a cycle is in
tonyp@2011 780 // progress or not is stable.
tonyp@3114 781 bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
tonyp@2011 782
tonyp@1794 783 // This is called at the very beginning of an evacuation pause (it
tonyp@1794 784 // has to be the first thing that the pause does). If
tonyp@1794 785 // initiate_conc_mark_if_possible() is true, and the concurrent
tonyp@1794 786 // marking thread has completed its work during the previous cycle,
tonyp@1794 787 // it will set during_initial_mark_pause() to true so that the pause does
tonyp@1794 788 // the initial-mark work and start a marking cycle.
tonyp@1794 789 void decide_on_conc_mark_initiation();
ysr@777 790
ysr@777 791 // If an expansion would be appropriate, because recent GC overhead had
ysr@777 792 // exceeded the desired limit, return an amount to expand by.
tonyp@3209 793 size_t expansion_amount();
ysr@777 794
ysr@777 795 // Print tracing information.
ysr@777 796 void print_tracing_info() const;
ysr@777 797
ysr@777 798 // Print stats on young survival ratio
ysr@777 799 void print_yg_surv_rate_info() const;
ysr@777 800
apetrusenko@980 801 void finished_recalculating_age_indexes(bool is_survivors) {
apetrusenko@980 802 if (is_survivors) {
apetrusenko@980 803 _survivor_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 804 } else {
apetrusenko@980 805 _short_lived_surv_rate_group->finished_recalculating_age_indexes();
apetrusenko@980 806 }
ysr@777 807 // do that for any other surv rate groups
ysr@777 808 }
ysr@777 809
tonyp@2315 810 bool is_young_list_full() {
tonyp@3713 811 uint young_list_length = _g1->young_list()->length();
tonyp@3713 812 uint young_list_target_length = _young_list_target_length;
tonyp@2333 813 return young_list_length >= young_list_target_length;
tonyp@2333 814 }
tonyp@2333 815
tonyp@2333 816 bool can_expand_young_list() {
tonyp@3713 817 uint young_list_length = _g1->young_list()->length();
tonyp@3713 818 uint young_list_max_length = _young_list_max_length;
tonyp@2333 819 return young_list_length < young_list_max_length;
tonyp@2333 820 }
tonyp@2315 821
tonyp@3713 822 uint young_list_max_length() {
tonyp@3176 823 return _young_list_max_length;
tonyp@3176 824 }
tonyp@3176 825
tonyp@3337 826 bool gcs_are_young() {
tonyp@3337 827 return _gcs_are_young;
ysr@777 828 }
tonyp@3337 829 void set_gcs_are_young(bool gcs_are_young) {
tonyp@3337 830 _gcs_are_young = gcs_are_young;
ysr@777 831 }
ysr@777 832
ysr@777 833 bool adaptive_young_list_length() {
brutisso@3358 834 return _young_gen_sizer->adaptive_young_list_length();
ysr@777 835 }
ysr@777 836
tonyp@3209 837 private:
ysr@777 838 //
ysr@777 839 // Survivor regions policy.
ysr@777 840 //
ysr@777 841
ysr@777 842 // Current tenuring threshold, set to 0 if the collector reaches the
ysr@777 843 // maximum number of survivor regions.
ysr@777 844 int _tenuring_threshold;
ysr@777 845
apetrusenko@980 846 // The limit on the number of regions allocated for survivors.
tonyp@3713 847 uint _max_survivor_regions;
apetrusenko@980 848
tonyp@2961 849 // For reporting purposes.
tonyp@2961 850 size_t _eden_bytes_before_gc;
tonyp@2961 851 size_t _survivor_bytes_before_gc;
tonyp@2961 852 size_t _capacity_before_gc;
tonyp@2961 853
apetrusenko@980 854 // The number of survivor regions after a collection.
tonyp@3713 855 uint _recorded_survivor_regions;
apetrusenko@980 856 // List of survivor regions.
apetrusenko@980 857 HeapRegion* _recorded_survivor_head;
apetrusenko@980 858 HeapRegion* _recorded_survivor_tail;
apetrusenko@980 859
apetrusenko@980 860 ageTable _survivors_age_table;
apetrusenko@980 861
ysr@777 862 public:
ysr@777 863
ysr@777 864 inline GCAllocPurpose
ysr@777 865 evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
ysr@777 866 if (age < _tenuring_threshold && src_region->is_young()) {
ysr@777 867 return GCAllocForSurvived;
ysr@777 868 } else {
ysr@777 869 return GCAllocForTenured;
ysr@777 870 }
ysr@777 871 }
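
// Example (following the code above): with _tenuring_threshold == 2, an
// object of age 1 copied out of a young region goes to a survivor region
// (GCAllocForSurvived), while an object of age 2 or more, or any object
// evacuated from a non-young region, is tenured (GCAllocForTenured).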
ysr@777 872
ysr@777 873 inline bool track_object_age(GCAllocPurpose purpose) {
ysr@777 874 return purpose == GCAllocForSurvived;
ysr@777 875 }
ysr@777 876
tonyp@3713 877 static const uint REGIONS_UNLIMITED = (uint) -1;
apetrusenko@980 878
tonyp@3713 879 uint max_regions(int purpose);
ysr@777 880
ysr@777 881 // The limit on regions for a particular purpose is reached.
ysr@777 882 void note_alloc_region_limit_reached(int purpose) {
ysr@777 883 if (purpose == GCAllocForSurvived) {
ysr@777 884 _tenuring_threshold = 0;
ysr@777 885 }
ysr@777 886 }
ysr@777 887
ysr@777 888 void note_start_adding_survivor_regions() {
ysr@777 889 _survivor_surv_rate_group->start_adding_regions();
ysr@777 890 }
ysr@777 891
ysr@777 892 void note_stop_adding_survivor_regions() {
ysr@777 893 _survivor_surv_rate_group->stop_adding_regions();
ysr@777 894 }
apetrusenko@980 895
tonyp@3713 896 void record_survivor_regions(uint regions,
apetrusenko@980 897 HeapRegion* head,
apetrusenko@980 898 HeapRegion* tail) {
apetrusenko@980 899 _recorded_survivor_regions = regions;
apetrusenko@980 900 _recorded_survivor_head = head;
apetrusenko@980 901 _recorded_survivor_tail = tail;
apetrusenko@980 902 }
apetrusenko@980 903
tonyp@3713 904 uint recorded_survivor_regions() {
tonyp@1273 905 return _recorded_survivor_regions;
tonyp@1273 906 }
tonyp@1273 907
tonyp@3713 908 void record_thread_age_table(ageTable* age_table) {
apetrusenko@980 909 _survivors_age_table.merge_par(age_table);
apetrusenko@980 910 }
apetrusenko@980 911
tonyp@3119 912 void update_max_gc_locker_expansion();
tonyp@2333 913
apetrusenko@980 914 // Calculates survivor space parameters.
tonyp@3119 915 void update_survivors_policy();
apetrusenko@980 916
ysr@777 917 };
ysr@777 918
ysr@777 919 // This should move to some place more general...
ysr@777 920
ysr@777 921 // If we have "n" measurements, and we've kept track of their "sum" and the
ysr@777 922 // "sum_of_squares" of the measurements, this returns the variance of the
ysr@777 923 // sequence.
ysr@777 924 inline double variance(int n, double sum_of_squares, double sum) {
ysr@777 925 double n_d = (double)n;
ysr@777 926 double avg = sum/n_d;
ysr@777 927 return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
ysr@777 928 }
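
// Derivation: expanding the sum over i of (x_i - avg)^2 gives
//   sum_of_squares - 2.0 * avg * sum + n_d * avg * avg
// so the expression above is the population variance of the n measurements,
// i.e. E[x^2] - (E[x])^2.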
ysr@777 929
stefank@2314 930 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
