/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1GCPhaseTimes;
// TraceGen0Time collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
// technically in Gen1) while TraceGen1Time collects data about full GCs.
class TraceGen0TimeData : public CHeapObj<mtGC> {
private:
  unsigned  _young_pause_num;
  unsigned  _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

public:
  TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {}
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};

class TraceGen1TimeData : public CHeapObj<mtGC> {
private:
  NumberSeq _all_full_gc_times;

public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};
// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
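//
// For example (illustrative, not defaults): with "-XX:NewSize=512m" alone,
// 512m (rounded to whole regions) becomes the lower bound while the upper
// bound remains G1MaxNewSizePercent of the current heap size; with
// "-Xmn512m" (i.e. NewSize==MaxNewSize==512m) the young gen is treated as
// "fixed" and adaptive young gen resizing is disabled.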
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;

  // False when using a fixed young generation size due to command-line options,
  // true otherwise.
  bool _adaptive_size;

  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given values for minimum and maximum young gen length in regions
  // given the number of heap regions depending on the kind of sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions,
                                        uint* min_young_length,
                                        uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum length of the young gen given the number of regions
  // depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};
class G1CollectorPolicy: public CollectorPolicy {
private:
  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceGen0TimeData _trace_gen0_time_data;
  TraceGen1TimeData _trace_gen1_time_data;

  double _stop_world_start;

  // indicates whether we are in young or mixed GC mode
  bool _gcs_are_young;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length;
  uint _young_list_max_length;

  bool _last_gc_was_young;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
  uint old_cset_region_length()      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;
  double _sigma;

  size_t _rs_lengths_prediction;

  double sigma() { return _sigma; }

  // A function that prevents us putting too much stock in small sample
  // sets.  Returns a number between 2.0 and 1.0, depending on the number
  // of samples.  5 or more samples yields one; fewer scales linearly from
  // 2.0 at 1 sample to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
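  // Lower-bound ("negative") prediction for the next value of the sequence:
  // the decaying average minus sigma times the decaying standard deviation.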
  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

public:
  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT
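  // Conservative (upper-bound) prediction for the next value of the sequence:
  // the larger of "decaying average + sigma * decaying standard deviation"
  // and the decaying average padded by the small-sample confidence factor.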
  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_young_cards_per_entry_ratio() {
    return get_new_prediction(_young_cards_per_entry_ratio_seq);
  }

  double predict_mixed_cards_per_entry_ratio() {
    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
      return predict_young_cards_per_entry_ratio();
    } else {
      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
    }
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (gcs_are_young()) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return predict_mixed_rs_scan_time_ms(card_num);
    }
  }

  double predict_mixed_rs_scan_time_ms(size_t card_num) {
    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return (double) (card_num *
                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
    }
  }
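  // Per-byte copy cost while concurrent marking is in progress. With fewer
  // than 3 marking-time samples, fall back to the normal per-byte cost
  // prediction padded by 10%.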
  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
      return (1.1 * (double) bytes_to_copy) *
              get_new_prediction(_cost_per_byte_ms_seq);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_during_cm_seq);
    }
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im) {
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_seq);
    }
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length()       { return young_cset_region_length() +
                                           old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time();
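  // Called when the regions in the collection set have been freed: tells the
  // survivor rate groups that all surviving words have been recorded. The
  // "propagate" flag is set only if the last GC was fully young and we are
  // not inside a marking window.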
  void cset_regions_freed() {
    bool propagate = _last_gc_was_young && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }
  // Returns an estimate of the survival rate of the region at young-gen
  // age "age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }
private:
  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_cset() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of any heap region in the collection set.
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;
  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add them to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;
  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision until the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;
  bool _last_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represent the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, min_length and max_length are the desired min and
  // max young list length according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms);

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length();

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length();

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes);
public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition
  void record_heap_size_info_at_start(bool full);

  // Print heap sizing transition (with less and more detail).
  void print_heap_transition();
  void print_detailed_heap_transition(bool full = false);

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str);

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();
  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full();

  bool can_expand_young_list();

  uint young_list_max_length() {
    return _young_list_max_length;
  }

  bool gcs_are_young() {
    return _gcs_are_young;
  }
  void set_gcs_are_young(bool gcs_are_young) {
    _gcs_are_young = gcs_are_young;
  }

  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }
private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_used_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
  size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;     // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;     // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(InCSetState dest) {
    switch (dest.value()) {
      case InCSetState::Young:
        return _max_survivor_regions;
      case InCSetState::Old:
        return REGIONS_UNLIMITED;
      default:
        assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
        break;
    }
    // keep some compilers happy
    return 0;
  }
  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};
// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
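// (This is the usual identity Var(x) = E[x^2] - E[x]^2: since sum == n_d * avg,
// the last two terms below collapse to -n_d * avg * avg.)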
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP