Tue, 28 May 2013 09:32:06 +0200
8013895: G1: G1SummarizeRSetStats output on Linux needs improvement
Summary: Fixed the output of G1SummarizeRSetStats: the datatype used for the number of concurrently processed cards was too small, concurrent remembered set thread time retrieval was added for Linux and Windows (BSD now uses os::elapsedTime()), and other cleanup was done. If G1SummarizeRSetStatsPeriod > 0, the information printed during VM operation is now relative to the previous output rather than always cumulative. At VM exit, the code prints a cumulative summary.
Reviewed-by: johnc, jwilhelm
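
For reference, a minimal sketch of how one might exercise this output (the application name is a placeholder, and the exact unlock flag required is an assumption about how the option is declared):

    java -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions \
         -XX:+G1SummarizeRSetStats -XX:G1SummarizeRSetStatsPeriod=1 MyApp

With a non-zero period, a summary relative to the previous output is printed periodically during operation; the cumulative summary follows at VM exit.
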
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector. Examples include:
// * choice of collection set.
// * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1GCPhaseTimes;

// TraceGen0Time collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
// technically in Gen1) while TraceGen1Time collects data about full GCs.
class TraceGen0TimeData : public CHeapObj<mtGC> {
 private:
  unsigned _young_pause_num;
  unsigned _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

 public:
  TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};

class TraceGen1TimeData : public CHeapObj<mtGC> {
 private:
  NumberSeq _all_full_gc_times;

 public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};

// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
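//
// A hedged, illustrative example (the percentages below assume the default
// values G1NewSizePercent=5 and G1MaxNewSizePercent=60, which is an
// assumption, not something this file specifies): with a 1024m heap and no
// young gen options on the command line, G1 would be free to pick a young
// gen size anywhere between roughly 51m and 614m, rounded to a whole number
// of regions, and would recompute these bounds whenever the heap is resized.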
class G1YoungGenSizer : public CHeapObj<mtGC> {
 private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

 public:
  G1YoungGenSizer();
  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};

class G1CollectorPolicy: public CollectorPolicy {
 private:
  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
  }

  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceGen0TimeData _trace_gen0_time_data;
  TraceGen1TimeData _trace_gen1_time_data;

  double _stop_world_start;

  // indicates whether we are in young or mixed GC mode
  bool _gcs_are_young;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  bool _last_gc_was_young;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  uint   _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

 private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
  uint old_cset_region_length()      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;
  double _sigma;

  size_t _rs_lengths_prediction;

  double sigma() { return _sigma; }

  // A function that prevents us putting too much stock in small sample
  // sets. Returns a number between 2.0 and 1.0 at the default confidence
  // level (sigma() == 0.5), depending on the number of samples. 5 or more
  // samples yields 1.0; fewer scales linearly from 1.0 + 2 * sigma() at
  // 1 sample down to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
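
  // Worked example (added note; assumes the default confidence level,
  // i.e. sigma() == 0.5): with 2 samples the factor above is
  // 1.0 + 0.5 * (5 - 2) / 2.0 = 1.75.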

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

 public:
  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_young();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_young() && hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
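
  // Worked example (added note, with assumed inputs): for davg() == 10.0 ms,
  // dsd() == 2.0 ms, sigma() == 0.5 and 5 or more samples, this returns
  // MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.0) == 11.0 ms.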

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_young_cards_per_entry_ratio() {
    return get_new_prediction(_young_cards_per_entry_ratio_seq);
  }

  double predict_mixed_cards_per_entry_ratio() {
    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
      return predict_young_cards_per_entry_ratio();
    } else {
      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
    }
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (gcs_are_young()) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return predict_mixed_rs_scan_time_ms(card_num);
    }
  }

  double predict_mixed_rs_scan_time_ms(size_t card_num) {
    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return (double) (card_num *
                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
    }
  }

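  // Summary note (added for clarity; derived from the code below): with
  // fewer than 3 cost-per-byte samples taken during concurrent marking we
  // fall back to the normal cost-per-byte sequence, padded by 10% (the 1.1
  // factor), presumably to account for the concurrent marking overhead.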
  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
      return (1.1 * (double) bytes_to_copy) *
             get_new_prediction(_cost_per_byte_ms_seq);
    } else {
      return (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_during_cm_seq);
    }
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im) {
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    } else {
      return (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_seq);
    }
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length()       { return young_cset_region_length() +
                                           old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time();

  void cset_regions_freed() {
    bool propagate = _last_gc_was_young && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at the given
  // young gen age "age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

 private:
  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_cset() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add it to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;
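
  // In short (summary comment added for clarity): a request sets
  // _initiate_conc_mark_if_possible at the end of some pause; at the start
  // of a later pause, once concurrent marking is no longer finishing up,
  // _during_initial_mark_pause is set and that pause performs the
  // initial-mark work.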

  bool _last_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represent the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, desired_min_length and desired_max_length are the
  // desired min and max young list length according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms);

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length();

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length();

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes);

 public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition
  void record_heap_size_info_at_start(bool full);

  // Print heap sizing transition (with less and more detail).
  void print_heap_transition();
  void print_detailed_heap_transition(bool full = false);

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str);

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  void finalize_cset(double target_pause_time_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

 private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

 public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_target_length = _young_list_target_length;
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  uint young_list_max_length() {
    return _young_list_max_length;
  }

  bool gcs_are_young() {
    return _gcs_are_young;
  }
  void set_gcs_are_young(bool gcs_are_young) {
    _gcs_are_young = gcs_are_young;
  }

  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

 private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
  size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;     // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;     // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

 public:

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
      if (age < _tenuring_threshold && src_region->is_young()) {
        return GCAllocForSurvived;
      } else {
        return GCAllocForTenured;
      }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
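
// Derivation note (added for clarity): substituting sum == n_d * avg shows
// that this is algebraically sum_of_squares / n_d - avg * avg, i.e. the
// population variance E[x^2] - E[x]^2.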

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP