Wed, 04 Aug 2010 13:03:23 -0400
6963209: G1: remove the concept of abandoned pauses
Summary: As part of 6944166 we disabled the concept of abandoned pauses (i.e., even if the collection set is empty, we still do the pause, if only to update the RSets and scan the roots). This changeset removes the code and structures associated with abandoned pauses.
Reviewed-by: iveresov, johnc
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting and pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.

#define define_num_seq(name)                     \
private:                                         \
  NumberSeq _all_##name##_times_ms;              \
public:                                          \
  void record_##name##_time_ms(double ms) {      \
    _all_##name##_times_ms.add(ms);              \
  }                                              \
  NumberSeq* get_##name##_seq() {                \
    return &_all_##name##_times_ms;              \
  }
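// For illustration only (not part of this changeset): invoking the macro as
// define_num_seq(total), as PauseSummary does below, expands (modulo
// whitespace) to:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) {
//       _all_total_times_ms.add(ms);
//     }
//     NumberSeq* get_total_seq() {
//       return &_all_total_times_ms;
//     }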
class MainBodySummary;

class PauseSummary: public CHeapObj {
  define_num_seq(total)
  define_num_seq(other)

public:
  virtual MainBodySummary* main_body_summary() { return NULL; }
};

class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)      // optional
  define_num_seq(parallel)        // parallel only
  define_num_seq(ext_root_scan)
  define_num_seq(mark_stack_scan)
  define_num_seq(update_rs)
  define_num_seq(scan_rs)
  define_num_seq(obj_copy)
  define_num_seq(termination)     // parallel only
  define_num_seq(parallel_other)  // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)        // parallel only
};

class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary* main_body_summary() { return this; }
};
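// Illustrative use (an assumption for exposition, not code from this change):
// because Summary overrides main_body_summary() to return itself, a caller
// holding only a PauseSummary* can still reach the per-phase sequences:
//
//   PauseSummary* ps = _summary;   // actually points at a Summary
//   MainBodySummary* mbs = ps->main_body_summary();
//   if (mbs != NULL) {
//     mbs->record_update_rs_time_ms(2.5);
//   }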
class G1CollectorPolicy: public CollectorPolicy {
protected:
  // The number of pauses during the execution.
  long _n_pauses;

  // Either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise.
  int _parallel_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  virtual size_t default_init_heap_size() {
    // Pick some reasonable default.
    return 8*M;
  }

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  size_t _cur_collection_pause_used_regions_at_start;
  size_t _prev_collection_pause_used_at_end_bytes;
  double _cur_collection_par_time_ms;
  double _cur_satb_drain_time_ms;
  double _cur_clear_ct_time_ms;
  bool   _satb_drain_time_set;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;  // min
  double _max_clear_cc_time_ms;  // max
  double _cur_clear_cc_time_ms;  // clearing time during current pause
  double _cum_clear_cc_time_ms;  // cumulative clearing time
  jlong  _num_cc_clears;         // number of times the card count cache has been cleared
#endif
  double _cur_CH_strong_roots_end_sec;
  double _cur_CH_strong_roots_dur_ms;
  double _cur_G1_strong_roots_end_sec;
  double _cur_G1_strong_roots_dur_ms;

  // Statistics for recent GC pauses.  See below for how indexed.
  TruncatedSeq* _recent_CH_strong_roots_times_ms;
  TruncatedSeq* _recent_G1_strong_roots_times_ms;
  TruncatedSeq* _recent_evac_times_ms;
  // These exclude marking times.
  TruncatedSeq* _recent_pause_times_ms;
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _recent_CS_bytes_used_before;
  TruncatedSeq* _recent_CS_bytes_surviving;

  TruncatedSeq* _recent_rs_sizes;

  TruncatedSeq* _concurrent_mark_init_times_ms;
  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  Summary* _summary;

  NumberSeq* _all_pause_times_ms;
  NumberSeq* _all_full_gc_times_ms;
  double     _stop_world_start;
  NumberSeq* _all_stop_world_times_ms;
  NumberSeq* _all_yield_times_ms;

  size_t _region_num_young;
  size_t _region_num_tenured;
  size_t _prev_region_num_young;
  size_t _prev_region_num_tenured;

  NumberSeq* _all_mod_union_times_ms;

  int        _aux_num;
  NumberSeq* _all_aux_times_ms;
  double*    _cur_aux_start_times_ms;
  double*    _cur_aux_times_ms;
  bool*      _cur_aux_times_set;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_mark_stack_scan_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;

  // indicates that we are in young GC mode
  bool _in_young_gc_mode;

  // indicates whether we are in full young or partially young GC mode
  bool _full_young_gcs;

  // if true, then it tries to dynamically adjust the length of the
  // young list
  bool _adaptive_young_list_length;
  size_t _young_list_min_length;
  size_t _young_list_target_length;
  size_t _young_list_fixed_length;

  size_t _young_cset_length;
  bool   _last_young_gc_full;

  unsigned _full_young_pause_num;
  unsigned _partial_young_pause_num;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  bool during_marking() {
    return _during_marking;
  }
  // <NEW PREDICTION>

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _scanned_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  TruncatedSeq* _young_gc_eff_seq;

  TruncatedSeq* _max_conc_overhead_seq;

  size_t _recorded_young_regions;
  size_t _recorded_non_young_regions;
  size_t _recorded_region_num;

  size_t _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _recorded_marked_bytes;
  size_t _recorded_young_bytes;

  size_t _predicted_pending_cards;
  size_t _predicted_cards_scanned;
  size_t _predicted_rs_lengths;
  size_t _predicted_bytes_to_copy;

  double _predicted_survival_ratio;
  double _predicted_rs_update_time_ms;
  double _predicted_rs_scan_time_ms;
  double _predicted_object_copy_time_ms;
  double _predicted_constant_other_time_ms;
  double _predicted_young_other_time_ms;
  double _predicted_non_young_other_time_ms;
  double _predicted_pause_time_ms;

  double _vtime_diff_ms;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;
  double _expensive_region_limit_ms;

  size_t _rs_lengths_prediction;

  size_t _known_garbage_bytes;
  double _known_garbage_ratio;
  double sigma() {
    return _sigma;
  }

  // A function that prevents us putting too much stock in small sample
  // sets.  Returns a number between 1.0 and 1.0 + 2 * sigma(), depending
  // on the number of samples: 5 or more samples yield 1.0; fewer scale
  // the extra margin linearly, from 2 * sigma() at 1 sample down to 0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
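  // Worked example (illustrative numbers, not from this changeset): assuming
  // sigma() == 0.5, confidence_factor(1) == 1.0 + 0.5 * 4/2 == 2.0,
  // confidence_factor(3) == 1.0 + 0.5 * 2/2 == 1.5, and confidence_factor(5)
  // or more == 1.0, so the sparsest sample sets inflate predictions the most.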
  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

protected:
  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  bool   _within_target;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:

  void set_region_short_lived(HeapRegion* hr) {
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
  }

  void set_region_survivors(HeapRegion* hr) {
    hr->install_surv_rate_group(_survivor_surv_rate_group);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
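  // Worked example (illustrative numbers, not from this changeset): for a
  // sequence with davg() == 10.0 ms, dsd() == 2.0 ms and num() == 3, and
  // assuming sigma() == 0.5, this returns
  // MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.5) == 15.0 ms; while the sample set is
  // small the confidence factor dominates the sigma-scaled deviation.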
  size_t young_cset_length() {
    return _young_cset_length;
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001)
      return 0;
    else
      return (size_t) prediction;
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num)
      prediction = max_pending_card_num;
    else
      prediction = max_pending_card_num - diff;

    return prediction;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }

  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
        get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
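  // Illustrative numbers (an assumption for exposition, not from this
  // changeset): with fewer than 3 samples taken during marking, the
  // non-marking copy cost is reused with a 10% penalty; if
  // get_new_prediction(_cost_per_byte_ms_seq) came out at 1e-6 ms/byte,
  // copying 1 MB during concurrent marking would be predicted at
  // 1.1 * 1048576 * 1e-6, roughly 1.15 ms.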
  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
      get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  void check_if_region_is_too_expensive(double predicted_time_ms);

  double predict_young_collection_elapsed_time_ms(size_t adjustment);
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);

  // for use by: calculate_young_list_target_length(rs_length)
  bool predict_will_fit(size_t young_region_num,
                        double base_time_ms,
                        size_t init_free_regions,
                        double target_pause_time_ms);

  void start_recording_regions();
  void record_cset_region_info(HeapRegion* hr, bool young);
  void record_non_young_cset_region(HeapRegion* hr);

  void set_recorded_young_regions(size_t n_regions);
  void set_recorded_young_bytes(size_t bytes);
  void set_recorded_rs_lengths(size_t rs_lengths);
  void set_predicted_bytes_to_copy(size_t bytes);

  void end_recording_regions();

  void record_vtime_diff_ms(double vtime_diff_ms) {
    _vtime_diff_ms = vtime_diff_ms;
  }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }

  double predict_survivor_regions_evac_time();

  // </NEW PREDICTION>
public:
  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_init_time_ms() {
    return get_new_prediction(_concurrent_mark_init_times_ms);
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }
  // Returns an estimate of the survival rate of regions at young-generation
  // age "age" in the given survivor rate group.
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }
protected:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data) {
    print_par_stats(level, str, data, true);
  }
  void print_par_stats(int level, const char* str, double* data, bool summary);
  void print_par_sizes(int level, const char* str, double* data, bool summary);

  void check_other_times(int level,
                         NumberSeq* other_times_ms,
                         NumberSeq* calc_other_times_ms) const;

  void print_summary(PauseSummary* stats) const;

  void print_summary(int level, const char* str, NumberSeq* seq) const;
  void print_summary_sd(int level, const char* str, NumberSeq* seq) const;

  double avg_value(double* data);
  double max_value(double* data);
  double sum_of_values(double* data);
  double max_sum(double* data1, double* data2);

  int _last_satb_drain_processed_buffers;
  int _last_update_rs_processed_buffers;
  double _last_pause_time_ms;

  size_t _bytes_in_to_space_before_gc;
  size_t _bytes_in_to_space_after_gc;
  size_t bytes_in_to_space_during_gc() {
    return _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
  }
  size_t _bytes_in_collection_set_before_gc;
  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // We track markings.
  int    _num_markings;
  double _mark_thread_startup_sec;  // Time at startup of marking thread

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of regions in the collection set. Set from the incrementally
  // built collection set at the start of an evacuation pause.
  size_t _collection_set_size;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,    // We are actively building the collection set
    Inactive   // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of regions in the incrementally built collection set.
  // Used to set _collection_set_size at the start of an evacuation
  // pause.
  size_t _inc_cset_size;
  // Used as the index in the surviving young words structure
  // which tracks the amount of space, for each young region,
  // that survives the pause.
  size_t _inc_cset_young_index;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end() of any heap region in the
  // collection set.
  HeapWord* _inc_cset_max_finger;

  // The number of recorded used bytes in the young regions
  // of the collection set. This is the sum of the used() bytes
  // of retired young regions in the collection set.
  size_t _inc_cset_recorded_young_bytes;

  // The RSet lengths recorded for regions in the collection set
  // (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_recorded_rs_lengths;

  // The predicted elapsed time it will take to collect the regions
  // in the collection set (updated by the periodic sampling of the
  // regions in the young list/collection set).
  double _inc_cset_predicted_elapsed_time_ms;

  // The predicted bytes to copy for the regions in the collection
  // set (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_predicted_bytes_to_copy;
  // Info about marking.
  int _n_marks;  // Sticky at 2, so we know when we've done at least 2.

  // The number of collection pauses at the end of the last mark.
  size_t _n_pauses_at_mark_end;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The average time in ms per collection pause, averaged over recent pauses.
  double recent_avg_time_for_pauses_ms();

  // The average time in ms for processing CollectedHeap strong roots, per
  // collection pause, averaged over recent pauses.
  double recent_avg_time_for_CH_strong_ms();

  // The average time in ms for processing the G1 remembered set, per
  // pause, averaged over recent pauses.
  double recent_avg_time_for_G1_strong_ms();

  // The average time in ms for "evacuating followers", per pause, averaged
  // over recent pauses.
  double recent_avg_time_for_evac_ms();

  // The number of "recent" GCs recorded in the number sequences.
  int number_of_recent_gcs();
  // The average survival ratio, computed as the total number of bytes
  // surviving / total number of bytes before collection over the last
  // several recent pauses.
  double recent_avg_survival_fraction();
  // The survival fraction of the most recent pause; if there have been no
  // pauses, returns 1.0.
  double last_survival_fraction();

  // Returns a "conservative" estimate of the recent survival rate, i.e.,
  // one that may be higher than "recent_avg_survival_fraction".
  // This is conservative in several ways:
  //   If there have been few pauses, it will assume a potential high
  //     variance, and err on the side of caution.
  //   It puts a lower bound (currently 0.1) on the value it will return.
  //   To try to detect phase changes, if the most recent pause ("latest") has
  //     a higher-than-average ("avg") survival rate, it returns that rate.
  // The "work" version is a utility function; the "young" variant is
  // restricted to young regions.
  double conservative_avg_survival_fraction_work(double avg,
                                                 double latest);

  // The arguments are the two sequences that keep track of the number of
  // bytes surviving and the total number of bytes before collection,
  // respectively, over the last several recent pauses.
  // Returns the survival rate for the category in the most recent pause.
  // If there have been no pauses, returns 1.0.
  double last_survival_fraction_work(TruncatedSeq* surviving,
                                     TruncatedSeq* before);

  // The arguments are the two sequences that keep track of the number of
  // bytes surviving and the total number of bytes before collection,
  // respectively, over the last several recent pauses.
  // Returns the average survival ratio over the last several recent pauses.
  // If there have been no pauses, returns 1.0.
  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                           TruncatedSeq* before);

  double conservative_avg_survival_fraction() {
    double avg = recent_avg_survival_fraction();
    double latest = last_survival_fraction();
    return conservative_avg_survival_fraction_work(avg, latest);
  }

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }
  // Number of pauses between concurrent marking.
  size_t _pauses_btwn_concurrent_mark;

  size_t _n_marks_since_last_pause;

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _should_revert_to_full_young_gcs;
  bool _last_full_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_init_start_sec;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _mark_closure_time_ms;

  void calculate_young_list_min_length();
  void calculate_young_list_target_length();
  void calculate_young_list_target_length(size_t rs_lengths);
public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  void check_prediction_validity();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  size_t bytes_in_to_space() {
    return bytes_in_to_space_during_gc();
  }

  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }

protected:

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

  // Together these do the base cleanup-recording work.  Subclasses might
  // want to put something between them.
  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                                size_t max_live_bytes);
  void record_concurrent_mark_cleanup_end_work2();

public:

  virtual void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  // The number of collection pauses so far.
  long n_pauses() const { return _n_pauses; }

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.
  virtual void record_stop_world_start();

  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);

  // Must currently be called while the world is stopped.
  virtual void record_concurrent_mark_init_start();
  virtual void record_concurrent_mark_init_end();
  void record_concurrent_mark_init_end_pre(double
                                           mark_init_elapsed_time_ms);

  void record_mark_closure_time(double mark_closure_time_ms);

  virtual void record_concurrent_mark_remark_start();
  virtual void record_concurrent_mark_remark_end();

  virtual void record_concurrent_mark_cleanup_start();
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_concurrent_mark_cleanup_completed();

  virtual void record_concurrent_pause();
  virtual void record_concurrent_pause_end();

  virtual void record_collection_pause_end_CH_strong_roots();
  virtual void record_collection_pause_end_G1_strong_roots();

  virtual void record_collection_pause_end();

  // Record the fact that a full collection occurred.
  virtual void record_full_collection_start();
  virtual void record_full_collection_end();
  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set = true;
  }

  void record_satb_drain_processed_buffers(int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers(int thread,
                                          double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif
  // Record the fact that "bytes" bytes were allocated in a region.
  void record_before_bytes(size_t bytes);
  void record_after_bytes(size_t bytes);

  // Returns "true" if this is a good time to do a collection pause.
  // The "word_size" argument, if non-zero, indicates the size of an
  // allocation request that is prompting this query.
  virtual bool should_do_collection_pause(size_t word_size) = 0;

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  virtual void choose_collection_set(double target_pause_time_ms) = 0;
  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle();
  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();
  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // note start of mark thread
  void note_start_of_mark_thread();
  // The marked bytes of region "r" have changed; reclassify its desirability
  // for marking.  Also asserts that "r" is eligible for a CS.
  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  virtual bool assertMarkedBytesDataOK() = 0;
#endif
  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool should_add_next_region_to_young_list();

  bool in_young_gc_mode() {
    return _in_young_gc_mode;
  }
  void set_in_young_gc_mode(bool in_young_gc_mode) {
    _in_young_gc_mode = in_young_gc_mode;
  }

  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }
  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;
    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }
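  // Worked example (illustrative numbers, not from this changeset): with a
  // known-garbage ratio of 0.3, square == 0.09 and the factor is
  // 0.09 * 9.0 + 1.0 == 1.81; a ratio of 0.5 gives 0.25 * 9.0 + 1.0 == 3.25.
  // The factor therefore ranges over [1.0, 10.0) as the ratio goes from 0
  // towards 1.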
  //
  // Survivor regions policy.
  //
protected:

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;
public:

  inline GCAllocPurpose
  evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
    if (age < _tenuring_threshold && src_region->is_young()) {
      return GCAllocForSurvived;
    } else {
      return GCAllocForTenured;
    }
  }
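  // Illustrative behavior (an assumption for exposition, not from this
  // changeset): with _tenuring_threshold == 3, an age-1 object copied out of
  // a young region goes to survivor space (GCAllocForSurvived), while an
  // age-5 object, or any object from a non-young region, is tenured
  // (GCAllocForTenured).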
  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  inline GCAllocPurpose alternative_purpose(int purpose) {
    return GCAllocForTenured;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  // Calculates survivor space parameters.
  void calculate_survivors_policy();

};
// This encapsulates a particular strategy for a g1 Collector.
//
// Start a concurrent mark when our heap size is n bytes
// greater than our heap size was at the last concurrent
// mark, where n is a function of the CMSTriggerRatio
// and the MinHeapFreeRatio.
//
// Start a g1 collection pause when we have allocated the
// average number of bytes currently being freed in
// a collection, but only if it is at least one region
// full.
//
// Resize the heap based on the desired allocation space,
// where the desired allocation space is a function of the
// survival rate and the desired future to-space size.
//
// Choose the collection set by first picking all older regions
// which have a survival rate which beats our projected young
// survival rate.  Then fill out the number of needed regions
// with young regions.
class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  CollectionSetChooser* _collectionSetChooser;
  // If the estimate is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  bool should_do_collection_pause(size_t word_size);
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};
// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.

inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double) n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
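// For reference (derivation, not part of this changeset): with
// avg = sum / n, expanding sum_i (x_i - avg)^2 gives
// sum_of_squares - 2 * avg * sum + n * avg^2, so dividing by n yields the
// population variance computed above.  E.g., for the measurements {1, 2, 3}:
// n = 3, sum = 6, sum_of_squares = 14, avg = 2, and
// variance(3, 14.0, 6.0) == (14 - 24 + 12) / 3 == 2.0/3.0.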
// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***