Tue, 07 Dec 2010 16:47:42 -0500
6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
Summary: Allow the eden to be expanded up to a point when the GC locker is active.
Reviewed-by: jwilhelm, johnc, ysr, jcoomes

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;

// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting-and-pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.

#define define_num_seq(name)                                       \
private:                                                           \
  NumberSeq _all_##name##_times_ms;                                \
public:                                                            \
  void record_##name##_time_ms(double ms) {                        \
    _all_##name##_times_ms.add(ms);                                \
  }                                                                \
  NumberSeq* get_##name##_seq() {                                  \
    return &_all_##name##_times_ms;                                \
  }
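
// For illustration, define_num_seq(total) expands (whitespace aside) to:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) {
//       _all_total_times_ms.add(ms);
//     }
//     NumberSeq* get_total_seq() {
//       return &_all_total_times_ms;
//     }
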
class MainBodySummary;

class PauseSummary: public CHeapObj {
  define_num_seq(total)
  define_num_seq(other)

public:
  virtual MainBodySummary* main_body_summary() { return NULL; }
};

class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)     // optional
  define_num_seq(parallel)       // parallel only
  define_num_seq(ext_root_scan)
  define_num_seq(mark_stack_scan)
  define_num_seq(update_rs)
  define_num_seq(scan_rs)
  define_num_seq(obj_copy)
  define_num_seq(termination)    // parallel only
  define_num_seq(parallel_other) // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)       // parallel only
};

class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary* main_body_summary() { return this; }
};

class G1CollectorPolicy: public CollectorPolicy {
protected:
  // The number of pauses during the execution.
  long _n_pauses;

  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  virtual size_t default_init_heap_size() {
    // Pick some reasonable default.
    return 8*M;
  }

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  size_t _cur_collection_pause_used_regions_at_start;
  size_t _prev_collection_pause_used_at_end_bytes;
  double _cur_collection_par_time_ms;
  double _cur_satb_drain_time_ms;
  double _cur_clear_ct_time_ms;
  bool   _satb_drain_time_set;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;         // min
  double _max_clear_cc_time_ms;         // max
  double _cur_clear_cc_time_ms;         // clearing time during current pause
  double _cum_clear_cc_time_ms;         // cumulative clearing time
  jlong  _num_cc_clears;                // number of times the card count cache has been cleared
#endif

  double _cur_CH_strong_roots_end_sec;
  double _cur_CH_strong_roots_dur_ms;
  double _cur_G1_strong_roots_end_sec;
  double _cur_G1_strong_roots_dur_ms;

  // Statistics for recent GC pauses.  See below for how indexed.
  TruncatedSeq* _recent_CH_strong_roots_times_ms;
  TruncatedSeq* _recent_G1_strong_roots_times_ms;
  TruncatedSeq* _recent_evac_times_ms;
  // These exclude marking times.
  TruncatedSeq* _recent_pause_times_ms;
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _recent_CS_bytes_used_before;
  TruncatedSeq* _recent_CS_bytes_surviving;

  TruncatedSeq* _recent_rs_sizes;

  TruncatedSeq* _concurrent_mark_init_times_ms;
  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  Summary* _summary;

  NumberSeq* _all_pause_times_ms;
  NumberSeq* _all_full_gc_times_ms;
  double _stop_world_start;
  NumberSeq* _all_stop_world_times_ms;
  NumberSeq* _all_yield_times_ms;

  size_t _region_num_young;
  size_t _region_num_tenured;
  size_t _prev_region_num_young;
  size_t _prev_region_num_tenured;

  NumberSeq* _all_mod_union_times_ms;

  int        _aux_num;
  NumberSeq* _all_aux_times_ms;
  double*    _cur_aux_start_times_ms;
  double*    _cur_aux_times_ms;
  bool*      _cur_aux_times_set;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_mark_stack_scan_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;

  // indicates that we are in young GC mode
  bool _in_young_gc_mode;

  // indicates whether we are in full young or partially young GC mode
  bool _full_young_gcs;

  // if true, then it tries to dynamically adjust the length of the
  // young list
  bool _adaptive_young_list_length;
  size_t _young_list_min_length;
  size_t _young_list_target_length;
  size_t _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  size_t _young_list_max_length;

  size_t _young_cset_length;
  bool   _last_young_gc_full;

  unsigned _full_young_pause_num;
  unsigned _partial_young_pause_num;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  bool during_marking() {
    return _during_marking;
  }

  // <NEW PREDICTION>

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _scanned_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  TruncatedSeq* _young_gc_eff_seq;

  TruncatedSeq* _max_conc_overhead_seq;

  size_t _recorded_young_regions;
  size_t _recorded_non_young_regions;
  size_t _recorded_region_num;

  size_t _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _recorded_marked_bytes;
  size_t _recorded_young_bytes;

  size_t _predicted_pending_cards;
  size_t _predicted_cards_scanned;
  size_t _predicted_rs_lengths;
  size_t _predicted_bytes_to_copy;

  double _predicted_survival_ratio;
  double _predicted_rs_update_time_ms;
  double _predicted_rs_scan_time_ms;
  double _predicted_object_copy_time_ms;
  double _predicted_constant_other_time_ms;
  double _predicted_young_other_time_ms;
  double _predicted_non_young_other_time_ms;
  double _predicted_pause_time_ms;

  double _vtime_diff_ms;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;
  double _expensive_region_limit_ms;

  size_t _rs_lengths_prediction;

  size_t _known_garbage_bytes;
  double _known_garbage_ratio;

  double sigma() {
    return _sigma;
  }

  // A function that prevents us from putting too much stock in small sample
  // sets.  Returns a number between 2.0 and 1.0, depending on the number
  // of samples.  5 or more samples yields one; fewer scales linearly from
  // 2.0 at 1 sample to 1.0 at 5 (the 2.0 endpoint holds when sigma() is 0.5).
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
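
  // For example, with sigma() == 0.5: samples == 1 gives
  // 1.0 + 0.5 * 4 / 2.0 == 2.0, samples == 3 gives
  // 1.0 + 0.5 * 2 / 2.0 == 1.5, and samples >= 5 gives 1.0.
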
  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

protected:
  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  bool   _within_target;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:

  void set_region_short_lived(HeapRegion* hr) {
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
  }

  void set_region_survivors(HeapRegion* hr) {
    hr->install_surv_rate_group(_survivor_surv_rate_group);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
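
  // Illustration (not from the original sources): with sigma() == 0.5,
  // davg() == 10.0 ms, dsd() == 2.0 ms and num() == 5 this returns
  // MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.0) == 11.0 ms; with num() == 1 it
  // would return MAX2(11.0, 10.0 * 2.0) == 20.0 ms, i.e. the prediction
  // is padded more aggressively while the sample set is small.
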
  size_t young_cset_length() {
    return _young_cset_length;
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001)
      return 0;
    else
      return (size_t) prediction;
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num)
      prediction = max_pending_card_num;
    else
      prediction = max_pending_card_num - diff;

    return prediction;
  }
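
  // Note that predict_pending_cards() clamps the predicted diff to
  // max_pending_card_num before subtracting, so the unsigned arithmetic
  // above cannot underflow.
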
  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }

  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
        get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return
      (double) young_num *
      get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return
      (double) non_young_num *
      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  void check_if_region_is_too_expensive(double predicted_time_ms);

  double predict_young_collection_elapsed_time_ms(size_t adjustment);
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);

  // for use by: calculate_young_list_target_length(rs_length)
  bool predict_will_fit(size_t young_region_num,
                        double base_time_ms,
                        size_t init_free_regions,
                        double target_pause_time_ms);

  void start_recording_regions();
  void record_cset_region_info(HeapRegion* hr, bool young);
  void record_non_young_cset_region(HeapRegion* hr);

  void set_recorded_young_regions(size_t n_regions);
  void set_recorded_young_bytes(size_t bytes);
  void set_recorded_rs_lengths(size_t rs_lengths);
  void set_predicted_bytes_to_copy(size_t bytes);

  void end_recording_regions();

  void record_vtime_diff_ms(double vtime_diff_ms) {
    _vtime_diff_ms = vtime_diff_ms;
  }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }

  double predict_survivor_regions_evac_time();

  // </NEW PREDICTION>

public:
  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_init_time_ms() {
    return get_new_prediction(_concurrent_mark_init_times_ms);
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at young-gen
  // age "age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

protected:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data) {
    print_par_stats(level, str, data, true);
  }
  void print_par_stats(int level, const char* str, double* data, bool summary);
  void print_par_sizes(int level, const char* str, double* data, bool summary);

  void check_other_times(int level,
                         NumberSeq* other_times_ms,
                         NumberSeq* calc_other_times_ms) const;

  void print_summary(PauseSummary* stats) const;

  void print_summary(int level, const char* str, NumberSeq* seq) const;
  void print_summary_sd(int level, const char* str, NumberSeq* seq) const;

  double avg_value(double* data);
  double max_value(double* data);
  double sum_of_values(double* data);
  double max_sum(double* data1, double* data2);

  int    _last_satb_drain_processed_buffers;
  int    _last_update_rs_processed_buffers;
  double _last_pause_time_ms;

  size_t _bytes_in_to_space_before_gc;
  size_t _bytes_in_to_space_after_gc;
  size_t bytes_in_to_space_during_gc() {
    return
      _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
  }
  size_t _bytes_in_collection_set_before_gc;
  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage (pause or full).
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // We track markings.
  int _num_markings;
  double _mark_thread_startup_sec;       // Time at startup of marking thread

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of regions in the collection set. Set from the incrementally
  // built collection set at the start of an evacuation pause.
  size_t _collection_set_size;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of regions in the incrementally built collection set.
  // Used to set _collection_set_size at the start of an evacuation
  // pause.
  size_t _inc_cset_size;

  // Used as the index in the surviving young words structure
  // which tracks the amount of space, for each young region,
  // that survives the pause.
  size_t _inc_cset_young_index;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The number of recorded used bytes in the young regions
  // of the collection set. This is the sum of the used() bytes
  // of retired young regions in the collection set.
  size_t _inc_cset_recorded_young_bytes;

  // The RSet lengths recorded for regions in the collection set
  // (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_recorded_rs_lengths;

  // The predicted elapsed time it will take to collect the regions
  // in the collection set (updated by the periodic sampling of the
  // regions in the young list/collection set).
  double _inc_cset_predicted_elapsed_time_ms;

  // The predicted bytes to copy for the regions in the collection
  // set (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_predicted_bytes_to_copy;

  // Info about marking.
  int _n_marks; // Sticky at 2, so we know when we've done at least 2.

  // The number of collection pauses at the end of the last mark.
  size_t _n_pauses_at_mark_end;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The average time in ms per collection pause, averaged over recent pauses.
  double recent_avg_time_for_pauses_ms();

  // The average time in ms for processing CollectedHeap strong roots, per
  // collection pause, averaged over recent pauses.
  double recent_avg_time_for_CH_strong_ms();

  // The average time in ms for processing the G1 remembered set, per
  // pause, averaged over recent pauses.
  double recent_avg_time_for_G1_strong_ms();

  // The average time in ms for "evacuating followers", per pause, averaged
  // over recent pauses.
  double recent_avg_time_for_evac_ms();

  // The number of "recent" GCs recorded in the number sequences
  int number_of_recent_gcs();

  // The average survival ratio, computed by the total number of bytes
  // surviving / total number of bytes before collection over the last
  // several recent pauses.
  double recent_avg_survival_fraction();
  // The survival fraction of the most recent pause; if there have been no
  // pauses, returns 1.0.
  double last_survival_fraction();

  // Returns a "conservative" estimate of the recent survival rate, i.e.,
  // one that may be higher than "recent_avg_survival_fraction".
  // This is conservative in several ways:
  //   If there have been few pauses, it will assume a potential high
  //     variance, and err on the side of caution.
  //   It puts a lower bound (currently 0.1) on the value it will return.
  //   To try to detect phase changes, if the most recent pause ("latest") has a
  //     higher-than-average ("avg") survival rate, it returns that rate.
  // The "work" version is a utility function; "young" is restricted to young
  // regions.
  double conservative_avg_survival_fraction_work(double avg,
                                                 double latest);

  // The arguments are the two sequences that keep track of the number of
  // bytes surviving and the total number of bytes before collection, resp.,
  // over the last several recent pauses.
  // Returns the survival rate for the category in the most recent pause.
  // If there have been no pauses, returns 1.0.
  double last_survival_fraction_work(TruncatedSeq* surviving,
                                     TruncatedSeq* before);

  // The arguments are the two sequences that keep track of the number of
  // bytes surviving and the total number of bytes before collection, resp.,
  // over the last several recent pauses.
  // Returns the average survival ratio over the last several recent pauses.
  // If there have been no pauses, returns 1.0.
  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                           TruncatedSeq* before);

  double conservative_avg_survival_fraction() {
    double avg = recent_avg_survival_fraction();
    double latest = last_survival_fraction();
    return conservative_avg_survival_fraction_work(avg, latest);
  }

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // Number of pauses between concurrent markings.
  size_t _pauses_btwn_concurrent_mark;

  size_t _n_marks_since_last_pause;

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _should_revert_to_full_young_gcs;
  bool _last_full_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_init_start_sec;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _mark_closure_time_ms;

  void calculate_young_list_min_length();
  void calculate_young_list_target_length();
  void calculate_young_list_target_length(size_t rs_lengths);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  void check_prediction_validity();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  size_t bytes_in_to_space() {
    return bytes_in_to_space_during_gc();
  }

  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }

protected:

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

  // Together these do the base cleanup-recording work. Subclasses might
  // want to put something between them.
  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                                size_t max_live_bytes);
  void record_concurrent_mark_cleanup_end_work2();

public:

  virtual void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  // The number of collection pauses so far.
  long n_pauses() const { return _n_pauses; }

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.

  virtual void record_stop_world_start();

  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);

  // Must currently be called while the world is stopped.
  virtual void record_concurrent_mark_init_start();
  virtual void record_concurrent_mark_init_end();
  void record_concurrent_mark_init_end_pre(double
                                           mark_init_elapsed_time_ms);

  void record_mark_closure_time(double mark_closure_time_ms);

  virtual void record_concurrent_mark_remark_start();
  virtual void record_concurrent_mark_remark_end();

  virtual void record_concurrent_mark_cleanup_start();
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_concurrent_mark_cleanup_completed();

  virtual void record_concurrent_pause();
  virtual void record_concurrent_pause_end();

  virtual void record_collection_pause_end_CH_strong_roots();
  virtual void record_collection_pause_end_G1_strong_roots();

  virtual void record_collection_pause_end();

  // Record the fact that a full collection occurred.
  virtual void record_full_collection_start();
  virtual void record_full_collection_end();

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set = true;
  }

  void record_satb_drain_processed_buffers(int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers(int thread,
                                          double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif

  // Record the fact that "bytes" bytes were allocated in a region.
  void record_before_bytes(size_t bytes);
  void record_after_bytes(size_t bytes);

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  virtual void choose_collection_set(double target_pause_time_ms) = 0;

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle();

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // note start of mark thread
  void note_start_of_mark_thread();

  // The marked bytes of the region "r" have changed; reclassify its
  // desirability for marking. Also asserts that "r" is eligible for a CS.
  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  virtual bool assertMarkedBytesDataOK() = 0;
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_target_length = _young_list_target_length;
    if (G1FixedEdenSize) {
      young_list_target_length -= _max_survivor_regions;
    }
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_max_length = _young_list_max_length;
    if (G1FixedEdenSize) {
      young_list_max_length -= _max_survivor_regions;
    }
    return young_list_length < young_list_max_length;
  }
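
  // A hypothetical caller sketch (allocate_new_young_region() is an
  // illustrative placeholder, not a method of this class) showing how the
  // two predicates above divide the work: is_young_list_full() gates normal
  // mutator allocation, while can_expand_young_list() lets the eden keep
  // growing, up to _young_list_max_length, when the GC locker blocks the
  // evacuation pause that would otherwise run:
  //
  //   HeapWord* result = NULL;
  //   if (!g1_policy->is_young_list_full()) {
  //     result = allocate_new_young_region();  // the common case
  //   } else if (GC_locker::is_active_and_needs_gc() &&
  //              g1_policy->can_expand_young_list()) {
  //     result = allocate_new_young_region();  // extend the eden rather than
  //   }                                        // allocating into the old gen
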
  void update_region_num(bool young);

  bool in_young_gc_mode() {
    return _in_young_gc_mode;
  }
  void set_in_young_gc_mode(bool in_young_gc_mode) {
    _in_young_gc_mode = in_young_gc_mode;
  }

  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }

  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;

    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }
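
  // Since _known_garbage_ratio is the known garbage as a fraction of heap
  // capacity, the factor above grows quadratically from 1.0 (no known
  // garbage) towards 10.0 (all garbage), the range the guarantee checks.
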
  //
  // Survivor regions policy.
  //

protected:

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
      if (age < _tenuring_threshold && src_region->is_young()) {
        return GCAllocForSurvived;
      } else {
        return GCAllocForTenured;
      }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  inline GCAllocPurpose alternative_purpose(int purpose) {
    return GCAllocForTenured;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table)
  {
    _survivors_age_table.merge_par(age_table);
  }

  void calculate_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void calculate_survivors_policy();

};

// This encapsulates a particular strategy for a g1 Collector.
//
// Start a concurrent mark when our heap size is n bytes
// greater than our heap size was at the last concurrent
// mark, where n is a function of the CMSTriggerRatio
// and the MinHeapFreeRatio.
//
// Start a g1 collection pause when we have allocated the
// average number of bytes currently being freed in
// a collection, but only if it is at least one region
// full.
//
// Resize the heap based on desired allocation space,
// where desired allocation space is a function of
// survival rate and desired future to-space size.
//
// Choose the collection set by first picking all older regions
// which have a survival rate which beats our projected young
// survival rate. Then fill out the number of needed regions
// with young regions.

class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  CollectionSetChooser* _collectionSetChooser;
  // If the estimate is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
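
// Since avg == sum / n_d, the expression above simplifies to
// sum_of_squares / n_d - avg * avg, i.e. the familiar
// E[X^2] - (E[X])^2 form of the (population) variance.
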
// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP