Mon, 28 Nov 2011 09:49:05 -0800
7114303: G1: assert(_g1->mark_in_progress()) failed: shouldn't be here otherwise
Summary: Race between the VM thread reading G1CollectedHeap::_mark_in_progress and it being set by the concurrent mark thread when concurrent marking is aborted by a full GC. Have the concurrent mark thread join the SuspendibleThreadSet before changing the marking state.
Reviewed-by: tonyp, brutisso
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector. Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting and pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.

#define define_num_seq(name)                                                  \
private:                                                                      \
  NumberSeq _all_##name##_times_ms;                                           \
public:                                                                       \
  void record_##name##_time_ms(double ms) {                                   \
    _all_##name##_times_ms.add(ms);                                           \
  }                                                                           \
  NumberSeq* get_##name##_seq() {                                             \
    return &_all_##name##_times_ms;                                           \
  }
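
// For illustration only (not part of the original source): an invocation
// such as define_num_seq(total) expands to roughly the following members:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) { _all_total_times_ms.add(ms); }
//     NumberSeq* get_total_seq() { return &_all_total_times_ms; }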

class MainBodySummary;

class PauseSummary: public CHeapObj {
  define_num_seq(total)
  define_num_seq(other)

public:
  virtual MainBodySummary* main_body_summary() { return NULL; }
};

class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)     // optional
  define_num_seq(parallel)       // parallel only
  define_num_seq(ext_root_scan)
  define_num_seq(mark_stack_scan)
  define_num_seq(update_rs)
  define_num_seq(scan_rs)
  define_num_seq(obj_copy)
  define_num_seq(termination)    // parallel only
  define_num_seq(parallel_other) // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)
};

class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary* main_body_summary() { return this; }
};
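
// Illustrative (hypothetical) call site: since Summary inherits from both
// classes, a pause can record through either interface, e.g.
//
//   summary->record_total_time_ms(elapsed_ms);
//   summary->main_body_summary()->record_update_rs_time_ms(update_rs_ms);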

class G1CollectorPolicy: public CollectorPolicy {
private:
  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  CollectionSetChooser* _collectionSetChooser;

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  size_t _cur_collection_pause_used_regions_at_start;
  size_t _prev_collection_pause_used_at_end_bytes;
  double _cur_collection_par_time_ms;
  double _cur_satb_drain_time_ms;
  double _cur_clear_ct_time_ms;
  double _cur_ref_proc_time_ms;
  double _cur_ref_enq_time_ms;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;         // min
  double _max_clear_cc_time_ms;         // max
  double _cur_clear_cc_time_ms;         // clearing time during current pause
  double _cum_clear_cc_time_ms;         // cumulative clearing time
  jlong  _num_cc_clears;                // number of times the card count cache has been cleared
#endif

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  Summary* _summary;

  NumberSeq* _all_pause_times_ms;
  NumberSeq* _all_full_gc_times_ms;
  double _stop_world_start;
  NumberSeq* _all_stop_world_times_ms;
  NumberSeq* _all_yield_times_ms;

  int _aux_num;
  NumberSeq* _all_aux_times_ms;
  double* _cur_aux_start_times_ms;
  double* _cur_aux_times_ms;
  bool* _cur_aux_times_set;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_mark_stack_scan_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;
  double* _par_last_gc_worker_times_ms;

  // Each worker's 'other' time, i.e. the elapsed time of the parallel
  // phase of the pause minus the sum of the individual sub-phase
  // times for a given worker thread.
  double* _par_last_gc_worker_other_times_ms;

  // indicates whether we are in full young or partially young GC mode
  bool _full_young_gcs;

  // if true, then it tries to dynamically adjust the length of the
  // young list
  bool _adaptive_young_list_length;
  size_t _young_list_target_length;
  size_t _young_list_fixed_length;
  size_t _prev_eden_capacity; // used for logging

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  size_t _young_list_max_length;

  bool _last_young_gc_full;

  unsigned _full_young_pause_num;
  unsigned _partial_young_pause_num;

  bool _during_marking;
  bool _in_marking_window;
  bool _in_marking_window_im;

  SurvRateGroup* _short_lived_surv_rate_group;
  SurvRateGroup* _survivor_surv_rate_group;
  // add here any more surv rate groups

  double _gc_overhead_perc;

  double _reserve_factor;
  size_t _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  TruncatedSeq* _young_gc_eff_seq;

  bool _using_new_ratio_calculations;
  size_t _min_desired_young_length; // as set on the command line or default calculations
  size_t _max_desired_young_length; // as set on the command line or default calculations

  size_t _eden_cset_region_length;
  size_t _survivor_cset_region_length;
  size_t _old_cset_region_length;

  void init_cset_region_lengths(size_t eden_cset_region_length,
                                size_t survivor_cset_region_length);

  size_t eden_cset_region_length()     { return _eden_cset_region_length;     }
  size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
  size_t old_cset_region_length()      { return _old_cset_region_length;      }

  size_t _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;
  double _expensive_region_limit_ms;

  size_t _rs_lengths_prediction;

  size_t _known_garbage_bytes;
  double _known_garbage_ratio;

  double sigma() {
    return _sigma;
  }

  // A function that prevents us putting too much stock in small sample
  // sets. Returns a number between 2.0 and 1.0 (given the default
  // confidence, i.e. sigma() == 0.5), depending on the number of samples.
  // 5 or more samples yields one; fewer scales linearly from 2.0 at 1
  // sample to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
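
  // Worked example (assuming the default G1ConfidencePercent of 50, so
  // sigma() == 0.5): with 2 samples the factor is
  // 1.0 + 0.5 * (5 - 2) / 2.0 == 1.75, so predictions drawn from short
  // sequences are padded by 75%; with 5 or more samples it is exactly 1.0.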

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:
  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_young();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_young() && hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
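
  // Worked example (hypothetical numbers): for a sequence with
  // davg() == 10.0 ms, dsd() == 2.0 ms and 10 samples, sigma() == 0.5
  // gives MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.0) == 11.0 ms -- the decaying
  // average padded by a confidence margin, and never below that average.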

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001)
      return 0;
    else
      return (size_t) prediction;
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num)
      prediction = max_pending_card_num;
    else
      prediction = max_pending_card_num - diff;

    return prediction;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }

  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
             get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_seq);
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  void check_if_region_is_too_expensive(double predicted_time_ms);

  double predict_young_collection_elapsed_time_ms(size_t adjustment);
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
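
  // A sketch of how the pieces above combine (the actual implementation
  // lives in g1CollectorPolicy.cpp, so treat this as an outline, not the
  // definitive formula): the predicted pause time is roughly
  //
  //   predict_rs_update_time_ms(pending_cards)
  //     + predict_rs_scan_time_ms(scanned_cards)
  //     + predict_object_copy_time_ms(bytes_to_copy)
  //     + predict_constant_other_time_ms()
  //
  // plus the per-region 'young other'/'non-young other' costs for each
  // region added to the collection set.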

  void set_recorded_rs_lengths(size_t rs_lengths);

  size_t cset_region_length()       { return young_cset_region_length() +
                                             old_cset_region_length(); }
  size_t young_cset_region_length() { return eden_cset_region_length() +
                                             survivor_cset_region_length(); }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }

  double predict_survivor_regions_evac_time();

  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at
  // young-gen age "age" in the given survivor rate group.
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

private:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data);
  void print_par_sizes(int level, const char* str, double* data);

  void check_other_times(int level,
                         NumberSeq* other_times_ms,
                         NumberSeq* calc_other_times_ms) const;

  void print_summary(PauseSummary* stats) const;

  void print_summary(int level, const char* str, NumberSeq* seq) const;
  void print_summary_sd(int level, const char* str, NumberSeq* seq) const;

  double avg_value(double* data);
  double max_value(double* data);
  double sum_of_values(double* data);
  double max_sum(double* data1, double* data2);

  double _last_pause_time_ms;

  size_t _bytes_in_collection_set_before_gc;
  size_t _bytes_copied_during_gc;

  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage, whether an evacuation pause or a
  // full collection.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,   // We are actively building the collection set
    Inactive  // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the collection set
  // (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_recorded_rs_lengths;

  // The predicted elapsed time it will take to collect the regions
  // in the collection set (updated by the periodic sampling of the
  // regions in the young list/collection set).
  double _inc_cset_predicted_elapsed_time_ms;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _should_revert_to_full_young_gcs;
  bool _last_full_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _mark_closure_time_ms;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  size_t calculate_young_list_desired_min_length(size_t base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  size_t calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // represents the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
  // the young list, desired_min_length and desired_max_length are the
  // desired min and max young list lengths according to the user's inputs.
  size_t calculate_young_list_target_length(size_t rs_lengths,
                                            size_t base_min_length,
                                            size_t desired_min_length,
                                            size_t desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions). It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(size_t young_length, double base_time_ms,
                        size_t base_free_regions, double target_pause_time_ms);
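
  // A sketch of the check (the real implementation is in
  // g1CollectorPolicy.cpp; the helper names below are from this header,
  // but the structure is an approximation): predict the bytes to copy
  // for young_length regions, form
  //   pause_time_ms = base_time_ms
  //                   + predict_object_copy_time_ms(bytes_to_copy)
  //                   + predict_young_other_time_ms(young_length);
  // and answer true only if pause_time_ms <= target_pause_time_ms and
  // the predicted survivors fit into base_free_regions.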

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

  void update_young_list_size_using_newratio(size_t number_of_heap_regions);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }

  // This should be called after the heap is resized.
  void record_new_heap_size(size_t new_number_of_regions);

public:

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.

  void record_stop_world_start();

  void record_collection_pause_start(double start_time_sec, size_t start_used);

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  void record_mark_closure_time(double mark_closure_time_ms);

  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
  void record_concurrent_mark_cleanup_completed();

  void record_concurrent_pause();
  void record_concurrent_pause_end();

  void record_collection_pause_end(int no_of_gc_threads);
  void print_heap_transition();

  // Record the fact that a full collection occurred.
  void record_full_collection_start();
  void record_full_collection_end();

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    assert(_g1->mark_in_progress(), "shouldn't be here otherwise");
    _cur_satb_drain_time_ms = ms;
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers(int thread,
                                          double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }

  void record_ref_proc_time(double ms) {
    _cur_ref_proc_time_ms = ms;
  }

  void record_ref_enq_time(double ms) {
    _cur_ref_enq_time_ms = ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  void choose_collection_set(double target_pause_time_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  size_t expansion_amount();

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  bool assertMarkedBytesDataOK();
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_target_length = _young_list_target_length;
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  size_t young_list_max_length() {
    return _young_list_max_length;
  }

  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }

  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;
    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }
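
  // Worked example: with _known_garbage_ratio == 0.3 the factor is
  // 0.3 * 0.3 * 9.0 + 1.0 == 1.81. The factor grows quadratically from
  // 1.0 (no known garbage) towards 10.0 as the known-garbage ratio
  // approaches 1.0, which is what the guarantee above enforces.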

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // For reporting purposes.
  size_t _eden_bytes_before_gc;
  size_t _survivor_bytes_before_gc;
  size_t _capacity_before_gc;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
    if (age < _tenuring_threshold && src_region->is_young()) {
      return GCAllocForSurvived;
    } else {
      return GCAllocForTenured;
    }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
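
// Worked example: for the measurements {1.0, 2.0, 3.0} we have n == 3,
// sum == 6.0 and sum_of_squares == 14.0, so avg == 2.0 and the function
// returns (14.0 - 2.0 * 2.0 * 6.0 + 3.0 * 4.0) / 3.0 == 2.0 / 3.0 -- the
// population variance, i.e. the expansion of SUM((x_i - avg)^2) / n.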

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP