Thu, 07 Apr 2011 09:53:20 -0700
7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses though JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
1 /*
2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
28 #include "gc_implementation/g1/collectionSetChooser.hpp"
29 #include "gc_implementation/g1/g1MMUTracker.hpp"
30 #include "memory/collectorPolicy.hpp"
32 // A G1CollectorPolicy makes policy decisions that determine the
33 // characteristics of the collector. Examples include:
34 // * choice of collection set.
35 // * when to collect.
37 class HeapRegion;
38 class CollectionSetChooser;
// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting and pasting mistakes. The macros below introduce a number
// sequence into the following two classes and the methods that access it.
// For pause phase <name>, declares a private NumberSeq that accumulates
// the per-pause times (in ms), plus a recorder and an accessor for it.
// Token pasting (##) derives the member and method names from <name>.
// (Comments cannot safely appear inside the macro body itself, since
// line splicing happens before comment removal.)
#define define_num_seq(name) \
private: \
  NumberSeq _all_##name##_times_ms; \
public: \
  void record_##name##_time_ms(double ms) { \
    _all_##name##_times_ms.add(ms); \
  } \
  NumberSeq* get_##name##_seq() { \
    return &_all_##name##_times_ms; \
  }
56 class MainBodySummary;
// Summary statistics kept for every pause: the total pause time and the
// unattributed "other" time. main_body_summary() returns NULL here;
// subclasses that also track main-body phases override it.
class PauseSummary: public CHeapObj {
  define_num_seq(total)
  define_num_seq(other)

public:
  virtual MainBodySummary* main_body_summary() { return NULL; }
};
// Per-phase summary statistics for the main body of an evacuation
// pause. Phases marked "parallel only" are recorded only when the
// parallel GC workers run; "optional" phases may be absent.
class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain)   // optional
  define_num_seq(parallel)     // parallel only
  define_num_seq(ext_root_scan)
  define_num_seq(mark_stack_scan)
  define_num_seq(update_rs)
  define_num_seq(scan_rs)
  define_num_seq(obj_copy)
  define_num_seq(termination)  // parallel only
  define_num_seq(parallel_other) // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)     // parallel only
};
// Combines the pause-level and main-body statistics into the concrete
// summary type; exposes its MainBodySummary part via the virtual hook.
class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary* main_body_summary() { return this; }
};
86 class G1CollectorPolicy: public CollectorPolicy {
87 protected:
88 // The number of pauses during the execution.
89 long _n_pauses;
91 // either equal to the number of parallel threads, if ParallelGCThreads
92 // has been set, or 1 otherwise
93 int _parallel_gc_threads;
95 enum SomePrivateConstants {
96 NumPrevPausesForHeuristics = 10
97 };
99 G1MMUTracker* _mmu_tracker;
101 void initialize_flags();
  // Runs the full policy initialization sequence. Order matters:
  // flags must be initialized before the size info derived from them.
  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }
109 virtual size_t default_init_heap_size() {
110 // Pick some reasonable default.
111 return 8*M;
112 }
114 double _cur_collection_start_sec;
115 size_t _cur_collection_pause_used_at_start_bytes;
116 size_t _cur_collection_pause_used_regions_at_start;
117 size_t _prev_collection_pause_used_at_end_bytes;
118 double _cur_collection_par_time_ms;
119 double _cur_satb_drain_time_ms;
120 double _cur_clear_ct_time_ms;
121 bool _satb_drain_time_set;
123 #ifndef PRODUCT
124 // Card Table Count Cache stats
125 double _min_clear_cc_time_ms; // min
126 double _max_clear_cc_time_ms; // max
127 double _cur_clear_cc_time_ms; // clearing time during current pause
double _cum_clear_cc_time_ms; // cumulative clearing time
129 jlong _num_cc_clears; // number of times the card count cache has been cleared
130 #endif
132 double _cur_CH_strong_roots_end_sec;
133 double _cur_CH_strong_roots_dur_ms;
134 double _cur_G1_strong_roots_end_sec;
135 double _cur_G1_strong_roots_dur_ms;
137 // Statistics for recent GC pauses. See below for how indexed.
138 TruncatedSeq* _recent_CH_strong_roots_times_ms;
139 TruncatedSeq* _recent_G1_strong_roots_times_ms;
140 TruncatedSeq* _recent_evac_times_ms;
141 // These exclude marking times.
142 TruncatedSeq* _recent_pause_times_ms;
143 TruncatedSeq* _recent_gc_times_ms;
145 TruncatedSeq* _recent_CS_bytes_used_before;
146 TruncatedSeq* _recent_CS_bytes_surviving;
148 TruncatedSeq* _recent_rs_sizes;
150 TruncatedSeq* _concurrent_mark_init_times_ms;
151 TruncatedSeq* _concurrent_mark_remark_times_ms;
152 TruncatedSeq* _concurrent_mark_cleanup_times_ms;
154 Summary* _summary;
156 NumberSeq* _all_pause_times_ms;
157 NumberSeq* _all_full_gc_times_ms;
158 double _stop_world_start;
159 NumberSeq* _all_stop_world_times_ms;
160 NumberSeq* _all_yield_times_ms;
162 size_t _region_num_young;
163 size_t _region_num_tenured;
164 size_t _prev_region_num_young;
165 size_t _prev_region_num_tenured;
167 NumberSeq* _all_mod_union_times_ms;
169 int _aux_num;
170 NumberSeq* _all_aux_times_ms;
171 double* _cur_aux_start_times_ms;
172 double* _cur_aux_times_ms;
173 bool* _cur_aux_times_set;
175 double* _par_last_gc_worker_start_times_ms;
176 double* _par_last_ext_root_scan_times_ms;
177 double* _par_last_mark_stack_scan_times_ms;
178 double* _par_last_update_rs_times_ms;
179 double* _par_last_update_rs_processed_buffers;
180 double* _par_last_scan_rs_times_ms;
181 double* _par_last_obj_copy_times_ms;
182 double* _par_last_termination_times_ms;
183 double* _par_last_termination_attempts;
184 double* _par_last_gc_worker_end_times_ms;
185 double* _par_last_gc_worker_times_ms;
187 // indicates that we are in young GC mode
188 bool _in_young_gc_mode;
190 // indicates whether we are in full young or partially young GC mode
191 bool _full_young_gcs;
193 // if true, then it tries to dynamically adjust the length of the
194 // young list
195 bool _adaptive_young_list_length;
196 size_t _young_list_min_length;
197 size_t _young_list_target_length;
198 size_t _young_list_fixed_length;
200 // The max number of regions we can extend the eden by while the GC
201 // locker is active. This should be >= _young_list_target_length;
202 size_t _young_list_max_length;
204 size_t _young_cset_length;
205 bool _last_young_gc_full;
207 unsigned _full_young_pause_num;
208 unsigned _partial_young_pause_num;
210 bool _during_marking;
211 bool _in_marking_window;
212 bool _in_marking_window_im;
214 SurvRateGroup* _short_lived_surv_rate_group;
215 SurvRateGroup* _survivor_surv_rate_group;
216 // add here any more surv rate groups
218 double _gc_overhead_perc;
  // Whether a concurrent marking cycle is currently in progress.
  bool during_marking() {
    return _during_marking;
  }
224 // <NEW PREDICTION>
226 private:
227 enum PredictionConstants {
228 TruncatedSeqLength = 10
229 };
231 TruncatedSeq* _alloc_rate_ms_seq;
232 double _prev_collection_pause_end_ms;
234 TruncatedSeq* _pending_card_diff_seq;
235 TruncatedSeq* _rs_length_diff_seq;
236 TruncatedSeq* _cost_per_card_ms_seq;
237 TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
238 TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
239 TruncatedSeq* _cost_per_entry_ms_seq;
240 TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
241 TruncatedSeq* _cost_per_byte_ms_seq;
242 TruncatedSeq* _constant_other_time_ms_seq;
243 TruncatedSeq* _young_other_cost_per_region_ms_seq;
244 TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
246 TruncatedSeq* _pending_cards_seq;
247 TruncatedSeq* _scanned_cards_seq;
248 TruncatedSeq* _rs_lengths_seq;
250 TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
252 TruncatedSeq* _young_gc_eff_seq;
254 TruncatedSeq* _max_conc_overhead_seq;
256 size_t _recorded_young_regions;
257 size_t _recorded_non_young_regions;
258 size_t _recorded_region_num;
260 size_t _free_regions_at_end_of_collection;
262 size_t _recorded_rs_lengths;
263 size_t _max_rs_lengths;
265 size_t _recorded_marked_bytes;
266 size_t _recorded_young_bytes;
268 size_t _predicted_pending_cards;
269 size_t _predicted_cards_scanned;
270 size_t _predicted_rs_lengths;
271 size_t _predicted_bytes_to_copy;
273 double _predicted_survival_ratio;
274 double _predicted_rs_update_time_ms;
275 double _predicted_rs_scan_time_ms;
276 double _predicted_object_copy_time_ms;
277 double _predicted_constant_other_time_ms;
278 double _predicted_young_other_time_ms;
279 double _predicted_non_young_other_time_ms;
280 double _predicted_pause_time_ms;
282 double _vtime_diff_ms;
284 double _recorded_young_free_cset_time_ms;
285 double _recorded_non_young_free_cset_time_ms;
287 double _sigma;
288 double _expensive_region_limit_ms;
290 size_t _rs_lengths_prediction;
292 size_t _known_garbage_bytes;
293 double _known_garbage_ratio;
  // Confidence parameter: the number of standard deviations by which
  // predictions are padded (see get_new_prediction()).
  double sigma() {
    return _sigma;
  }
// A function that prevents us putting too much stock in small sample
// sets. Returns a factor >= 1.0, depending on the number of samples.
// 5 or more samples yields 1.0; fewer scales the (sigma-weighted)
// padding linearly, from its maximum at 1 sample down to none at 5.
303 double confidence_factor(int samples) {
304 if (samples > 4) return 1.0;
305 else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
306 }
  // Low-side (pessimistic) prediction: the decayed average minus
  // sigma() decayed standard deviations. May be negative.
  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }
312 #ifndef PRODUCT
313 bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
314 #endif // PRODUCT
316 void adjust_concurrent_refinement(double update_rs_time,
317 double update_rs_processed_buffers,
318 double goal_ms);
320 protected:
321 double _pause_time_target_ms;
322 double _recorded_young_cset_choice_time_ms;
323 double _recorded_non_young_cset_choice_time_ms;
324 bool _within_target;
325 size_t _pending_cards;
326 size_t _max_pending_cards;
328 public:
  // Attach the given region to the short-lived survivor-rate group.
  void set_region_short_lived(HeapRegion* hr) {
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
  }
  // Attach the given region to the survivor survivor-rate group.
  void set_region_survivors(HeapRegion* hr) {
    hr->install_surv_rate_group(_survivor_surv_rate_group);
  }
338 #ifndef PRODUCT
339 bool verify_young_ages();
340 #endif // PRODUCT
  // High-side (conservative) prediction: the decayed average padded by
  // sigma() standard deviations, further inflated via
  // confidence_factor() when the sample set is small.
  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
  // Number of young regions in the current collection set.
  size_t young_cset_length() {
    return _young_cset_length;
  }
  // Record the observed maximum remembered-set length.
  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }
355 size_t predict_pending_card_diff() {
356 double prediction = get_new_neg_prediction(_pending_card_diff_seq);
357 if (prediction < 0.00001)
358 return 0;
359 else
360 return (size_t) prediction;
361 }
363 size_t predict_pending_cards() {
364 size_t max_pending_card_num = _g1->max_pending_card_num();
365 size_t diff = predict_pending_card_diff();
366 size_t prediction;
367 if (diff > max_pending_card_num)
368 prediction = max_pending_card_num;
369 else
370 prediction = max_pending_card_num - diff;
372 return prediction;
373 }
  // Predicted growth of the remembered-set lengths since sampling.
  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }
  // Predicted allocation rate (regions per ms).
  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }
  // Predicted cost (ms) of processing a single card.
  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }
  // Predicted time (ms) to update the remembered sets for the given
  // number of pending cards.
  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }
  // Predicted cards-per-RS-entry ratio for fully-young GCs.
  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }
395 double predict_partially_young_cards_per_entry_ratio() {
396 if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
397 return predict_fully_young_cards_per_entry_ratio();
398 else
399 return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
400 }
  // Predicted card count for a young region's RS of the given length.
  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }
  // Predicted card count for a non-young region's RS of the given
  // length (uses the partially-young ratio).
  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }
412 double predict_rs_scan_time_ms(size_t card_num) {
413 if (full_young_gcs())
414 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
415 else
416 return predict_partially_young_rs_scan_time_ms(card_num);
417 }
419 double predict_partially_young_rs_scan_time_ms(size_t card_num) {
420 if (_partially_young_cost_per_entry_ms_seq->num() < 3)
421 return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
422 else
423 return (double) card_num *
424 get_new_prediction(_partially_young_cost_per_entry_ms_seq);
425 }
  // Predicted time (ms) to copy the given bytes while concurrent
  // marking is active. With fewer than three during-marking samples,
  // the normal copy cost padded by 10% is used instead.
  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
436 double predict_object_copy_time_ms(size_t bytes_to_copy) {
437 if (_in_marking_window && !_in_marking_window_im)
438 return predict_object_copy_time_ms_during_cm(bytes_to_copy);
439 else
440 return (double) bytes_to_copy *
441 get_new_prediction(_cost_per_byte_ms_seq);
442 }
  // Predicted fixed per-pause overhead (ms).
  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }
  // Predicted per-region overhead (ms) for the given number of young
  // regions.
  double predict_young_other_time_ms(size_t young_num) {
    return
      (double) young_num *
      get_new_prediction(_young_other_cost_per_region_ms_seq);
  }
  // Predicted per-region overhead (ms) for the given number of
  // non-young regions.
  double predict_non_young_other_time_ms(size_t non_young_num) {
    return
      (double) non_young_num *
      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }
460 void check_if_region_is_too_expensive(double predicted_time_ms);
462 double predict_young_collection_elapsed_time_ms(size_t adjustment);
463 double predict_base_elapsed_time_ms(size_t pending_cards);
464 double predict_base_elapsed_time_ms(size_t pending_cards,
465 size_t scanned_cards);
466 size_t predict_bytes_to_copy(HeapRegion* hr);
467 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
469 // for use by: calculate_young_list_target_length(rs_length)
470 bool predict_will_fit(size_t young_region_num,
471 double base_time_ms,
472 size_t init_free_regions,
473 double target_pause_time_ms);
475 void start_recording_regions();
476 void record_cset_region_info(HeapRegion* hr, bool young);
477 void record_non_young_cset_region(HeapRegion* hr);
479 void set_recorded_young_regions(size_t n_regions);
480 void set_recorded_young_bytes(size_t bytes);
481 void set_recorded_rs_lengths(size_t rs_lengths);
482 void set_predicted_bytes_to_copy(size_t bytes);
484 void end_recording_regions();
  // Record the virtual-time difference (ms) for the pause.
  void record_vtime_diff_ms(double vtime_diff_ms) {
    _vtime_diff_ms = vtime_diff_ms;
  }
  // Record the time (ms) spent freeing the young part of the cset.
  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }
  // Record the time (ms) spent freeing the non-young part of the cset.
  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }
  // Low-side prediction of young GC efficiency.
  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }
502 double predict_survivor_regions_evac_time();
504 // </NEW PREDICTION>
506 public:
  // Notify the survivor-rate groups that all surviving words for the
  // freed collection set have been recorded. Rates are only propagated
  // after a full-young GC outside a marking window.
  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }
514 void set_known_garbage_bytes(size_t known_garbage_bytes) {
515 _known_garbage_bytes = known_garbage_bytes;
516 size_t heap_bytes = _g1->capacity();
517 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
518 }
520 void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
521 guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
523 _known_garbage_bytes -= known_garbage_bytes;
524 size_t heap_bytes = _g1->capacity();
525 _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
526 }
  // Accessor for the MMU (minimum mutator utilization) tracker.
  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }
  // Maximum allowed GC pause time in ms (the tracker stores seconds).
  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }
  // Predicted duration (ms) of a concurrent-mark initial-mark phase.
  double predict_init_time_ms() {
    return get_new_prediction(_concurrent_mark_init_times_ms);
  }
  // Predicted duration (ms) of a concurrent-mark remark phase.
  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }
  // Predicted duration (ms) of a concurrent-mark cleanup phase.
  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }
548 // Returns an estimate of the survival rate of the region at yg-age
549 // "yg_age".
550 double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
551 TruncatedSeq* seq = surv_rate_group->get_seq(age);
552 if (seq->num() == 0)
553 gclog_or_tty->print("BARF! age is %d", age);
554 guarantee( seq->num() > 0, "invariant" );
555 double pred = get_new_prediction(seq);
556 if (pred > 1.0)
557 pred = 1.0;
558 return pred;
559 }
  // Survival-rate estimate at the given age for the short-lived group.
  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }
  // Accumulated survival-rate prediction up to the given age for the
  // short-lived group.
  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }
569 protected:
570 void print_stats(int level, const char* str, double value);
571 void print_stats(int level, const char* str, int value);
573 void print_par_stats(int level, const char* str, double* data);
574 void print_par_sizes(int level, const char* str, double* data);
576 void check_other_times(int level,
577 NumberSeq* other_times_ms,
578 NumberSeq* calc_other_times_ms) const;
580 void print_summary (PauseSummary* stats) const;
582 void print_summary (int level, const char* str, NumberSeq* seq) const;
583 void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
585 double avg_value (double* data);
586 double max_value (double* data);
587 double sum_of_values (double* data);
588 double max_sum (double* data1, double* data2);
590 int _last_satb_drain_processed_buffers;
591 int _last_update_rs_processed_buffers;
592 double _last_pause_time_ms;
594 size_t _bytes_in_to_space_before_gc;
595 size_t _bytes_in_to_space_after_gc;
  // Net growth of to-space over the last GC (after minus before).
  size_t bytes_in_to_space_during_gc() {
    return
      _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
  }
600 size_t _bytes_in_collection_set_before_gc;
601 // Used to count used bytes in CS.
602 friend class CountCSClosure;
604 // Statistics kept per GC stoppage, pause or full.
605 TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
607 // We track markings.
608 int _num_markings;
609 double _mark_thread_startup_sec; // Time at startup of marking thread
611 // Add a new GC of the given duration and end time to the record.
612 void update_recent_gc_times(double end_time_sec, double elapsed_ms);
614 // The head of the list (via "next_in_collection_set()") representing the
615 // current collection set. Set from the incrementally built collection
616 // set at the start of the pause.
617 HeapRegion* _collection_set;
619 // The number of regions in the collection set. Set from the incrementally
620 // built collection set at the start of an evacuation pause.
621 size_t _collection_set_size;
623 // The number of bytes in the collection set before the pause. Set from
624 // the incrementally built collection set at the start of an evacuation
625 // pause.
626 size_t _collection_set_bytes_used_before;
628 // The associated information that is maintained while the incremental
629 // collection set is being built with young regions. Used to populate
630 // the recorded info for the evacuation pause.
632 enum CSetBuildType {
633 Active, // We are actively building the collection set
634 Inactive // We are not actively building the collection set
635 };
637 CSetBuildType _inc_cset_build_state;
639 // The head of the incrementally built collection set.
640 HeapRegion* _inc_cset_head;
642 // The tail of the incrementally built collection set.
643 HeapRegion* _inc_cset_tail;
645 // The number of regions in the incrementally built collection set.
646 // Used to set _collection_set_size at the start of an evacuation
647 // pause.
648 size_t _inc_cset_size;
// Used as the index in the surviving young words structure
651 // which tracks the amount of space, for each young region,
652 // that survives the pause.
653 size_t _inc_cset_young_index;
655 // The number of bytes in the incrementally built collection set.
656 // Used to set _collection_set_bytes_used_before at the start of
657 // an evacuation pause.
658 size_t _inc_cset_bytes_used_before;
660 // Used to record the highest end of heap region in collection set
661 HeapWord* _inc_cset_max_finger;
663 // The number of recorded used bytes in the young regions
664 // of the collection set. This is the sum of the used() bytes
665 // of retired young regions in the collection set.
666 size_t _inc_cset_recorded_young_bytes;
668 // The RSet lengths recorded for regions in the collection set
669 // (updated by the periodic sampling of the regions in the
670 // young list/collection set).
671 size_t _inc_cset_recorded_rs_lengths;
673 // The predicted elapsed time it will take to collect the regions
674 // in the collection set (updated by the periodic sampling of the
675 // regions in the young list/collection set).
676 double _inc_cset_predicted_elapsed_time_ms;
678 // The predicted bytes to copy for the regions in the collection
679 // set (updated by the periodic sampling of the regions in the
680 // young list/collection set).
681 size_t _inc_cset_predicted_bytes_to_copy;
683 // Info about marking.
684 int _n_marks; // Sticky at 2, so we know when we've done at least 2.
686 // The number of collection pauses at the end of the last mark.
687 size_t _n_pauses_at_mark_end;
689 // Stash a pointer to the g1 heap.
690 G1CollectedHeap* _g1;
692 // The average time in ms per collection pause, averaged over recent pauses.
693 double recent_avg_time_for_pauses_ms();
695 // The average time in ms for processing CollectedHeap strong roots, per
696 // collection pause, averaged over recent pauses.
697 double recent_avg_time_for_CH_strong_ms();
699 // The average time in ms for processing the G1 remembered set, per
700 // pause, averaged over recent pauses.
701 double recent_avg_time_for_G1_strong_ms();
703 // The average time in ms for "evacuating followers", per pause, averaged
704 // over recent pauses.
705 double recent_avg_time_for_evac_ms();
707 // The number of "recent" GCs recorded in the number sequences
708 int number_of_recent_gcs();
710 // The average survival ratio, computed by the total number of bytes
// surviving / total number of bytes before collection over the last
712 // several recent pauses.
713 double recent_avg_survival_fraction();
714 // The survival fraction of the most recent pause; if there have been no
715 // pauses, returns 1.0.
716 double last_survival_fraction();
718 // Returns a "conservative" estimate of the recent survival rate, i.e.,
719 // one that may be higher than "recent_avg_survival_fraction".
720 // This is conservative in several ways:
721 // If there have been few pauses, it will assume a potential high
722 // variance, and err on the side of caution.
723 // It puts a lower bound (currently 0.1) on the value it will return.
724 // To try to detect phase changes, if the most recent pause ("latest") has a
725 // higher-than average ("avg") survival rate, it returns that rate.
726 // "work" version is a utility function; young is restricted to young regions.
727 double conservative_avg_survival_fraction_work(double avg,
728 double latest);
730 // The arguments are the two sequences that keep track of the number of bytes
731 // surviving and the total number of bytes before collection, resp.,
// over the last several recent pauses
733 // Returns the survival rate for the category in the most recent pause.
734 // If there have been no pauses, returns 1.0.
735 double last_survival_fraction_work(TruncatedSeq* surviving,
736 TruncatedSeq* before);
738 // The arguments are the two sequences that keep track of the number of bytes
739 // surviving and the total number of bytes before collection, resp.,
740 // over the last several recent pauses
// Returns the average survival ratio over the last several recent pauses
742 // If there have been no pauses, return 1.0
743 double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
744 TruncatedSeq* before);
746 double conservative_avg_survival_fraction() {
747 double avg = recent_avg_survival_fraction();
748 double latest = last_survival_fraction();
749 return conservative_avg_survival_fraction_work(avg, latest);
750 }
752 // The ratio of gc time to elapsed time, computed over recent pauses.
753 double _recent_avg_pause_time_ratio;
755 double recent_avg_pause_time_ratio() {
756 return _recent_avg_pause_time_ratio;
757 }
759 // Number of pauses between concurrent marking.
760 size_t _pauses_btwn_concurrent_mark;
762 size_t _n_marks_since_last_pause;
764 // At the end of a pause we check the heap occupancy and we decide
765 // whether we will start a marking cycle during the next pause. If
766 // we decide that we want to do that, we will set this parameter to
767 // true. So, this parameter will stay true between the end of a
768 // pause and the beginning of a subsequent pause (not necessarily
769 // the next one, see the comments on the next field) when we decide
770 // that we will indeed start a marking cycle and do the initial-mark
771 // work.
772 volatile bool _initiate_conc_mark_if_possible;
774 // If initiate_conc_mark_if_possible() is set at the beginning of a
775 // pause, it is a suggestion that the pause should start a marking
776 // cycle by doing the initial-mark work. However, it is possible
777 // that the concurrent marking thread is still finishing up the
778 // previous marking cycle (e.g., clearing the next marking
779 // bitmap). If that is the case we cannot start a new cycle and
780 // we'll have to wait for the concurrent marking thread to finish
781 // what it is doing. In this case we will postpone the marking cycle
782 // initiation decision for the next pause. When we eventually decide
783 // to start a cycle, we will set _during_initial_mark_pause which
784 // will stay true until the end of the initial-mark pause and it's
785 // the condition that indicates that a pause is doing the
786 // initial-mark work.
787 volatile bool _during_initial_mark_pause;
789 bool _should_revert_to_full_young_gcs;
790 bool _last_full_young_gc;
792 // This set of variables tracks the collector efficiency, in order to
793 // determine whether we should initiate a new marking.
794 double _cur_mark_stop_world_time_ms;
795 double _mark_init_start_sec;
796 double _mark_remark_start_sec;
797 double _mark_cleanup_start_sec;
798 double _mark_closure_time_ms;
800 void calculate_young_list_min_length();
801 void calculate_young_list_target_length();
802 void calculate_young_list_target_length(size_t rs_lengths);
804 public:
806 G1CollectorPolicy();
808 virtual G1CollectorPolicy* as_g1_policy() { return this; }
  // Policy-kind tag used by CollectorPolicy clients.
  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }
814 void check_prediction_validity();
  // Bytes in the collection set as recorded before the pause.
  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }
  // Bytes copied into to-space during the last GC.
  size_t bytes_in_to_space() {
    return bytes_in_to_space_during_gc();
  }
  // Time stamp for GC allocations: one more than the pause count so
  // far.
  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }
828 protected:
830 // Count the number of bytes used in the CS.
831 void count_CS_bytes_used();
833 // Together these do the base cleanup-recording work. Subclasses might
834 // want to put something between them.
835 void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
836 size_t max_live_bytes);
837 void record_concurrent_mark_cleanup_end_work2();
839 public:
841 virtual void init();
843 // Create jstat counters for the policy.
844 virtual void initialize_gc_policy_counters();
846 virtual HeapWord* mem_allocate_work(size_t size,
847 bool is_tlab,
848 bool* gc_overhead_limit_was_exceeded);
850 // This method controls how a collector handles one or more
851 // of its generations being fully allocated.
852 virtual HeapWord* satisfy_failed_allocation(size_t size,
853 bool is_tlab);
855 BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
857 GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; }
859 // The number of collection pauses so far.
860 long n_pauses() const { return _n_pauses; }
862 // Update the heuristic info to record a collection pause of the given
863 // start time, where the given number of bytes were used at the start.
864 // This may involve changing the desired size of a collection set.
866 virtual void record_stop_world_start();
868 virtual void record_collection_pause_start(double start_time_sec,
869 size_t start_used);
871 // Must currently be called while the world is stopped.
872 virtual void record_concurrent_mark_init_start();
873 virtual void record_concurrent_mark_init_end();
874 void record_concurrent_mark_init_end_pre(double
875 mark_init_elapsed_time_ms);
877 void record_mark_closure_time(double mark_closure_time_ms);
879 virtual void record_concurrent_mark_remark_start();
880 virtual void record_concurrent_mark_remark_end();
882 virtual void record_concurrent_mark_cleanup_start();
883 virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
884 size_t max_live_bytes);
885 virtual void record_concurrent_mark_cleanup_completed();
887 virtual void record_concurrent_pause();
888 virtual void record_concurrent_pause_end();
890 virtual void record_collection_pause_end_CH_strong_roots();
891 virtual void record_collection_pause_end_G1_strong_roots();
893 virtual void record_collection_pause_end();
895 // Record the fact that a full collection occurred.
896 virtual void record_full_collection_start();
897 virtual void record_full_collection_end();
  // Record the start time (ms) of the given parallel GC worker.
  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }
  // Record the external-root-scan time (ms) for the given worker.
  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }
  // Record the mark-stack-scan time (ms) for the given worker.
  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }
  // Record the SATB-buffer drain time (ms) and mark it as set for this
  // pause.
  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set = true;
  }
  // Record the number of SATB buffers drained in the last pause.
  void record_satb_drain_processed_buffers (int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }
  // Add a mod-union processing time sample (ms).
  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }
  // Record the RS-update time (ms) for the given worker thread.
  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }
928 void record_update_rs_processed_buffers (int thread,
929 double processed_buffers) {
930 _par_last_update_rs_processed_buffers[thread] = processed_buffers;
931 }
933 void record_scan_rs_time(int thread, double ms) {
934 _par_last_scan_rs_times_ms[thread] = ms;
935 }
937 void reset_obj_copy_time(int thread) {
938 _par_last_obj_copy_times_ms[thread] = 0.0;
939 }
941 void reset_obj_copy_time() {
942 reset_obj_copy_time(0);
943 }
945 void record_obj_copy_time(int thread, double ms) {
946 _par_last_obj_copy_times_ms[thread] += ms;
947 }
949 void record_termination(int thread, double ms, size_t attempts) {
950 _par_last_termination_times_ms[thread] = ms;
951 _par_last_termination_attempts[thread] = (double) attempts;
952 }
954 void record_gc_worker_end_time(int worker_i, double ms) {
955 _par_last_gc_worker_end_times_ms[worker_i] = ms;
956 }
958 void record_pause_time_ms(double ms) {
959 _last_pause_time_ms = ms;
960 }
962 void record_clear_ct_time(double ms) {
963 _cur_clear_ct_time_ms = ms;
964 }
966 void record_par_time(double ms) {
967 _cur_collection_par_time_ms = ms;
968 }
970 void record_aux_start_time(int i) {
971 guarantee(i < _aux_num, "should be within range");
972 _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
973 }
975 void record_aux_end_time(int i) {
976 guarantee(i < _aux_num, "should be within range");
977 double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
978 _cur_aux_times_set[i] = true;
979 _cur_aux_times_ms[i] += ms;
980 }
982 #ifndef PRODUCT
983 void record_cc_clear_time(double ms) {
984 if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
985 _min_clear_cc_time_ms = ms;
986 if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
987 _max_clear_cc_time_ms = ms;
988 _cur_clear_cc_time_ms = ms;
989 _cum_clear_cc_time_ms += ms;
990 _num_cc_clears++;
991 }
992 #endif
  // Record the fact that "bytes" bytes allocated in a region.
  void record_before_bytes(size_t bytes);
  void record_after_bytes(size_t bytes);

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.
  virtual void choose_collection_set(double target_pause_time_ms) = 0;

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Empty the incremental collection set (head and tail pointers only).
  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT
  // Accessors for the flag requesting that the next evacuation pause
  // start a new marking cycle if possible.
  bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; }
  void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  // Accessors for the flag saying whether the current pause is doing the
  // initial-mark work.
  bool during_initial_mark_pause() { return _during_initial_mark_pause; }
  void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle();

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();
  // note start of mark thread
  void note_start_of_mark_thread();

  // The marked bytes of the "r" has changed; reclassify its desirability
  // for marking. Also asserts that "r" is eligible for a CS.
  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  virtual bool assertMarkedBytesDataOK() = 0;
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  // Notify the relevant survivor rate group that its age indexes have
  // been recalculated.
  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }
1116 bool is_young_list_full() {
1117 size_t young_list_length = _g1->young_list()->length();
1118 size_t young_list_target_length = _young_list_target_length;
1119 if (G1FixedEdenSize) {
1120 young_list_target_length -= _max_survivor_regions;
1121 }
1122 return young_list_length >= young_list_target_length;
1123 }
1125 bool can_expand_young_list() {
1126 size_t young_list_length = _g1->young_list()->length();
1127 size_t young_list_max_length = _young_list_max_length;
1128 if (G1FixedEdenSize) {
1129 young_list_max_length -= _max_survivor_regions;
1130 }
1131 return young_list_length < young_list_max_length;
1132 }
  void update_region_num(bool young);

  // Accessors for the young-GC-mode flag.
  bool in_young_gc_mode() {
    return _in_young_gc_mode;
  }
  void set_in_young_gc_mode(bool in_young_gc_mode) {
    _in_young_gc_mode = in_young_gc_mode;
  }

  // Accessors for the fully-young-GCs flag.
  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  // Accessors for the flag controlling whether the young list length is
  // computed adaptively.
  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }
1157 inline double get_gc_eff_factor() {
1158 double ratio = _known_garbage_ratio;
1160 double square = ratio * ratio;
1161 // square = square * square;
1162 double ret = square * 9.0 + 1.0;
1163 #if 0
1164 gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
1165 #endif // 0
1166 guarantee(0.0 <= ret && ret < 10.0, "invariant!");
1167 return ret;
1168 }
  //
  // Survivor regions policy.
  //
protected:

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // The amount of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  // Age table merged (via record_thread_age_table) from per-thread tables.
  ageTable _survivors_age_table;

public:
1192 inline GCAllocPurpose
1193 evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
1194 if (age < _tenuring_threshold && src_region->is_young()) {
1195 return GCAllocForSurvived;
1196 } else {
1197 return GCAllocForTenured;
1198 }
1199 }
  // Only survivor-bound allocations have their object age tracked.
  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  // The alternative allocation purpose for "purpose": always tenured.
  inline GCAllocPurpose alternative_purpose(int purpose) {
    return GCAllocForTenured;
  }

  // Sentinel for "no limit on the number of regions".
  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      // Stop tenuring: all subsequent copies go to tenured space.
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  // Record the survivor regions (count plus list head/tail) produced by
  // the last collection.
  void record_survivor_regions(size_t regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head = head;
    _recorded_survivor_tail = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  // Merge a worker thread's age table into the survivors age table.
  void record_thread_age_table(ageTable* age_table)
  {
    _survivors_age_table.merge_par(age_table);
  }

  void calculate_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void calculate_survivors_policy();
1250 };
1252 // This encapsulates a particular strategy for a g1 Collector.
1253 //
// Start a concurrent mark when our heap size is n bytes
// greater than our heap size was at the last concurrent
// mark. Where n is a function of the CMSTriggerRatio
// and the MinHeapFreeRatio.
1258 //
1259 // Start a g1 collection pause when we have allocated the
1260 // average number of bytes currently being freed in
1261 // a collection, but only if it is at least one region
1262 // full
1263 //
1264 // Resize Heap based on desired
1265 // allocation space, where desired allocation space is
1266 // a function of survival rate and desired future to size.
1267 //
1268 // Choose collection set by first picking all older regions
1269 // which have a survival rate which beats our projected young
1270 // survival rate. Then fill out the number of needed regions
1271 // with young regions.
class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  // Chooses old regions for the collection set ordered by expected GC
  // efficiency (see class comment above).
  CollectionSetChooser* _collectionSetChooser;
  // If the estimated is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};
// This should move to some place more general...

// Given "n" measurements for which we have tracked the running "sum" and
// "sum_of_squares", return the (population) variance of the sequence,
// i.e. (1/n) * sum_i (x_i - avg)^2, computed via the algebraic expansion
//   (sum_of_squares - 2*avg*sum + n*avg^2) / n.
inline double variance(int n, double sum_of_squares, double sum) {
  double count = (double)n;
  double mean = sum / count;
  double expanded = sum_of_squares - 2.0 * mean * sum + count * mean * mean;
  return expanded / count;
}
1310 // Local Variables: ***
1311 // c-indentation-style: gnu ***
1312 // End: ***
1314 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP