src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author:      tonyp
date:        Tue, 24 Aug 2010 17:24:33 -0400
changeset:   2315 (631f79e71e90)
parent:      2314 (f95d63e2154a)
child:       2333 (016a3628c885)
permissions: -rw-r--r--

6974966: G1: unnecessary direct-to-old allocations

Summary: This change revamps the slow allocation path of G1. Improvements include the following:

a) Allocations directly into old regions are now totally banned. G1 now only allows allocations out of young regions (the only exception being humongous regions).
b) The thread that allocates a new region (which is now guaranteed to be young) does not dirty all of its cards. Each thread that successfully allocates out of a young region is now responsible for dirtying the cards corresponding to the "block" that it just allocated.
c) allocate_new_tlab() and mem_allocate() are now implemented differently, and TLAB allocations are only done by allocate_new_tlab().
d) If a thread schedules an evacuation pause in order to satisfy an allocation request, it performs the allocation at the end of the safepoint, so that the thread that initiated the GC also gets "first pick" of any space made available by the GC.
e) If a thread is unable to allocate a humongous object, it schedules an evacuation pause in case the pause reclaims enough regions for the humongous allocation to be satisfied afterwards.
f) The G1 policy is more careful to set the young list target length to be the survivor number + 1.
g) Lots of code tidy-up, removal, and refactoring to make future changes easier.

Reviewed-by: johnc, ysr
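
The "first pick" behavior in (d) follows a common retry pattern: the thread that pays for the pause retries its allocation before other threads can race for the freed space. Below is a minimal illustrative sketch of the idea; the helper names (try_young_allocation, schedule_evacuation_pause_and_allocate) are hypothetical placeholders, not the actual G1 entry points:

    HeapWord* slow_path_allocate(size_t word_size) {
      // Fast retry on the normal young-region path (hypothetical helper).
      HeapWord* result = try_young_allocation(word_size);
      if (result != NULL) {
        return result;
      }
      // Schedule an evacuation pause; the allocation request travels into
      // the safepoint and is satisfied at the end of it, so the initiating
      // thread gets "first pick" of any space the GC made available.
      result = schedule_evacuation_pause_and_allocate(word_size);
      return result; // may still be NULL if the pause freed too little
    }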

/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;

// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting-and-pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.

#define define_num_seq(name)                                                  \
private:                                                                      \
  NumberSeq _all_##name##_times_ms;                                           \
public:                                                                       \
  void record_##name##_time_ms(double ms) {                                   \
    _all_##name##_times_ms.add(ms);                                           \
  }                                                                           \
  NumberSeq* get_##name##_seq() {                                             \
    return &_all_##name##_times_ms;                                           \
  }
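
// For illustration (not part of the original header): define_num_seq(total)
// expands to roughly the following members:
//
//   private:
//     NumberSeq _all_total_times_ms;
//   public:
//     void record_total_time_ms(double ms) { _all_total_times_ms.add(ms); }
//     NumberSeq* get_total_seq() { return &_all_total_times_ms; }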

class MainBodySummary;

class PauseSummary: public CHeapObj {
  define_num_seq(total)
    define_num_seq(other)

public:
  virtual MainBodySummary*    main_body_summary()    { return NULL; }
};

class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain) // optional
  define_num_seq(parallel) // parallel only
    define_num_seq(ext_root_scan)
    define_num_seq(mark_stack_scan)
    define_num_seq(update_rs)
    define_num_seq(scan_rs)
    define_num_seq(obj_copy)
    define_num_seq(termination) // parallel only
    define_num_seq(parallel_other) // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)  // parallel only
};

class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary*    main_body_summary()    { return this; }
};

class G1CollectorPolicy: public CollectorPolicy {
protected:
  // The number of pauses during the execution.
  long _n_pauses;

  // either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise
  int _parallel_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_flags();

  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }

  virtual size_t default_init_heap_size() {
    // Pick some reasonable default.
    return 8*M;
  }

  double _cur_collection_start_sec;
  size_t _cur_collection_pause_used_at_start_bytes;
  size_t _cur_collection_pause_used_regions_at_start;
  size_t _prev_collection_pause_used_at_end_bytes;
  double _cur_collection_par_time_ms;
  double _cur_satb_drain_time_ms;
  double _cur_clear_ct_time_ms;
  bool   _satb_drain_time_set;

#ifndef PRODUCT
  // Card Table Count Cache stats
  double _min_clear_cc_time_ms;         // min
  double _max_clear_cc_time_ms;         // max
  double _cur_clear_cc_time_ms;         // clearing time during current pause
  double _cum_clear_cc_time_ms;         // cumulative clearing time
  jlong  _num_cc_clears;                // number of times the card count cache has been cleared
#endif

  double _cur_CH_strong_roots_end_sec;
  double _cur_CH_strong_roots_dur_ms;
  double _cur_G1_strong_roots_end_sec;
  double _cur_G1_strong_roots_dur_ms;

  // Statistics for recent GC pauses.  See below for how indexed.
  TruncatedSeq* _recent_CH_strong_roots_times_ms;
  TruncatedSeq* _recent_G1_strong_roots_times_ms;
  TruncatedSeq* _recent_evac_times_ms;
  // These exclude marking times.
  TruncatedSeq* _recent_pause_times_ms;
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _recent_CS_bytes_used_before;
  TruncatedSeq* _recent_CS_bytes_surviving;

  TruncatedSeq* _recent_rs_sizes;

  TruncatedSeq* _concurrent_mark_init_times_ms;
  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  Summary*           _summary;

  NumberSeq* _all_pause_times_ms;
  NumberSeq* _all_full_gc_times_ms;
  double _stop_world_start;
  NumberSeq* _all_stop_world_times_ms;
  NumberSeq* _all_yield_times_ms;

  size_t     _region_num_young;
  size_t     _region_num_tenured;
  size_t     _prev_region_num_young;
  size_t     _prev_region_num_tenured;

  NumberSeq* _all_mod_union_times_ms;

  int        _aux_num;
  NumberSeq* _all_aux_times_ms;
  double*    _cur_aux_start_times_ms;
  double*    _cur_aux_times_ms;
  bool*      _cur_aux_times_set;

  double* _par_last_gc_worker_start_times_ms;
  double* _par_last_ext_root_scan_times_ms;
  double* _par_last_mark_stack_scan_times_ms;
  double* _par_last_update_rs_times_ms;
  double* _par_last_update_rs_processed_buffers;
  double* _par_last_scan_rs_times_ms;
  double* _par_last_obj_copy_times_ms;
  double* _par_last_termination_times_ms;
  double* _par_last_termination_attempts;
  double* _par_last_gc_worker_end_times_ms;

  // indicates that we are in young GC mode
  bool _in_young_gc_mode;

  // indicates whether we are in full young or partially young GC mode
  bool _full_young_gcs;

  // if true, then it tries to dynamically adjust the length of the
  // young list
  bool _adaptive_young_list_length;
  size_t _young_list_min_length;
  size_t _young_list_target_length;
  size_t _young_list_fixed_length;

  size_t _young_cset_length;
  bool   _last_young_gc_full;

  unsigned              _full_young_pause_num;
  unsigned              _partial_young_pause_num;

  bool                  _during_marking;
  bool                  _in_marking_window;
  bool                  _in_marking_window_im;

  SurvRateGroup*        _short_lived_surv_rate_group;
  SurvRateGroup*        _survivor_surv_rate_group;
  // add here any more surv rate groups

  double                _gc_overhead_perc;

  bool during_marking() {
    return _during_marking;
  }

  // <NEW PREDICTION>

private:
  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _pending_card_diff_seq;
  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _scanned_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  TruncatedSeq* _young_gc_eff_seq;

  TruncatedSeq* _max_conc_overhead_seq;

  size_t _recorded_young_regions;
  size_t _recorded_non_young_regions;
  size_t _recorded_region_num;

  size_t _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;

  size_t _recorded_marked_bytes;
  size_t _recorded_young_bytes;

  size_t _predicted_pending_cards;
  size_t _predicted_cards_scanned;
  size_t _predicted_rs_lengths;
  size_t _predicted_bytes_to_copy;

  double _predicted_survival_ratio;
  double _predicted_rs_update_time_ms;
  double _predicted_rs_scan_time_ms;
  double _predicted_object_copy_time_ms;
  double _predicted_constant_other_time_ms;
  double _predicted_young_other_time_ms;
  double _predicted_non_young_other_time_ms;
  double _predicted_pause_time_ms;

  double _vtime_diff_ms;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _sigma;
  double _expensive_region_limit_ms;

  size_t _rs_lengths_prediction;

  size_t _known_garbage_bytes;
  double _known_garbage_ratio;

  double sigma() {
    return _sigma;
  }

  // A function that prevents us putting too much stock in small sample
  // sets.  Returns a number between 2.0 and 1.0, depending on the number
  // of samples.  5 or more samples yields one; fewer scales linearly from
  // 2.0 at 1 sample to 1.0 at 5.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return 1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
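
  // Worked example (illustrative, not part of the original header): assuming
  // sigma() == 0.5, one sample gives 1.0 + 0.5 * (5 - 1) / 2.0 = 2.0, three
  // samples give 1.0 + 0.5 * (5 - 3) / 2.0 = 1.5, and five or more give 1.0.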

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

protected:
  double _pause_time_target_ms;
  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;
  bool   _within_target;
  size_t _pending_cards;
  size_t _max_pending_cards;

public:

  void set_region_short_lived(HeapRegion* hr) {
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
  }

  void set_region_survivors(HeapRegion* hr) {
    hr->install_surv_rate_group(_survivor_surv_rate_group);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
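
  // Illustrative note (not part of the original header): a prediction is the
  // decaying average padded by sigma() decaying standard deviations, floored
  // by the average scaled up by confidence_factor() for small sample counts.
  // E.g., with davg() == 10.0 ms, dsd() == 2.0 ms, sigma() == 0.5 and five or
  // more samples, this returns MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.0) = 11.0 ms.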

  size_t young_cset_length() {
    return _young_cset_length;
  }

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_pending_card_diff() {
    double prediction = get_new_neg_prediction(_pending_card_diff_seq);
    if (prediction < 0.00001)
      return 0;
    else
      return (size_t) prediction;
  }

  size_t predict_pending_cards() {
    size_t max_pending_card_num = _g1->max_pending_card_num();
    size_t diff = predict_pending_card_diff();
    size_t prediction;
    if (diff > max_pending_card_num)
      prediction = max_pending_card_num;
    else
      prediction = max_pending_card_num - diff;

    return prediction;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }

  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
        get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }
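
  // Illustrative note (not part of the original header): until at least three
  // copy-cost samples have been gathered during concurrent marking, the
  // normal cost-per-byte prediction is used, padded by 10% (the 1.1 factor
  // above), presumably to reflect the higher copy costs observed while
  // marking is in progress.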

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return
      (double) young_num *
      get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return
      (double) non_young_num *
      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  void check_if_region_is_too_expensive(double predicted_time_ms);

  double predict_young_collection_elapsed_time_ms(size_t adjustment);
  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);

  // for use by: calculate_young_list_target_length(rs_length)
  bool predict_will_fit(size_t young_region_num,
                        double base_time_ms,
                        size_t init_free_regions,
                        double target_pause_time_ms);

  void start_recording_regions();
  void record_cset_region_info(HeapRegion* hr, bool young);
  void record_non_young_cset_region(HeapRegion* hr);

  void set_recorded_young_regions(size_t n_regions);
  void set_recorded_young_bytes(size_t bytes);
  void set_recorded_rs_lengths(size_t rs_lengths);
  void set_predicted_bytes_to_copy(size_t bytes);

  void end_recording_regions();

  void record_vtime_diff_ms(double vtime_diff_ms) {
    _vtime_diff_ms = vtime_diff_ms;
  }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }

  double predict_survivor_regions_evac_time();

  // </NEW PREDICTION>

public:
  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_init_time_ms() {
    return get_new_prediction(_concurrent_mark_init_times_ms);
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at young-gen
  // age "age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

protected:
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, int value);

  void print_par_stats(int level, const char* str, double* data) {
    print_par_stats(level, str, data, true);
  }
  void print_par_stats(int level, const char* str, double* data, bool summary);
  void print_par_sizes(int level, const char* str, double* data, bool summary);

  void check_other_times(int level,
                         NumberSeq* other_times_ms,
                         NumberSeq* calc_other_times_ms) const;

  void print_summary (PauseSummary* stats) const;

  void print_summary (int level, const char* str, NumberSeq* seq) const;
  void print_summary_sd (int level, const char* str, NumberSeq* seq) const;

  double avg_value (double* data);
  double max_value (double* data);
  double sum_of_values (double* data);
  double max_sum (double* data1, double* data2);

  int _last_satb_drain_processed_buffers;
  int _last_update_rs_processed_buffers;
  double _last_pause_time_ms;

  size_t _bytes_in_to_space_before_gc;
  size_t _bytes_in_to_space_after_gc;
  size_t bytes_in_to_space_during_gc() {
    return
      _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
  }
  size_t _bytes_in_collection_set_before_gc;
  // Used to count used bytes in CS.
  friend class CountCSClosure;

  // Statistics kept per GC stoppage, pause or full.
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // We track markings.
  int _num_markings;
  double _mark_thread_startup_sec;       // Time at startup of marking thread

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of regions in the collection set. Set from the incrementally
  // built collection set at the start of an evacuation pause.
  size_t _collection_set_size;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause.
  size_t _collection_set_bytes_used_before;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of regions in the incrementally built collection set.
  // Used to set _collection_set_size at the start of an evacuation
  // pause.
  size_t _inc_cset_size;

  // Used as the index in the surviving young words structure
  // which tracks the amount of space, for each young region,
  // that survives the pause.
  size_t _inc_cset_young_index;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of heap region in collection set
  HeapWord* _inc_cset_max_finger;

  // The number of recorded used bytes in the young regions
  // of the collection set. This is the sum of the used() bytes
  // of retired young regions in the collection set.
  size_t _inc_cset_recorded_young_bytes;

  // The RSet lengths recorded for regions in the collection set
  // (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_recorded_rs_lengths;

  // The predicted elapsed time it will take to collect the regions
  // in the collection set (updated by the periodic sampling of the
  // regions in the young list/collection set).
  double _inc_cset_predicted_elapsed_time_ms;

  // The predicted bytes to copy for the regions in the collection
  // set (updated by the periodic sampling of the regions in the
  // young list/collection set).
  size_t _inc_cset_predicted_bytes_to_copy;

  // Info about marking.
  int _n_marks; // Sticky at 2, so we know when we've done at least 2.

  // The number of collection pauses at the end of the last mark.
  size_t _n_pauses_at_mark_end;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  // The average time in ms per collection pause, averaged over recent pauses.
  double recent_avg_time_for_pauses_ms();

  // The average time in ms for processing CollectedHeap strong roots, per
  // collection pause, averaged over recent pauses.
  double recent_avg_time_for_CH_strong_ms();

  // The average time in ms for processing the G1 remembered set, per
  // pause, averaged over recent pauses.
  double recent_avg_time_for_G1_strong_ms();

  // The average time in ms for "evacuating followers", per pause, averaged
  // over recent pauses.
  double recent_avg_time_for_evac_ms();

  // The number of "recent" GCs recorded in the number sequences
  int number_of_recent_gcs();

  // The average survival ratio, computed as the total number of bytes
  // surviving / total number of bytes before collection over the last
  // several recent pauses.
  double recent_avg_survival_fraction();
  // The survival fraction of the most recent pause; if there have been no
  // pauses, returns 1.0.
  double last_survival_fraction();

  // Returns a "conservative" estimate of the recent survival rate, i.e.,
  // one that may be higher than "recent_avg_survival_fraction".
  // This is conservative in several ways:
  //   If there have been few pauses, it will assume a potential high
  //     variance, and err on the side of caution.
  //   It puts a lower bound (currently 0.1) on the value it will return.
  //   To try to detect phase changes, if the most recent pause ("latest") has a
  //     higher-than-average ("avg") survival rate, it returns that rate.
  // The "work" version is a utility function; "young" is restricted to
  // young regions.
  double conservative_avg_survival_fraction_work(double avg,
                                                 double latest);

  // The arguments are the two sequences that keep track of the number of bytes
  //   surviving and the total number of bytes before collection, resp.,
  //   over the last several recent pauses.
  // Returns the survival rate for the category in the most recent pause.
  // If there have been no pauses, returns 1.0.
  double last_survival_fraction_work(TruncatedSeq* surviving,
                                     TruncatedSeq* before);

  // The arguments are the two sequences that keep track of the number of bytes
  //   surviving and the total number of bytes before collection, resp.,
  //   over the last several recent pauses.
  // Returns the average survival ratio over the last several recent pauses.
  // If there have been no pauses, returns 1.0.
  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
                                           TruncatedSeq* before);

  double conservative_avg_survival_fraction() {
    double avg = recent_avg_survival_fraction();
    double latest = last_survival_fraction();
    return conservative_avg_survival_fraction_work(avg, latest);
  }

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // Number of pauses between concurrent marking.
  size_t _pauses_btwn_concurrent_mark;

  size_t _n_marks_since_last_pause;

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _should_revert_to_full_young_gcs;
  bool _last_full_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_init_start_sec;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;
  double _mark_closure_time_ms;

  void   calculate_young_list_min_length();
  void   calculate_young_list_target_length();
  void   calculate_young_list_target_length(size_t rs_lengths);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  void check_prediction_validity();

  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  size_t bytes_in_to_space() {
    return bytes_in_to_space_during_gc();
  }

  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }

protected:

  // Count the number of bytes used in the CS.
  void count_CS_bytes_used();

  // Together these do the base cleanup-recording work.  Subclasses might
  // want to put something between them.
  void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
                                                size_t max_live_bytes);
  void record_concurrent_mark_cleanup_end_work2();

public:

  virtual void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  GenRemSet::Name  rem_set_name()     { return GenRemSet::CardTable; }

  // The number of collection pauses so far.
  long n_pauses() const { return _n_pauses; }

  // Update the heuristic info to record a collection pause of the given
  // start time, where the given number of bytes were used at the start.
  // This may involve changing the desired size of a collection set.

  virtual void record_stop_world_start();

  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);

  // Must currently be called while the world is stopped.
  virtual void record_concurrent_mark_init_start();
  virtual void record_concurrent_mark_init_end();
  void record_concurrent_mark_init_end_pre(double
                                           mark_init_elapsed_time_ms);

  void record_mark_closure_time(double mark_closure_time_ms);

  virtual void record_concurrent_mark_remark_start();
  virtual void record_concurrent_mark_remark_end();

  virtual void record_concurrent_mark_cleanup_start();
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_concurrent_mark_cleanup_completed();

  virtual void record_concurrent_pause();
  virtual void record_concurrent_pause_end();

  virtual void record_collection_pause_end_CH_strong_roots();
  virtual void record_collection_pause_end_G1_strong_roots();

  virtual void record_collection_pause_end();

  // Record the fact that a full collection occurred.
  virtual void record_full_collection_start();
  virtual void record_full_collection_end();

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  void record_satb_drain_time(double ms) {
    _cur_satb_drain_time_ms = ms;
    _satb_drain_time_set    = true;
  }

  void record_satb_drain_processed_buffers (int processed_buffers) {
    _last_satb_drain_processed_buffers = processed_buffers;
  }

  void record_mod_union_time(double ms) {
    _all_mod_union_times_ms->add(ms);
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers (int thread,
                                           double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }

  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_aux_start_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
  }

  void record_aux_end_time(int i) {
    guarantee(i < _aux_num, "should be within range");
    double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
    _cur_aux_times_set[i] = true;
    _cur_aux_times_ms[i] += ms;
  }

#ifndef PRODUCT
  void record_cc_clear_time(double ms) {
    if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
      _min_clear_cc_time_ms = ms;
    if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
      _max_clear_cc_time_ms = ms;
    _cur_clear_cc_time_ms = ms;
    _cum_clear_cc_time_ms += ms;
    _num_cc_clears++;
  }
#endif

  // Record the fact that "bytes" bytes were allocated in a region.
  void record_before_bytes(size_t bytes);
  void record_after_bytes(size_t bytes);

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  virtual void choose_collection_set(double target_pause_time_ms) = 0;

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // The number of elements in the current collection set.
  size_t collection_set_size() { return _collection_set_size; }

  // Add "hr" to the CS.
  void add_to_collection_set(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // The number of elements in the incrementally built collection set.
  size_t inc_cset_size() { return _inc_cset_size; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()      { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle();

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and starts a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  virtual size_t expansion_amount();

  // note start of mark thread
  void note_start_of_mark_thread();

  // The marked bytes of region "r" have changed; reclassify its desirability
  // for marking.  Also asserts that "r" is eligible for a CS.
  virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  virtual bool assertMarkedBytesDataOK() = 0;
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  bool is_young_list_full() {
    size_t young_list_length = _g1->young_list()->length();
    size_t young_list_max_length = _young_list_target_length;
    if (G1FixedEdenSize) {
      young_list_max_length -= _max_survivor_regions;
    }

    return young_list_length >= young_list_max_length;
  }

  void update_region_num(bool young);

  bool in_young_gc_mode() {
    return _in_young_gc_mode;
  }
  void set_in_young_gc_mode(bool in_young_gc_mode) {
    _in_young_gc_mode = in_young_gc_mode;
  }

  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }

  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;
    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }
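
  // Illustrative note (not part of the original header): the factor maps the
  // known-garbage ratio quadratically onto [1.0, 10.0); e.g., a ratio of 0.5
  // yields 0.5 * 0.5 * 9.0 + 1.0 = 3.25, so heaps with more known garbage get
  // a correspondingly larger efficiency factor.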

  //
  // Survivor regions policy.
  //
protected:

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
      if (age < _tenuring_threshold && src_region->is_young()) {
        return GCAllocForSurvived;
      } else {
        return GCAllocForTenured;
      }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  inline GCAllocPurpose alternative_purpose(int purpose) {
    return GCAllocForTenured;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(size_t      regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table)
  {
    _survivors_age_table.merge_par(age_table);
  }

  // Calculates survivor space parameters.
  void calculate_survivors_policy();

};

// This encapsulates a particular strategy for a g1 Collector.
//
//      Start a concurrent mark when our heap size is n bytes
//            greater than our heap size was at the last concurrent
//            mark, where n is a function of the CMSTriggerRatio
//            and the MinHeapFreeRatio.
//
//      Start a g1 collection pause when we have allocated the
//            average number of bytes currently being freed in
//            a collection, but only if it is at least one region
//            full.
//
//      Resize the heap based on the desired allocation space,
//      where the desired allocation space is a function of
//      survival rate and the desired future to-space size.
//
//      Choose the collection set by first picking all older regions
//      which have a survival rate which beats our projected young
//      survival rate.  Then fill out the number of needed regions
//      with young regions.

class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  CollectionSetChooser* _collectionSetChooser;
  // If the estimate is less than desirable, resize if possible.
  void expand_if_possible(size_t numRegions);

  virtual void choose_collection_set(double target_pause_time_ms);
  virtual void record_collection_pause_start(double start_time_sec,
                                             size_t start_used);
  virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
                                                  size_t max_live_bytes);
  virtual void record_full_collection_end();

public:
  G1CollectorPolicy_BestRegionsFirst() {
    _collectionSetChooser = new CollectionSetChooser();
  }
  void record_collection_pause_end();
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.
  void note_change_in_marked_bytes(HeapRegion* r) { }
#ifndef PRODUCT
  bool assertMarkedBytesDataOK();
#endif
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
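
// Derivation note (not part of the original header): since avg == sum / n,
// expanding sum((x_i - avg)^2) gives sum_of_squares - 2.0 * avg * sum +
// n_d * avg * avg, so the expression above is exactly the population
// variance sum((x_i - avg)^2) / n, computed without a second pass over the
// measurements.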

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
