src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author       tonyp
date         Thu, 08 Sep 2011 05:16:49 -0400
changeset    3119:4f41766176cf
parent       3114:20213c8a3c40
child        3120:af2ab04e0038
permissions  -rw-r--r--

7084509: G1: fix inconsistencies and mistakes in the young list target length calculations
Summary: Fixed inconsistencies and mistakes in the young list target length calculations so that a) the calculated target length is optimal (before, it was not), b) other parameters like max survivor size and max gc locker eden expansion are always consistent with the calculated target length (before, they were not always), and c) the resulting target length was always bound by desired min and max values (before, it was not).
Reviewed-by: brutisso, johnc
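
In outline, the fix derives the desired min/max bounds first and then clamps the calculated target length to them, recomputing the dependent values afterwards. A rough sketch using the methods declared in the header below (the actual logic lives in g1CollectorPolicy.cpp, so the exact ordering and call sites here are illustrative, not verbatim):

    // Sketch only: how the pieces are meant to combine.
    size_t min_len = calculate_young_list_desired_min_length(base_min_length);
    size_t max_len = calculate_young_list_desired_max_length();
    size_t target  = calculate_young_list_target_length(rs_lengths,
                                                        base_min_length,
                                                        min_len, max_len);
    // (c) the result is always bound by the desired min and max:
    _young_list_target_length = MAX2(min_len, MIN2(target, max_len));
    // (b) keep dependent values consistent with the new target:
    update_max_gc_locker_expansion();  // recomputes _young_list_max_length
    update_survivors_policy();         // recomputes the max survivor size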

     1 /*
     2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
    28 #include "gc_implementation/g1/collectionSetChooser.hpp"
    29 #include "gc_implementation/g1/g1MMUTracker.hpp"
    30 #include "memory/collectorPolicy.hpp"
    32 // A G1CollectorPolicy makes policy decisions that determine the
    33 // characteristics of the collector.  Examples include:
    34 //   * choice of collection set.
    35 //   * when to collect.
    37 class HeapRegion;
    38 class CollectionSetChooser;
    40 // Yes, this is a bit unpleasant... but it saves replicating the same thing
    41 // over and over again and introducing subtle problems through small typos and
    42 // cutting and pasting mistakes. The macro below introduces a number
    43 // sequence into the following two classes and the methods that access it.
    45 #define define_num_seq(name)                                                  \
    46 private:                                                                      \
    47   NumberSeq _all_##name##_times_ms;                                           \
    48 public:                                                                       \
    49   void record_##name##_time_ms(double ms) {                                   \
    50     _all_##name##_times_ms.add(ms);                                           \
    51   }                                                                           \
    52   NumberSeq* get_##name##_seq() {                                             \
    53     return &_all_##name##_times_ms;                                           \
    54   }
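// For instance, define_num_seq(total) expands into a private
// "NumberSeq _all_total_times_ms;" field together with the public accessors
// "void record_total_time_ms(double ms)" and "NumberSeq* get_total_seq()".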
    56 class MainBodySummary;
    58 class PauseSummary: public CHeapObj {
    59   define_num_seq(total)
    60     define_num_seq(other)
    62 public:
    63   virtual MainBodySummary*    main_body_summary()    { return NULL; }
    64 };
    66 class MainBodySummary: public CHeapObj {
    67   define_num_seq(satb_drain) // optional
    68   define_num_seq(parallel) // parallel only
    69     define_num_seq(ext_root_scan)
    70     define_num_seq(mark_stack_scan)
    71     define_num_seq(update_rs)
    72     define_num_seq(scan_rs)
    73     define_num_seq(obj_copy)
    74     define_num_seq(termination) // parallel only
    75     define_num_seq(parallel_other) // parallel only
    76   define_num_seq(mark_closure)
    77   define_num_seq(clear_ct)  // parallel only
    78 };
    80 class Summary: public PauseSummary,
    81                public MainBodySummary {
    82 public:
    83   virtual MainBodySummary*    main_body_summary()    { return this; }
    84 };
    86 class G1CollectorPolicy: public CollectorPolicy {
    87 protected:
    88   // The number of pauses during the execution.
    89   long _n_pauses;
    91   // either equal to the number of parallel threads, if ParallelGCThreads
    92   // has been set, or 1 otherwise
    93   int _parallel_gc_threads;
    95   enum SomePrivateConstants {
    96     NumPrevPausesForHeuristics = 10
    97   };
    99   G1MMUTracker* _mmu_tracker;
   101   void initialize_flags();
   103   void initialize_all() {
   104     initialize_flags();
   105     initialize_size_info();
   106     initialize_perm_generation(PermGen::MarkSweepCompact);
   107   }
   109   virtual size_t default_init_heap_size() {
   110     // Pick some reasonable default.
   111     return 8*M;
   112   }
   114   double _cur_collection_start_sec;
   115   size_t _cur_collection_pause_used_at_start_bytes;
   116   size_t _cur_collection_pause_used_regions_at_start;
   117   size_t _prev_collection_pause_used_at_end_bytes;
   118   double _cur_collection_par_time_ms;
   119   double _cur_satb_drain_time_ms;
   120   double _cur_clear_ct_time_ms;
   121   bool   _satb_drain_time_set;
   123 #ifndef PRODUCT
   124   // Card Table Count Cache stats
   125   double _min_clear_cc_time_ms;         // min
   126   double _max_clear_cc_time_ms;         // max
   127   double _cur_clear_cc_time_ms;         // clearing time during current pause
   128   double _cum_clear_cc_time_ms;         // cumulative clearing time
   129   jlong  _num_cc_clears;                // number of times the card count cache has been cleared
   130 #endif
   132   // Statistics for recent GC pauses.  See below for how indexed.
   133   TruncatedSeq* _recent_rs_scan_times_ms;
   135   // These exclude marking times.
   136   TruncatedSeq* _recent_pause_times_ms;
   137   TruncatedSeq* _recent_gc_times_ms;
   139   TruncatedSeq* _recent_CS_bytes_used_before;
   140   TruncatedSeq* _recent_CS_bytes_surviving;
   142   TruncatedSeq* _recent_rs_sizes;
   144   TruncatedSeq* _concurrent_mark_remark_times_ms;
   145   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
   147   Summary*           _summary;
   149   NumberSeq* _all_pause_times_ms;
   150   NumberSeq* _all_full_gc_times_ms;
   151   double _stop_world_start;
   152   NumberSeq* _all_stop_world_times_ms;
   153   NumberSeq* _all_yield_times_ms;
   155   size_t     _region_num_young;
   156   size_t     _region_num_tenured;
   157   size_t     _prev_region_num_young;
   158   size_t     _prev_region_num_tenured;
   160   NumberSeq* _all_mod_union_times_ms;
   162   int        _aux_num;
   163   NumberSeq* _all_aux_times_ms;
   164   double*    _cur_aux_start_times_ms;
   165   double*    _cur_aux_times_ms;
   166   bool*      _cur_aux_times_set;
   168   double* _par_last_gc_worker_start_times_ms;
   169   double* _par_last_ext_root_scan_times_ms;
   170   double* _par_last_mark_stack_scan_times_ms;
   171   double* _par_last_update_rs_times_ms;
   172   double* _par_last_update_rs_processed_buffers;
   173   double* _par_last_scan_rs_times_ms;
   174   double* _par_last_obj_copy_times_ms;
   175   double* _par_last_termination_times_ms;
   176   double* _par_last_termination_attempts;
   177   double* _par_last_gc_worker_end_times_ms;
   178   double* _par_last_gc_worker_times_ms;
   180   // indicates whether we are in full young or partially young GC mode
   181   bool _full_young_gcs;
   183   // if true, then it tries to dynamically adjust the length of the
   184   // young list
   185   bool _adaptive_young_list_length;
   186   size_t _young_list_target_length;
   187   size_t _young_list_fixed_length;
   189   // The max number of regions we can extend the eden by while the GC
   190   // locker is active. This should be >= _young_list_target_length.
   191   size_t _young_list_max_length;
   193   size_t _young_cset_length;
   194   bool   _last_young_gc_full;
   196   unsigned              _full_young_pause_num;
   197   unsigned              _partial_young_pause_num;
   199   bool                  _during_marking;
   200   bool                  _in_marking_window;
   201   bool                  _in_marking_window_im;
   203   SurvRateGroup*        _short_lived_surv_rate_group;
   204   SurvRateGroup*        _survivor_surv_rate_group;
   205   // add here any more surv rate groups
   207   double                _gc_overhead_perc;
   209   double _reserve_factor;
   210   size_t _reserve_regions;
   212   bool during_marking() {
   213     return _during_marking;
   214   }
   216   // <NEW PREDICTION>
   218 private:
   219   enum PredictionConstants {
   220     TruncatedSeqLength = 10
   221   };
   223   TruncatedSeq* _alloc_rate_ms_seq;
   224   double        _prev_collection_pause_end_ms;
   226   TruncatedSeq* _pending_card_diff_seq;
   227   TruncatedSeq* _rs_length_diff_seq;
   228   TruncatedSeq* _cost_per_card_ms_seq;
   229   TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
   230   TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
   231   TruncatedSeq* _cost_per_entry_ms_seq;
   232   TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
   233   TruncatedSeq* _cost_per_byte_ms_seq;
   234   TruncatedSeq* _constant_other_time_ms_seq;
   235   TruncatedSeq* _young_other_cost_per_region_ms_seq;
   236   TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
   238   TruncatedSeq* _pending_cards_seq;
   239   TruncatedSeq* _scanned_cards_seq;
   240   TruncatedSeq* _rs_lengths_seq;
   242   TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
   244   TruncatedSeq* _young_gc_eff_seq;
   246   TruncatedSeq* _max_conc_overhead_seq;
   248   size_t _recorded_young_regions;
   249   size_t _recorded_non_young_regions;
   250   size_t _recorded_region_num;
   252   size_t _free_regions_at_end_of_collection;
   254   size_t _recorded_rs_lengths;
   255   size_t _max_rs_lengths;
   257   size_t _recorded_marked_bytes;
   258   size_t _recorded_young_bytes;
   260   size_t _predicted_pending_cards;
   261   size_t _predicted_cards_scanned;
   262   size_t _predicted_rs_lengths;
   263   size_t _predicted_bytes_to_copy;
   265   double _predicted_survival_ratio;
   266   double _predicted_rs_update_time_ms;
   267   double _predicted_rs_scan_time_ms;
   268   double _predicted_object_copy_time_ms;
   269   double _predicted_constant_other_time_ms;
   270   double _predicted_young_other_time_ms;
   271   double _predicted_non_young_other_time_ms;
   272   double _predicted_pause_time_ms;
   274   double _vtime_diff_ms;
   276   double _recorded_young_free_cset_time_ms;
   277   double _recorded_non_young_free_cset_time_ms;
   279   double _sigma;
   280   double _expensive_region_limit_ms;
   282   size_t _rs_lengths_prediction;
   284   size_t _known_garbage_bytes;
   285   double _known_garbage_ratio;
   287   double sigma() {
   288     return _sigma;
   289   }
   291   // A function that prevents us from putting too much stock in small sample
   292   // sets.  Returns a number between 1.0 and 2.0, depending on the number
   293   // of samples.  5 or more samples yields 1.0; fewer scales linearly from
   294   // 2.0 at 1 sample to 1.0 at 5.
   295   double confidence_factor(int samples) {
   296     if (samples > 4) return 1.0;
   297     else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
   298   }
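  // Worked example (assuming the default confidence level, i.e. sigma() == 0.5):
  //   confidence_factor(1) == 1.0 + 0.5 * (5 - 1) / 2.0 == 2.0
  //   confidence_factor(4) == 1.0 + 0.5 * (5 - 4) / 2.0 == 1.25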
   300   double get_new_neg_prediction(TruncatedSeq* seq) {
   301     return seq->davg() - sigma() * seq->dsd();
   302   }
   304 #ifndef PRODUCT
   305   bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
   306 #endif // PRODUCT
   308   void adjust_concurrent_refinement(double update_rs_time,
   309                                     double update_rs_processed_buffers,
   310                                     double goal_ms);
   312 protected:
   313   double _pause_time_target_ms;
   314   double _recorded_young_cset_choice_time_ms;
   315   double _recorded_non_young_cset_choice_time_ms;
   316   bool   _within_target;
   317   size_t _pending_cards;
   318   size_t _max_pending_cards;
   320 public:
   322   void set_region_short_lived(HeapRegion* hr) {
   323     hr->install_surv_rate_group(_short_lived_surv_rate_group);
   324   }
   326   void set_region_survivors(HeapRegion* hr) {
   327     hr->install_surv_rate_group(_survivor_surv_rate_group);
   328   }
   330 #ifndef PRODUCT
   331   bool verify_young_ages();
   332 #endif // PRODUCT
   334   double get_new_prediction(TruncatedSeq* seq) {
   335     return MAX2(seq->davg() + sigma() * seq->dsd(),
   336                 seq->davg() * confidence_factor(seq->num()));
   337   }
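  // Worked example with illustrative numbers: for davg() == 10.0 ms,
  // dsd() == 2.0 ms, sigma() == 0.5 and num() >= 5 (confidence factor 1.0),
  // this returns MAX2(10.0 + 0.5 * 2.0, 10.0 * 1.0) == 11.0 ms -- the average
  // padded by the weighted deviation, never below the confidence-scaled average.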
   339   size_t young_cset_length() {
   340     return _young_cset_length;
   341   }
   343   void record_max_rs_lengths(size_t rs_lengths) {
   344     _max_rs_lengths = rs_lengths;
   345   }
   347   size_t predict_pending_card_diff() {
   348     double prediction = get_new_neg_prediction(_pending_card_diff_seq);
   349     if (prediction < 0.00001)
   350       return 0;
   351     else
   352       return (size_t) prediction;
   353   }
   355   size_t predict_pending_cards() {
   356     size_t max_pending_card_num = _g1->max_pending_card_num();
   357     size_t diff = predict_pending_card_diff();
   358     size_t prediction;
   359     if (diff > max_pending_card_num)
   360       prediction = max_pending_card_num;
   361     else
   362       prediction = max_pending_card_num - diff;
   364     return prediction;
   365   }
   367   size_t predict_rs_length_diff() {
   368     return (size_t) get_new_prediction(_rs_length_diff_seq);
   369   }
   371   double predict_alloc_rate_ms() {
   372     return get_new_prediction(_alloc_rate_ms_seq);
   373   }
   375   double predict_cost_per_card_ms() {
   376     return get_new_prediction(_cost_per_card_ms_seq);
   377   }
   379   double predict_rs_update_time_ms(size_t pending_cards) {
   380     return (double) pending_cards * predict_cost_per_card_ms();
   381   }
   383   double predict_fully_young_cards_per_entry_ratio() {
   384     return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
   385   }
   387   double predict_partially_young_cards_per_entry_ratio() {
   388     if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
   389       return predict_fully_young_cards_per_entry_ratio();
   390     else
   391       return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
   392   }
   394   size_t predict_young_card_num(size_t rs_length) {
   395     return (size_t) ((double) rs_length *
   396                      predict_fully_young_cards_per_entry_ratio());
   397   }
   399   size_t predict_non_young_card_num(size_t rs_length) {
   400     return (size_t) ((double) rs_length *
   401                      predict_partially_young_cards_per_entry_ratio());
   402   }
   404   double predict_rs_scan_time_ms(size_t card_num) {
   405     if (full_young_gcs())
   406       return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
   407     else
   408       return predict_partially_young_rs_scan_time_ms(card_num);
   409   }
   411   double predict_partially_young_rs_scan_time_ms(size_t card_num) {
   412     if (_partially_young_cost_per_entry_ms_seq->num() < 3)
   413       return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
   414     else
   415       return (double) card_num *
   416         get_new_prediction(_partially_young_cost_per_entry_ms_seq);
   417   }
   419   double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
   420     if (_cost_per_byte_ms_during_cm_seq->num() < 3)
   421       return 1.1 * (double) bytes_to_copy *
   422         get_new_prediction(_cost_per_byte_ms_seq);
   423     else
   424       return (double) bytes_to_copy *
   425         get_new_prediction(_cost_per_byte_ms_during_cm_seq);
   426   }
   428   double predict_object_copy_time_ms(size_t bytes_to_copy) {
   429     if (_in_marking_window && !_in_marking_window_im)
   430       return predict_object_copy_time_ms_during_cm(bytes_to_copy);
   431     else
   432       return (double) bytes_to_copy *
   433         get_new_prediction(_cost_per_byte_ms_seq);
   434   }
   436   double predict_constant_other_time_ms() {
   437     return get_new_prediction(_constant_other_time_ms_seq);
   438   }
   440   double predict_young_other_time_ms(size_t young_num) {
   441     return
   442       (double) young_num *
   443       get_new_prediction(_young_other_cost_per_region_ms_seq);
   444   }
   446   double predict_non_young_other_time_ms(size_t non_young_num) {
   447     return
   448       (double) non_young_num *
   449       get_new_prediction(_non_young_other_cost_per_region_ms_seq);
   450   }
   452   void check_if_region_is_too_expensive(double predicted_time_ms);
   454   double predict_young_collection_elapsed_time_ms(size_t adjustment);
   455   double predict_base_elapsed_time_ms(size_t pending_cards);
   456   double predict_base_elapsed_time_ms(size_t pending_cards,
   457                                       size_t scanned_cards);
   458   size_t predict_bytes_to_copy(HeapRegion* hr);
   459   double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
   461   void start_recording_regions();
   462   void record_cset_region_info(HeapRegion* hr, bool young);
   463   void record_non_young_cset_region(HeapRegion* hr);
   465   void set_recorded_young_regions(size_t n_regions);
   466   void set_recorded_young_bytes(size_t bytes);
   467   void set_recorded_rs_lengths(size_t rs_lengths);
   468   void set_predicted_bytes_to_copy(size_t bytes);
   470   void end_recording_regions();
   472   void record_vtime_diff_ms(double vtime_diff_ms) {
   473     _vtime_diff_ms = vtime_diff_ms;
   474   }
   476   void record_young_free_cset_time_ms(double time_ms) {
   477     _recorded_young_free_cset_time_ms = time_ms;
   478   }
   480   void record_non_young_free_cset_time_ms(double time_ms) {
   481     _recorded_non_young_free_cset_time_ms = time_ms;
   482   }
   484   double predict_young_gc_eff() {
   485     return get_new_neg_prediction(_young_gc_eff_seq);
   486   }
   488   double predict_survivor_regions_evac_time();
   490   // </NEW PREDICTION>
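  // A sketch of how the predictors above compose into a pause-time estimate.
  // The predict_*_elapsed_time_ms() bodies live in g1CollectorPolicy.cpp; the
  // composition below is illustrative, assuming the costs simply add up:
  //
  //   predict_base_elapsed_time_ms(pending_cards, scanned_cards)
  //     ~= predict_rs_update_time_ms(pending_cards)
  //      + predict_rs_scan_time_ms(scanned_cards)
  //      + predict_constant_other_time_ms();
  //
  // and each collection set region then contributes its own
  // predict_region_elapsed_time_ms(hr, young), i.e. RS scanning plus object
  // copy plus the per-region "other" cost.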
   492   void cset_regions_freed() {
   493     bool propagate = _last_young_gc_full && !_in_marking_window;
   494     _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
   495     _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
   496     // also call it on any more surv rate groups
   497   }
   499   void set_known_garbage_bytes(size_t known_garbage_bytes) {
   500     _known_garbage_bytes = known_garbage_bytes;
   501     size_t heap_bytes = _g1->capacity();
   502     _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
   503   }
   505   void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
   506     guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );
   508     _known_garbage_bytes -= known_garbage_bytes;
   509     size_t heap_bytes = _g1->capacity();
   510     _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
   511   }
   513   G1MMUTracker* mmu_tracker() {
   514     return _mmu_tracker;
   515   }
   517   double max_pause_time_ms() {
   518     return _mmu_tracker->max_gc_time() * 1000.0;
   519   }
   521   double predict_remark_time_ms() {
   522     return get_new_prediction(_concurrent_mark_remark_times_ms);
   523   }
   525   double predict_cleanup_time_ms() {
   526     return get_new_prediction(_concurrent_mark_cleanup_times_ms);
   527   }
   529   // Returns an estimate of the survival rate of the region at yg-age
   530   // "age".
   531   double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
   532     TruncatedSeq* seq = surv_rate_group->get_seq(age);
   533     if (seq->num() == 0)
   534       gclog_or_tty->print("BARF! age is %d", age);
   535     guarantee( seq->num() > 0, "invariant" );
   536     double pred = get_new_prediction(seq);
   537     if (pred > 1.0)
   538       pred = 1.0;
   539     return pred;
   540   }
   542   double predict_yg_surv_rate(int age) {
   543     return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
   544   }
   546   double accum_yg_surv_rate_pred(int age) {
   547     return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
   548   }
   550 protected:
   551   void print_stats(int level, const char* str, double value);
   552   void print_stats(int level, const char* str, int value);
   554   void print_par_stats(int level, const char* str, double* data);
   555   void print_par_sizes(int level, const char* str, double* data);
   557   void check_other_times(int level,
   558                          NumberSeq* other_times_ms,
   559                          NumberSeq* calc_other_times_ms) const;
   561   void print_summary (PauseSummary* stats) const;
   563   void print_summary (int level, const char* str, NumberSeq* seq) const;
   564   void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
   566   double avg_value (double* data);
   567   double max_value (double* data);
   568   double sum_of_values (double* data);
   569   double max_sum (double* data1, double* data2);
   571   int _last_satb_drain_processed_buffers;
   572   int _last_update_rs_processed_buffers;
   573   double _last_pause_time_ms;
   575   size_t _bytes_in_collection_set_before_gc;
   576   size_t _bytes_copied_during_gc;
   578   // Used to count used bytes in CS.
   579   friend class CountCSClosure;
   581   // Statistics kept per GC stoppage, pause or full.
   582   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
   584   // We track markings.
   585   int _num_markings;
   586   double _mark_thread_startup_sec;       // Time at startup of marking thread
   588   // Add a new GC of the given duration and end time to the record.
   589   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
   591   // The head of the list (via "next_in_collection_set()") representing the
   592   // current collection set. Set from the incrementally built collection
   593   // set at the start of the pause.
   594   HeapRegion* _collection_set;
   596   // The number of regions in the collection set. Set from the incrementally
   597   // built collection set at the start of an evacuation pause.
   598   size_t _collection_set_size;
   600   // The number of bytes in the collection set before the pause. Set from
   601   // the incrementally built collection set at the start of an evacuation
   602   // pause.
   603   size_t _collection_set_bytes_used_before;
   605   // The associated information that is maintained while the incremental
   606   // collection set is being built with young regions. Used to populate
   607   // the recorded info for the evacuation pause.
   609   enum CSetBuildType {
   610     Active,             // We are actively building the collection set
   611     Inactive            // We are not actively building the collection set
   612   };
   614   CSetBuildType _inc_cset_build_state;
   616   // The head of the incrementally built collection set.
   617   HeapRegion* _inc_cset_head;
   619   // The tail of the incrementally built collection set.
   620   HeapRegion* _inc_cset_tail;
   622   // The number of regions in the incrementally built collection set.
   623   // Used to set _collection_set_size at the start of an evacuation
   624   // pause.
   625   size_t _inc_cset_size;
   627   // Used as the index in the surviving young words structure
   628   // which tracks the amount of space, for each young region,
   629   // that survives the pause.
   630   size_t _inc_cset_young_index;
   632   // The number of bytes in the incrementally built collection set.
   633   // Used to set _collection_set_bytes_used_before at the start of
   634   // an evacuation pause.
   635   size_t _inc_cset_bytes_used_before;
   637   // Used to record the highest end of any heap region in the collection set
   638   HeapWord* _inc_cset_max_finger;
   640   // The number of recorded used bytes in the young regions
   641   // of the collection set. This is the sum of the used() bytes
   642   // of retired young regions in the collection set.
   643   size_t _inc_cset_recorded_young_bytes;
   645   // The RSet lengths recorded for regions in the collection set
   646   // (updated by the periodic sampling of the regions in the
   647   // young list/collection set).
   648   size_t _inc_cset_recorded_rs_lengths;
   650   // The predicted elapsed time it will take to collect the regions
   651   // in the collection set (updated by the periodic sampling of the
   652   // regions in the young list/collection set).
   653   double _inc_cset_predicted_elapsed_time_ms;
   655   // The predicted bytes to copy for the regions in the collection
   656   // set (updated by the periodic sampling of the regions in the
   657   // young list/collection set).
   658   size_t _inc_cset_predicted_bytes_to_copy;
   660   // Info about marking.
   661   int _n_marks; // Sticky at 2, so we know when we've done at least 2.
   663   // The number of collection pauses at the end of the last mark.
   664   size_t _n_pauses_at_mark_end;
   666   // Stash a pointer to the g1 heap.
   667   G1CollectedHeap* _g1;
   669   // The average time in ms per collection pause, averaged over recent pauses.
   670   double recent_avg_time_for_pauses_ms();
   672   // The average time in ms for RS scanning, per pause, averaged
   673   // over recent pauses. (Note the RS scanning time for a pause
   674   // is itself an average of the RS scanning time for each worker
   675   // thread.)
   676   double recent_avg_time_for_rs_scan_ms();
   678   // The number of "recent" GCs recorded in the number sequences
   679   int number_of_recent_gcs();
   681   // The average survival ratio, computed by the total number of bytes
   682   // surviving / total number of bytes before collection over the last
   683   // several recent pauses.
   684   double recent_avg_survival_fraction();
   685   // The survival fraction of the most recent pause; if there have been no
   686   // pauses, returns 1.0.
   687   double last_survival_fraction();
   689   // Returns a "conservative" estimate of the recent survival rate, i.e.,
   690   // one that may be higher than "recent_avg_survival_fraction".
   691   // This is conservative in several ways:
   692   //   If there have been few pauses, it will assume a potential high
   693   //     variance, and err on the side of caution.
   694   //   It puts a lower bound (currently 0.1) on the value it will return.
   695   //   To try to detect phase changes, if the most recent pause ("latest") has a
   696   //     higher-than average ("avg") survival rate, it returns that rate.
   697   // "work" version is a utility function; young is restricted to young regions.
   698   double conservative_avg_survival_fraction_work(double avg,
   699                                                  double latest);
   701   // The arguments are the two sequences that keep track of the number of bytes
   702   //   surviving and the total number of bytes before collection, resp.,
   703   //   over the last several recent pauses.
   704   // Returns the survival rate for the category in the most recent pause.
   705   // If there have been no pauses, returns 1.0.
   706   double last_survival_fraction_work(TruncatedSeq* surviving,
   707                                      TruncatedSeq* before);
   709   // The arguments are the two sequences that keep track of the number of bytes
   710   //   surviving and the total number of bytes before collection, resp.,
   711   //   over the last several recent pauses
   712   // Returns the average survival ratio over the last several recent pauses.
   713   // If there have been no pauses, returns 1.0.
   714   double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
   715                                            TruncatedSeq* before);
   717   double conservative_avg_survival_fraction() {
   718     double avg = recent_avg_survival_fraction();
   719     double latest = last_survival_fraction();
   720     return conservative_avg_survival_fraction_work(avg, latest);
   721   }
   723   // The ratio of gc time to elapsed time, computed over recent pauses.
   724   double _recent_avg_pause_time_ratio;
   726   double recent_avg_pause_time_ratio() {
   727     return _recent_avg_pause_time_ratio;
   728   }
   730   // Number of pauses between concurrent marking.
   731   size_t _pauses_btwn_concurrent_mark;
   733   size_t _n_marks_since_last_pause;
   735   // At the end of a pause we check the heap occupancy and we decide
   736   // whether we will start a marking cycle during the next pause. If
   737   // we decide that we want to do that, we will set this parameter to
   738   // true. So, this parameter will stay true between the end of a
   739   // pause and the beginning of a subsequent pause (not necessarily
   740   // the next one, see the comments on the next field) when we decide
   741   // that we will indeed start a marking cycle and do the initial-mark
   742   // work.
   743   volatile bool _initiate_conc_mark_if_possible;
   745   // If initiate_conc_mark_if_possible() is set at the beginning of a
   746   // pause, it is a suggestion that the pause should start a marking
   747   // cycle by doing the initial-mark work. However, it is possible
   748   // that the concurrent marking thread is still finishing up the
   749   // previous marking cycle (e.g., clearing the next marking
   750   // bitmap). If that is the case we cannot start a new cycle and
   751   // we'll have to wait for the concurrent marking thread to finish
   752   // what it is doing. In this case we will postpone the marking cycle
   753   // initiation decision for the next pause. When we eventually decide
   754   // to start a cycle, we will set _during_initial_mark_pause which
   755   // will stay true until the end of the initial-mark pause and it's
   756   // the condition that indicates that a pause is doing the
   757   // initial-mark work.
   758   volatile bool _during_initial_mark_pause;
   760   bool _should_revert_to_full_young_gcs;
   761   bool _last_full_young_gc;
   763   // This set of variables tracks the collector efficiency, in order to
   764   // determine whether we should initiate a new marking.
   765   double _cur_mark_stop_world_time_ms;
   766   double _mark_remark_start_sec;
   767   double _mark_cleanup_start_sec;
   768   double _mark_closure_time_ms;
   770   // Update the young list target length either by setting it to the
   771   // desired fixed value or by calculating it using G1's pause
   772   // prediction model. If no rs_lengths parameter is passed, predict
   773   // the RS lengths using the prediction model, otherwise use the
   774   // given rs_lengths as the prediction.
   775   void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
   777   // Calculate and return the minimum desired young list target
   778   // length. This is the minimum desired young list length according
   779   // to the user's inputs.
   780   size_t calculate_young_list_desired_min_length(size_t base_min_length);
   782   // Calculate and return the maximum desired young list target
   783   // length. This is the maximum desired young list length according
   784   // to the user's inputs.
   785   size_t calculate_young_list_desired_max_length();
   787   // Calculate and return the maximum young list target length that
   788   // can fit into the pause time goal. The parameters are: rs_lengths
   789   // represents the prediction of how large the young RSet lengths will
   790   // be, base_min_length is the already existing number of regions in
   791   // the young list, and desired_min_length and desired_max_length are the
   792   // desired min and max young list length according to the user's inputs.
   793   size_t calculate_young_list_target_length(size_t rs_lengths,
   794                                             size_t base_min_length,
   795                                             size_t desired_min_length,
   796                                             size_t desired_max_length);
   798   // Check whether a given young length (young_length) fits into the
   799   // given target pause time and whether the prediction for the amount
   800   // of objects to be copied for the given length will fit into the
   801   // given free space (expressed by base_free_regions).  It is used by
   802   // calculate_young_list_target_length().
   803   bool predict_will_fit(size_t young_length, double base_time_ms,
   804                         size_t base_free_regions, double target_pause_time_ms);
   806 public:
   808   G1CollectorPolicy();
   810   virtual G1CollectorPolicy* as_g1_policy() { return this; }
   812   virtual CollectorPolicy::Name kind() {
   813     return CollectorPolicy::G1CollectorPolicyKind;
   814   }
   816   // Check the current value of the young list RSet lengths and
   817   // compare it against the last prediction. If the current value is
   818   // higher, recalculate the young list target length prediction.
   819   void revise_young_list_target_length_if_necessary();
   821   size_t bytes_in_collection_set() {
   822     return _bytes_in_collection_set_before_gc;
   823   }
   825   unsigned calc_gc_alloc_time_stamp() {
   826     return _all_pause_times_ms->num() + 1;
   827   }
   829   // Recalculate the reserve region number. This should be called
   830   // after the heap is resized.
   831   void calculate_reserve(size_t all_regions);
   833 protected:
   835   // Count the number of bytes used in the CS.
   836   void count_CS_bytes_used();
   838   // Together these do the base cleanup-recording work.  Subclasses might
   839   // want to put something between them.
   840   void record_concurrent_mark_cleanup_end_work1(size_t freed_bytes,
   841                                                 size_t max_live_bytes);
   842   void record_concurrent_mark_cleanup_end_work2();
   844 public:
   846   virtual void init();
   848   // Create jstat counters for the policy.
   849   virtual void initialize_gc_policy_counters();
   851   virtual HeapWord* mem_allocate_work(size_t size,
   852                                       bool is_tlab,
   853                                       bool* gc_overhead_limit_was_exceeded);
   855   // This method controls how a collector handles one or more
   856   // of its generations being fully allocated.
   857   virtual HeapWord* satisfy_failed_allocation(size_t size,
   858                                               bool is_tlab);
   860   BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
   862   GenRemSet::Name  rem_set_name()     { return GenRemSet::CardTable; }
   864   // The number of collection pauses so far.
   865   long n_pauses() const { return _n_pauses; }
   867   // Update the heuristic info to record a collection pause of the given
   868   // start time, where the given number of bytes were used at the start.
   869   // This may involve changing the desired size of a collection set.
   871   virtual void record_stop_world_start();
   873   virtual void record_collection_pause_start(double start_time_sec,
   874                                              size_t start_used);
   876   // Must currently be called while the world is stopped.
   877   void record_concurrent_mark_init_end(double
   878                                            mark_init_elapsed_time_ms);
   880   void record_mark_closure_time(double mark_closure_time_ms);
   882   virtual void record_concurrent_mark_remark_start();
   883   virtual void record_concurrent_mark_remark_end();
   885   virtual void record_concurrent_mark_cleanup_start();
   886   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
   887                                                   size_t max_live_bytes);
   888   virtual void record_concurrent_mark_cleanup_completed();
   890   virtual void record_concurrent_pause();
   891   virtual void record_concurrent_pause_end();
   893   virtual void record_collection_pause_end();
   894   void print_heap_transition();
   896   // Record the fact that a full collection occurred.
   897   virtual void record_full_collection_start();
   898   virtual void record_full_collection_end();
   900   void record_gc_worker_start_time(int worker_i, double ms) {
   901     _par_last_gc_worker_start_times_ms[worker_i] = ms;
   902   }
   904   void record_ext_root_scan_time(int worker_i, double ms) {
   905     _par_last_ext_root_scan_times_ms[worker_i] = ms;
   906   }
   908   void record_mark_stack_scan_time(int worker_i, double ms) {
   909     _par_last_mark_stack_scan_times_ms[worker_i] = ms;
   910   }
   912   void record_satb_drain_time(double ms) {
   913     _cur_satb_drain_time_ms = ms;
   914     _satb_drain_time_set    = true;
   915   }
   917   void record_satb_drain_processed_buffers (int processed_buffers) {
   918     _last_satb_drain_processed_buffers = processed_buffers;
   919   }
   921   void record_mod_union_time(double ms) {
   922     _all_mod_union_times_ms->add(ms);
   923   }
   925   void record_update_rs_time(int thread, double ms) {
   926     _par_last_update_rs_times_ms[thread] = ms;
   927   }
   929   void record_update_rs_processed_buffers (int thread,
   930                                            double processed_buffers) {
   931     _par_last_update_rs_processed_buffers[thread] = processed_buffers;
   932   }
   934   void record_scan_rs_time(int thread, double ms) {
   935     _par_last_scan_rs_times_ms[thread] = ms;
   936   }
   938   void reset_obj_copy_time(int thread) {
   939     _par_last_obj_copy_times_ms[thread] = 0.0;
   940   }
   942   void reset_obj_copy_time() {
   943     reset_obj_copy_time(0);
   944   }
   946   void record_obj_copy_time(int thread, double ms) {
   947     _par_last_obj_copy_times_ms[thread] += ms;
   948   }
   950   void record_termination(int thread, double ms, size_t attempts) {
   951     _par_last_termination_times_ms[thread] = ms;
   952     _par_last_termination_attempts[thread] = (double) attempts;
   953   }
   955   void record_gc_worker_end_time(int worker_i, double ms) {
   956     _par_last_gc_worker_end_times_ms[worker_i] = ms;
   957   }
   959   void record_pause_time_ms(double ms) {
   960     _last_pause_time_ms = ms;
   961   }
   963   void record_clear_ct_time(double ms) {
   964     _cur_clear_ct_time_ms = ms;
   965   }
   967   void record_par_time(double ms) {
   968     _cur_collection_par_time_ms = ms;
   969   }
   971   void record_aux_start_time(int i) {
   972     guarantee(i < _aux_num, "should be within range");
   973     _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
   974   }
   976   void record_aux_end_time(int i) {
   977     guarantee(i < _aux_num, "should be within range");
   978     double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
   979     _cur_aux_times_set[i] = true;
   980     _cur_aux_times_ms[i] += ms;
   981   }
   983 #ifndef PRODUCT
   984   void record_cc_clear_time(double ms) {
   985     if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
   986       _min_clear_cc_time_ms = ms;
   987     if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
   988       _max_clear_cc_time_ms = ms;
   989     _cur_clear_cc_time_ms = ms;
   990     _cum_clear_cc_time_ms += ms;
   991     _num_cc_clears++;
   992   }
   993 #endif
   995   // Record how much space we copied during a GC. This is typically
   996   // called when a GC alloc region is being retired.
   997   void record_bytes_copied_during_gc(size_t bytes) {
   998     _bytes_copied_during_gc += bytes;
   999   }
  1001   // The amount of space we copied during a GC.
  1002   size_t bytes_copied_during_gc() {
  1003     return _bytes_copied_during_gc;
  1004   }
  1006   // Choose a new collection set.  Marks the chosen regions as being
  1007   // "in_collection_set", and links them together.  The head and number of
  1008   // the collection set are available via access methods.
  1009   virtual void choose_collection_set(double target_pause_time_ms) = 0;
  1011   // The head of the list (via "next_in_collection_set()") representing the
  1012   // current collection set.
  1013   HeapRegion* collection_set() { return _collection_set; }
  1015   void clear_collection_set() { _collection_set = NULL; }
  1017   // The number of elements in the current collection set.
  1018   size_t collection_set_size() { return _collection_set_size; }
  1020   // Add "hr" to the CS.
  1021   void add_to_collection_set(HeapRegion* hr);
  1023   // Incremental CSet Support
  1025   // The head of the incrementally built collection set.
  1026   HeapRegion* inc_cset_head() { return _inc_cset_head; }
  1028   // The tail of the incrementally built collection set.
  1029   HeapRegion* inc_set_tail() { return _inc_cset_tail; }
  1031   // The number of elements in the incrementally built collection set.
  1032   size_t inc_cset_size() { return _inc_cset_size; }
  1034   // Initialize incremental collection set info.
  1035   void start_incremental_cset_building();
  1037   void clear_incremental_cset() {
  1038     _inc_cset_head = NULL;
  1039     _inc_cset_tail = NULL;
  1040   }
  1042   // Stop adding regions to the incremental collection set
  1043   void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }
  1045   // Add/remove information about hr to the aggregated information
  1046   // for the incrementally built collection set.
  1047   void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  1048   void remove_from_incremental_cset_info(HeapRegion* hr);
  1050   // Update information about hr in the aggregated information for
  1051   // the incrementally built collection set.
  1052   void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
  1054 private:
  1055   // Update the incremental cset information when adding a region
  1056   // (should not be called directly).
  1057   void add_region_to_incremental_cset_common(HeapRegion* hr);
  1059 public:
  1060   // Add hr to the LHS of the incremental collection set.
  1061   void add_region_to_incremental_cset_lhs(HeapRegion* hr);
  1063   // Add hr to the RHS of the incremental collection set.
  1064   void add_region_to_incremental_cset_rhs(HeapRegion* hr);
  1066 #ifndef PRODUCT
  1067   void print_collection_set(HeapRegion* list_head, outputStream* st);
  1068 #endif // !PRODUCT
  1070   bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  1071   void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  1072   void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }
  1074   bool during_initial_mark_pause()      { return _during_initial_mark_pause;  }
  1075   void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
  1076   void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
  1078   // This sets the initiate_conc_mark_if_possible() flag to start a
  1079   // new cycle, as long as we are not already in one. It's best if it
  1080   // is called during a safepoint when the test whether a cycle is in
  1081   // progress or not is stable.
  1082   bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);
  1084   // This is called at the very beginning of an evacuation pause (it
  1085   // has to be the first thing that the pause does). If
  1086   // initiate_conc_mark_if_possible() is true, and the concurrent
  1087   // marking thread has completed its work during the previous cycle,
  1088   // it will set during_initial_mark_pause() so that the pause does
  1089   // the initial-mark work and start a marking cycle.
  1090   void decide_on_conc_mark_initiation();
  1092   // If an expansion would be appropriate, because recent GC overhead had
  1093   // exceeded the desired limit, return an amount to expand by.
  1094   virtual size_t expansion_amount();
  1096   // note start of mark thread
  1097   void note_start_of_mark_thread();
  1099   // The marked bytes of region "r" have changed; reclassify its desirability
  1100   // for marking.  Also asserts that "r" is eligible for a CS.
  1101   virtual void note_change_in_marked_bytes(HeapRegion* r) = 0;
  1103 #ifndef PRODUCT
  1104   // Check any appropriate marked bytes info, asserting false if
  1105   // something's wrong, else returning "true".
  1106   virtual bool assertMarkedBytesDataOK() = 0;
  1107 #endif
  1109   // Print tracing information.
  1110   void print_tracing_info() const;
  1112   // Print stats on young survival ratio
  1113   void print_yg_surv_rate_info() const;
  1115   void finished_recalculating_age_indexes(bool is_survivors) {
  1116     if (is_survivors) {
  1117       _survivor_surv_rate_group->finished_recalculating_age_indexes();
  1118     } else {
  1119       _short_lived_surv_rate_group->finished_recalculating_age_indexes();
  1120     }
  1121     // do that for any other surv rate groups
  1122   }
  1124   bool is_young_list_full() {
  1125     size_t young_list_length = _g1->young_list()->length();
  1126     size_t young_list_target_length = _young_list_target_length;
  1127     return young_list_length >= young_list_target_length;
  1128   }
  1130   bool can_expand_young_list() {
  1131     size_t young_list_length = _g1->young_list()->length();
  1132     size_t young_list_max_length = _young_list_max_length;
  1133     return young_list_length < young_list_max_length;
  1134   }
  1136   void update_region_num(bool young);
  1138   bool full_young_gcs() {
  1139     return _full_young_gcs;
  1140   }
  1141   void set_full_young_gcs(bool full_young_gcs) {
  1142     _full_young_gcs = full_young_gcs;
  1143   }
  1145   bool adaptive_young_list_length() {
  1146     return _adaptive_young_list_length;
  1147   }
  1148   void set_adaptive_young_list_length(bool adaptive_young_list_length) {
  1149     _adaptive_young_list_length = adaptive_young_list_length;
  1150   }
  1152   inline double get_gc_eff_factor() {
  1153     double ratio = _known_garbage_ratio;
  1155     double square = ratio * ratio;
  1156     // square = square * square;
  1157     double ret = square * 9.0 + 1.0;
  1158 #if 0
  1159     gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
  1160 #endif // 0
  1161     guarantee(0.0 <= ret && ret < 10.0, "invariant!");
  1162     return ret;
  1163   }
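  // Range note (assuming _known_garbage_ratio stays within [0, 1)): the
  // factor computed above is 9 * ratio^2 + 1.0, so it lies in [1.0, 10.0),
  // which is exactly what the guarantee() above checks.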
  1165   //
  1166   // Survivor regions policy.
  1167   //
  1168 protected:
  1170   // Current tenuring threshold, set to 0 if the collector reaches the
  1171   // maximum number of survivor regions.
  1172   int _tenuring_threshold;
  1174   // The limit on the number of regions allocated for survivors.
  1175   size_t _max_survivor_regions;
  1177   // For reporting purposes.
  1178   size_t _eden_bytes_before_gc;
  1179   size_t _survivor_bytes_before_gc;
  1180   size_t _capacity_before_gc;
  1182   // The number of survivor regions after a collection.
  1183   size_t _recorded_survivor_regions;
  1184   // List of survivor regions.
  1185   HeapRegion* _recorded_survivor_head;
  1186   HeapRegion* _recorded_survivor_tail;
  1188   ageTable _survivors_age_table;
  1190 public:
  1192   inline GCAllocPurpose
  1193     evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
  1194       if (age < _tenuring_threshold && src_region->is_young()) {
  1195         return GCAllocForSurvived;
  1196       } else {
  1197         return GCAllocForTenured;
  1198       }
  1199   }
  1201   inline bool track_object_age(GCAllocPurpose purpose) {
  1202     return purpose == GCAllocForSurvived;
  1203   }
  1205   static const size_t REGIONS_UNLIMITED = ~(size_t)0;
  1207   size_t max_regions(int purpose);
  1209   // Called when the limit on regions for a particular purpose is reached.
  1210   void note_alloc_region_limit_reached(int purpose) {
  1211     if (purpose == GCAllocForSurvived) {
  1212       _tenuring_threshold = 0;
  1213     }
  1214   }
  1216   void note_start_adding_survivor_regions() {
  1217     _survivor_surv_rate_group->start_adding_regions();
  1218   }
  1220   void note_stop_adding_survivor_regions() {
  1221     _survivor_surv_rate_group->stop_adding_regions();
  1222   }
  1224   void record_survivor_regions(size_t      regions,
  1225                                HeapRegion* head,
  1226                                HeapRegion* tail) {
  1227     _recorded_survivor_regions = regions;
  1228     _recorded_survivor_head    = head;
  1229     _recorded_survivor_tail    = tail;
  1230   }
  1232   size_t recorded_survivor_regions() {
  1233     return _recorded_survivor_regions;
  1234   }
  1236   void record_thread_age_table(ageTable* age_table)
  1237   {
  1238     _survivors_age_table.merge_par(age_table);
  1239   }
  1241   void update_max_gc_locker_expansion();
  1243   // Calculates survivor space parameters.
  1244   void update_survivors_policy();
  1246 };
  1248 // This encapsulates a particular strategy for a g1 Collector.
  1249 //
  1250 //      Start a concurrent mark when our heap size is n bytes
  1251 //            greater than our heap size was at the last concurrent
  1252 //            mark.  Where n is a function of the CMSTriggerRatio
  1253 //            and the MinHeapFreeRatio.
  1254 //
  1255 //      Start a g1 collection pause when we have allocated the
  1256 //            average number of bytes currently being freed in
  1257 //            a collection, but only if it is at least one region
  1258 //            full
  1259 //
  1260 //      Resize Heap based on desired
  1261 //      allocation space, where desired allocation space is
  1262 //      a function of survival rate and desired future size.
  1263 //
  1264 //      Choose collection set by first picking all older regions
  1265 //      which have a survival rate which beats our projected young
  1266 //      survival rate.  Then fill out the number of needed regions
  1267 //      with young regions.
  1269 class G1CollectorPolicy_BestRegionsFirst: public G1CollectorPolicy {
  1270   CollectionSetChooser* _collectionSetChooser;
  1272   virtual void choose_collection_set(double target_pause_time_ms);
  1273   virtual void record_collection_pause_start(double start_time_sec,
  1274                                              size_t start_used);
  1275   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
  1276                                                   size_t max_live_bytes);
  1277   virtual void record_full_collection_end();
  1279 public:
  1280   G1CollectorPolicy_BestRegionsFirst() {
  1281     _collectionSetChooser = new CollectionSetChooser();
  1282   }
  1283   void record_collection_pause_end();
  1284   // This is not needed any more, after the CSet choosing code was
  1285   // changed to use the pause prediction work. But let's leave the
  1286   // hook in just in case.
  1287   void note_change_in_marked_bytes(HeapRegion* r) { }
  1288 #ifndef PRODUCT
  1289   bool assertMarkedBytesDataOK();
  1290 #endif
  1291 };
  1293 // This should move to some place more general...
  1295 // If we have "n" measurements, and we've kept track of their "sum" and the
  1296 // "sum_of_squares" of the measurements, this returns the variance of the
  1297 // sequence.
  1298 inline double variance(int n, double sum_of_squares, double sum) {
  1299   double n_d = (double)n;
  1300   double avg = sum/n_d;
  1301   return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
  1302 }
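// Algebraic check: expanding sum((x_i - avg)^2) / n gives
// (sum_of_squares - 2 * avg * sum + n * avg^2) / n == sum_of_squares / n - avg^2,
// i.e. the population variance E[x^2] - E[x]^2. For example, for the two
// measurements {1.0, 3.0}: n == 2, sum == 4.0, sum_of_squares == 10.0, and
// variance(2, 10.0, 4.0) == (10.0 - 2.0 * 2.0 * 4.0 + 2.0 * 4.0) / 2.0 == 1.0.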
  1304 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
