src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257:e7d0505c8a30
parent:      7195:c02ec279b062
child:       7369:b840813adfcc
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) the virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks: the benchmarks never touch that memory, so without the explicit initialization the operating system would never actually commit those pages. The fix is to skip the initialization entirely when the requested initialization value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
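
A minimal sketch of the idea behind the fix (a hypothetical helper, not the
actual patch): freshly committed pages already read as zero, so an explicit
fill is only needed for non-zero initialization values.

    #include <cstring>
    #include <cstddef>

    // Hypothetical illustration: only touch the backing memory when the
    // requested fill value differs from the zero contents of freshly
    // committed pages. Writing zeros would force the OS to back every
    // page with physical memory, inflating the footprint.
    static void initialize_aux_memory(void* base, size_t size_in_bytes,
                                      unsigned char init_value) {
      if (init_value != 0) {
        memset(base, init_value, size_in_bytes);
      }
      // init_value == 0: do nothing; the pages stay untouched and the OS
      // commits them lazily only if they are ever written.
    }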

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

#include "gc_implementation/g1/collectionSetChooser.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "memory/collectorPolicy.hpp"

// A G1CollectorPolicy makes policy decisions that determine the
// characteristics of the collector.  Examples include:
//   * choice of collection set.
//   * when to collect.

class HeapRegion;
class CollectionSetChooser;
class G1GCPhaseTimes;

// TraceGen0Time collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
// technically in Gen1) while TraceGen1Time collects data about full GCs.
class TraceGen0TimeData : public CHeapObj<mtGC> {
 private:
  unsigned  _young_pause_num;
  unsigned  _mixed_pause_num;

  NumberSeq _all_stop_world_times_ms;
  NumberSeq _all_yield_times_ms;

  NumberSeq _total;
  NumberSeq _other;
  NumberSeq _root_region_scan_wait;
  NumberSeq _parallel;
  NumberSeq _ext_root_scan;
  NumberSeq _satb_filtering;
  NumberSeq _update_rs;
  NumberSeq _scan_rs;
  NumberSeq _obj_copy;
  NumberSeq _termination;
  NumberSeq _parallel_other;
  NumberSeq _clear_ct;

  void print_summary(const char* str, const NumberSeq* seq) const;
  void print_summary_sd(const char* str, const NumberSeq* seq) const;

public:
  TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {}
  void record_start_collection(double time_to_stop_the_world_ms);
  void record_yield_time(double yield_time_ms);
  void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
  void increment_young_collection_count();
  void increment_mixed_collection_count();
  void print() const;
};

class TraceGen1TimeData : public CHeapObj<mtGC> {
 private:
  NumberSeq _all_full_gc_times;

 public:
  void record_full_collection(double full_gc_time_ms);
  void print() const;
};
// There are three command line options related to the young gen size:
// NewSize, MaxNewSize and NewRatio (There is also -Xmn, but that is
// just a short form for NewSize==MaxNewSize). G1 will use its internal
// heuristics to calculate the actual young gen size, so these options
// basically only limit the range within which G1 can pick a young gen
// size. Also, these are general options taking byte sizes. G1 will
// internally work with a number of regions instead. So, some rounding
// will occur.
//
// If nothing related to the young gen size is set on the command
// line we should allow the young gen to be between G1NewSizePercent
// and G1MaxNewSizePercent of the heap size. This means that every time
// the heap size changes, the limits for the young gen size will be
// recalculated.
//
// If only -XX:NewSize is set we should use the specified value as the
// minimum size for young gen. Still using G1MaxNewSizePercent of the
// heap as maximum.
//
// If only -XX:MaxNewSize is set we should use the specified value as the
// maximum size for young gen. Still using G1NewSizePercent of the heap
// as minimum.
//
// If -XX:NewSize and -XX:MaxNewSize are both specified we use these values.
// No updates when the heap size changes. There is a special case when
// NewSize==MaxNewSize. This is interpreted as "fixed" and will use a
// different heuristic for calculating the collection set when we do mixed
// collection.
//
// If only -XX:NewRatio is set we should use the specified ratio of the heap
// as both min and max. This will be interpreted as "fixed" just like the
// NewSize==MaxNewSize case above. But we will update the min and max
// every time the heap size changes.
//
// NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
// combined with either NewSize or MaxNewSize. (A warning message is printed.)
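//
// Illustrative examples (assuming the JDK 8 defaults G1NewSizePercent=5
// and G1MaxNewSizePercent=60; these numbers are not taken from this file):
// with -Xmx10g and no young gen flags, the young gen may range from 512m
// to 6g, and both bounds move whenever the heap resizes. Adding
// -XX:NewSize=1g only raises the lower bound to 1g, while -XX:NewRatio=3
// fixes the young gen at a quarter of the current heap (min == max),
// recalculated on every heap resize.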
class G1YoungGenSizer : public CHeapObj<mtGC> {
private:
  enum SizerKind {
    SizerDefaults,
    SizerNewSizeOnly,
    SizerMaxNewSizeOnly,
    SizerMaxAndNewSize,
    SizerNewRatio
  };
  SizerKind _sizer_kind;
  uint _min_desired_young_length;
  uint _max_desired_young_length;
  bool _adaptive_size;
  uint calculate_default_min_length(uint new_number_of_heap_regions);
  uint calculate_default_max_length(uint new_number_of_heap_regions);

  // Update the given minimum and maximum young gen lengths (in regions)
  // for the given number of heap regions, depending on the kind of
  // sizing algorithm.
  void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length);

public:
  G1YoungGenSizer();
  // Calculate the maximum young gen length (in regions) for the given
  // number of heap regions, depending on the sizing algorithm.
  uint max_young_length(uint number_of_heap_regions);

  void heap_size_changed(uint new_number_of_heap_regions);
  uint min_desired_young_length() {
    return _min_desired_young_length;
  }
  uint max_desired_young_length() {
    return _max_desired_young_length;
  }
  bool adaptive_young_list_length() {
    return _adaptive_size;
  }
};
class G1CollectorPolicy: public CollectorPolicy {
private:
  // Either equal to the number of parallel threads, if ParallelGCThreads
  // has been set, or 1 otherwise.
  int _parallel_gc_threads;

  // The number of GC threads currently active.
  uintx _no_of_gc_threads;

  enum SomePrivateConstants {
    NumPrevPausesForHeuristics = 10
  };

  G1MMUTracker* _mmu_tracker;

  void initialize_alignments();
  void initialize_flags();

  CollectionSetChooser* _collectionSetChooser;

  double _full_collection_start_sec;
  uint   _cur_collection_pause_used_regions_at_start;

  // These exclude marking times.
  TruncatedSeq* _recent_gc_times_ms;

  TruncatedSeq* _concurrent_mark_remark_times_ms;
  TruncatedSeq* _concurrent_mark_cleanup_times_ms;

  TraceGen0TimeData _trace_gen0_time_data;
  TraceGen1TimeData _trace_gen1_time_data;

  double _stop_world_start;

  // Indicates whether we are in young or mixed GC mode.
  bool _gcs_are_young;

  uint _young_list_target_length;
  uint _young_list_fixed_length;

  // The max number of regions we can extend the eden by while the GC
  // locker is active. This should be >= _young_list_target_length.
  uint _young_list_max_length;

  bool                  _last_gc_was_young;

  bool                  _during_marking;
  bool                  _in_marking_window;
  bool                  _in_marking_window_im;

  SurvRateGroup*        _short_lived_surv_rate_group;
  SurvRateGroup*        _survivor_surv_rate_group;
  // add here any more surv rate groups

  double                _gc_overhead_perc;

  double _reserve_factor;
  uint _reserve_regions;

  bool during_marking() {
    return _during_marking;
  }

  enum PredictionConstants {
    TruncatedSeqLength = 10
  };

  TruncatedSeq* _alloc_rate_ms_seq;
  double        _prev_collection_pause_end_ms;

  TruncatedSeq* _rs_length_diff_seq;
  TruncatedSeq* _cost_per_card_ms_seq;
  TruncatedSeq* _young_cards_per_entry_ratio_seq;
  TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
  TruncatedSeq* _cost_per_entry_ms_seq;
  TruncatedSeq* _mixed_cost_per_entry_ms_seq;
  TruncatedSeq* _cost_per_byte_ms_seq;
  TruncatedSeq* _constant_other_time_ms_seq;
  TruncatedSeq* _young_other_cost_per_region_ms_seq;
  TruncatedSeq* _non_young_other_cost_per_region_ms_seq;

  TruncatedSeq* _pending_cards_seq;
  TruncatedSeq* _rs_lengths_seq;

  TruncatedSeq* _cost_per_byte_ms_during_cm_seq;

  G1YoungGenSizer* _young_gen_sizer;

  uint _eden_cset_region_length;
  uint _survivor_cset_region_length;
  uint _old_cset_region_length;

  void init_cset_region_lengths(uint eden_cset_region_length,
                                uint survivor_cset_region_length);

  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
  uint old_cset_region_length()      { return _old_cset_region_length;      }

  uint _free_regions_at_end_of_collection;

  size_t _recorded_rs_lengths;
  size_t _max_rs_lengths;
  double _sigma;

  size_t _rs_lengths_prediction;

  double sigma() { return _sigma; }

  // A function that prevents us putting too much stock in small sample
  // sets.  Returns 1.0 for 5 or more samples; fewer samples scale the
  // factor up linearly, reaching 1.0 + 2 * sigma() at a single sample
  // (2.0 with the default G1ConfidencePercent of 50, i.e. sigma() == 0.5).
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
  }
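
  // Worked example (assuming the default sigma() == 0.5; an illustrative
  // assumption, not a value taken from this file):
  //   samples == 1  ->  1.0 + 0.5 * (5 - 1) / 2.0 == 2.0
  //   samples == 3  ->  1.0 + 0.5 * (5 - 3) / 2.0 == 1.5
  //   samples >= 5  ->  1.0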

  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }

#ifndef PRODUCT
  bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
#endif // PRODUCT

  void adjust_concurrent_refinement(double update_rs_time,
                                    double update_rs_processed_buffers,
                                    double goal_ms);

  uintx no_of_gc_threads() { return _no_of_gc_threads; }
  void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }

  double _pause_time_target_ms;

  size_t _pending_cards;

public:
  // Accessors

  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_eden();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

#ifndef PRODUCT
  bool verify_young_ages();
#endif // PRODUCT

  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }
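
  // Worked example (illustrative numbers, not from this file): for a
  // sequence with davg() == 10.0, dsd() == 2.0 and num() == 3, and with
  // sigma() == 0.5, this returns
  //   MAX2(10.0 + 0.5 * 2.0, 10.0 * confidence_factor(3))
  //     == MAX2(11.0, 10.0 * 1.5) == 15.0,
  // i.e. with few samples the confidence factor dominates and the
  // prediction is deliberately pessimistic.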

  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }

  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  double predict_young_cards_per_entry_ratio() {
    return get_new_prediction(_young_cards_per_entry_ratio_seq);
  }
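
  // Fall back to the young-GC cards-per-entry ratio while there are too
  // few mixed-GC samples to predict from.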
  double predict_mixed_cards_per_entry_ratio() {
    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
      return predict_young_cards_per_entry_ratio();
    } else {
      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
    }
  }

  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_young_cards_per_entry_ratio());
  }

  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_mixed_cards_per_entry_ratio());
  }

  double predict_rs_scan_time_ms(size_t card_num) {
    if (gcs_are_young()) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return predict_mixed_rs_scan_time_ms(card_num);
    }
  }

  double predict_mixed_rs_scan_time_ms(size_t card_num) {
    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    } else {
      return (double) (card_num *
                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
    }
  }

  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
      return (1.1 * (double) bytes_to_copy) *
              get_new_prediction(_cost_per_byte_ms_seq);
    } else {
      return (double) bytes_to_copy *
             get_new_prediction(_cost_per_byte_ms_during_cm_seq);
    }
  }

  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im) {
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    } else {
      return (double) bytes_to_copy *
              get_new_prediction(_cost_per_byte_ms_seq);
    }
  }

  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  double predict_young_other_time_ms(size_t young_num) {
    return (double) young_num *
           get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  double predict_non_young_other_time_ms(size_t non_young_num) {
    return (double) non_young_num *
           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }

  double predict_base_elapsed_time_ms(size_t pending_cards);
  double predict_base_elapsed_time_ms(size_t pending_cards,
                                      size_t scanned_cards);
  size_t predict_bytes_to_copy(HeapRegion* hr);
  double predict_region_elapsed_time_ms(HeapRegion* hr, bool for_young_gc);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint cset_region_length()       { return young_cset_region_length() +
                                           old_cset_region_length(); }
  uint young_cset_region_length() { return eden_cset_region_length() +
                                           survivor_cset_region_length(); }

  double predict_survivor_regions_evac_time();

  void cset_regions_freed() {
    bool propagate = _last_gc_was_young && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }

  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }

  // Returns an estimate of the survival rate of the region at young-gen
  // age "age".
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }

private:
  // Statistics kept per GC stoppage (evacuation pause or full GC).
  TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;

  // Add a new GC of the given duration and end time to the record.
  void update_recent_gc_times(double end_time_sec, double elapsed_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set. Set from the incrementally built collection
  // set at the start of the pause.
  HeapRegion* _collection_set;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_cset() when adding old regions
  // (if any) to the collection set.
  size_t _collection_set_bytes_used_before;

  // The number of bytes copied during the GC.
  size_t _bytes_copied_during_gc;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_cset_build_state;

  // The head of the incrementally built collection set.
  HeapRegion* _inc_cset_head;

  // The tail of the incrementally built collection set.
  HeapRegion* _inc_cset_tail;

  // The number of bytes in the incrementally built collection set.
  // Used to set _collection_set_bytes_used_before at the start of
  // an evacuation pause.
  size_t _inc_cset_bytes_used_before;

  // Used to record the highest end of any heap region in the collection set.
  HeapWord* _inc_cset_max_finger;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_cset_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_cset_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and add them to
  // _inc_cset_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_cset_recorded_rs_lengths_diffs;

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_cset_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_cset_predicted_elapsed_time_ms;

  // See the comment for _inc_cset_recorded_rs_lengths_diffs.
  double _inc_cset_predicted_elapsed_time_ms_diffs;

  // Stash a pointer to the g1 heap.
  G1CollectedHeap* _g1;

  G1GCPhaseTimes* _phase_times;

  // The ratio of gc time to elapsed time, computed over recent pauses.
  double _recent_avg_pause_time_ratio;

  double recent_avg_pause_time_ratio() {
    return _recent_avg_pause_time_ratio;
  }

  // At the end of a pause we check the heap occupancy and we decide
  // whether we will start a marking cycle during the next pause. If
  // we decide that we want to do that, we will set this parameter to
  // true. So, this parameter will stay true between the end of a
  // pause and the beginning of a subsequent pause (not necessarily
  // the next one, see the comments on the next field) when we decide
  // that we will indeed start a marking cycle and do the initial-mark
  // work.
  volatile bool _initiate_conc_mark_if_possible;

  // If initiate_conc_mark_if_possible() is set at the beginning of a
  // pause, it is a suggestion that the pause should start a marking
  // cycle by doing the initial-mark work. However, it is possible
  // that the concurrent marking thread is still finishing up the
  // previous marking cycle (e.g., clearing the next marking
  // bitmap). If that is the case we cannot start a new cycle and
  // we'll have to wait for the concurrent marking thread to finish
  // what it is doing. In this case we will postpone the marking cycle
  // initiation decision for the next pause. When we eventually decide
  // to start a cycle, we will set _during_initial_mark_pause which
  // will stay true until the end of the initial-mark pause and it's
  // the condition that indicates that a pause is doing the
  // initial-mark work.
  volatile bool _during_initial_mark_pause;

  bool _last_young_gc;

  // This set of variables tracks the collector efficiency, in order to
  // determine whether we should initiate a new marking.
  double _cur_mark_stop_world_time_ms;
  double _mark_remark_start_sec;
  double _mark_cleanup_start_sec;

  // Update the young list target length either by setting it to the
  // desired fixed value or by calculating it using G1's pause
  // prediction model. If no rs_lengths parameter is passed, predict
  // the RS lengths using the prediction model, otherwise use the
  // given rs_lengths as the prediction.
  void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
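  // (The default (size_t) -1 acts as the sentinel meaning "no rs_lengths
  // prediction supplied".)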

  // Calculate and return the minimum desired young list target
  // length. This is the minimum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_min_length(uint base_min_length);

  // Calculate and return the maximum desired young list target
  // length. This is the maximum desired young list length according
  // to the user's inputs.
  uint calculate_young_list_desired_max_length();

  // Calculate and return the maximum young list target length that
  // can fit into the pause time goal. The parameters are: rs_lengths
  // is the prediction of how large the young RSet lengths will be,
  // base_min_length is the already existing number of regions in the
  // young list, and desired_min_length and desired_max_length are the
  // desired min and max young list lengths according to the user's inputs.
  uint calculate_young_list_target_length(size_t rs_lengths,
                                          uint base_min_length,
                                          uint desired_min_length,
                                          uint desired_max_length);

  // Check whether a given young length (young_length) fits into the
  // given target pause time and whether the prediction for the amount
  // of objects to be copied for the given length will fit into the
  // given free space (expressed by base_free_regions).  It is used by
  // calculate_young_list_target_length().
  bool predict_will_fit(uint young_length, double base_time_ms,
                        uint base_free_regions, double target_pause_time_ms);

  // Calculate the minimum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_min_old_cset_length();

  // Calculate the maximum number of old regions we'll add to the CSet
  // during a mixed GC.
  uint calc_max_old_cset_length();

  // Returns the given amount of uncollected reclaimable space
  // as a percentage of the current heap capacity.
  double reclaimable_bytes_perc(size_t reclaimable_bytes);

public:

  G1CollectorPolicy();

  virtual G1CollectorPolicy* as_g1_policy() { return this; }

  virtual CollectorPolicy::Name kind() {
    return CollectorPolicy::G1CollectorPolicyKind;
  }

  G1GCPhaseTimes* phase_times() const { return _phase_times; }

  // Check the current value of the young list RSet lengths and
  // compare it against the last prediction. If the current value is
  // higher, recalculate the young list target length prediction.
  void revise_young_list_target_length_if_necessary();

  // This should be called after the heap is resized.
  void record_new_heap_size(uint new_number_of_regions);

  void init();

  // Create jstat counters for the policy.
  virtual void initialize_gc_policy_counters();

  virtual HeapWord* mem_allocate_work(size_t size,
                                      bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded);

  // This method controls how a collector handles one or more
  // of its generations being fully allocated.
  virtual HeapWord* satisfy_failed_allocation(size_t size,
                                              bool is_tlab);

  BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }

  bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);

  // Record the start and end of an evacuation pause.
  void record_collection_pause_start(double start_time_sec);
  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);

  // Record the start and end of a full collection.
  void record_full_collection_start();
  void record_full_collection_end();

  // Must currently be called while the world is stopped.
  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);

  // Record start and end of remark.
  void record_concurrent_mark_remark_start();
  void record_concurrent_mark_remark_end();

  // Record start, end, and completion of cleanup.
  void record_concurrent_mark_cleanup_start();
  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
  void record_concurrent_mark_cleanup_completed();

  // Records the information about the heap size for reporting in
  // print_detailed_heap_transition().
  void record_heap_size_info_at_start(bool full);

  // Print heap sizing transition (with less and more detail).
  void print_heap_transition();
  void print_detailed_heap_transition(bool full = false);

  void record_stop_world_start();
  void record_concurrent_pause();

  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }

  // Determine whether there are candidate regions so that the
  // next GC should be mixed. The two action strings are used
  // in the ergo output when the method returns true or false.
  bool next_gc_should_be_mixed(const char* true_action_str,
                               const char* false_action_str);

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Perform any final calculations on the incremental CSet fields
  // before we can use them.
  void finalize_incremental_cset_building();

  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set.
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add information about hr to the aggregated information for the
  // incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);

private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT

  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  bool during_initial_mark_pause()       { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()   { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause() { _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It is best
  // called during a safepoint, when the test of whether a cycle is
  // in progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() to true so that the pause
  // does the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();

  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  size_t expansion_amount();

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio.
  void print_yg_surv_rate_info() const;

  void finished_recalculating_age_indexes(bool is_survivors) {
    if (is_survivors) {
      _survivor_surv_rate_group->finished_recalculating_age_indexes();
    } else {
      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
    }
    // do that for any other surv rate groups
  }

  size_t young_list_target_length() const { return _young_list_target_length; }

  bool is_young_list_full() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_target_length = _young_list_target_length;
    return young_list_length >= young_list_target_length;
  }

  bool can_expand_young_list() {
    uint young_list_length = _g1->young_list()->length();
    uint young_list_max_length = _young_list_max_length;
    return young_list_length < young_list_max_length;
  }

  uint young_list_max_length() {
    return _young_list_max_length;
  }

  bool gcs_are_young() {
    return _gcs_are_young;
  }
  void set_gcs_are_young(bool gcs_are_young) {
    _gcs_are_young = gcs_are_young;
  }

  bool adaptive_young_list_length() {
    return _young_gen_sizer->adaptive_young_list_length();
  }

private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum number of survivor regions.
  uint _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  uint _max_survivor_regions;

  // For reporting purposes.
  // The value of _heap_used_bytes_before_gc is also used to calculate
  // the cost of copying.

  size_t _eden_used_bytes_before_gc;         // Eden occupancy before GC
  size_t _survivor_used_bytes_before_gc;     // Survivor occupancy before GC
  size_t _heap_used_bytes_before_gc;         // Heap occupancy before GC
  size_t _metaspace_used_bytes_before_gc;    // Metaspace occupancy before GC

  size_t _eden_capacity_bytes_before_gc;     // Eden capacity before GC
  size_t _heap_capacity_bytes_before_gc;     // Heap capacity before GC

  // The number of survivor regions after a collection.
  uint _recorded_survivor_regions;
  // List of survivor regions.
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  ageTable _survivors_age_table;

public:
  uint tenuring_threshold() const { return _tenuring_threshold; }

  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
      if (age < _tenuring_threshold && src_region->is_young()) {
        return GCAllocForSurvived;
      } else {
        return GCAllocForTenured;
      }
  }

  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const uint REGIONS_UNLIMITED = (uint) -1;

  uint max_regions(int purpose);

  // Called when the limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  void record_survivor_regions(uint regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  uint recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  void record_thread_age_table(ageTable* age_table) {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();

  virtual void post_heap_initialize();
};

// This should move to some place more general...

// If we have "n" measurements, and we've kept track of their "sum" and the
// "sum_of_squares" of the measurements, this returns the variance of the
// sequence.
inline double variance(int n, double sum_of_squares, double sum) {
  double n_d = (double)n;
  double avg = sum/n_d;
  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
}
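
// Why this matches the usual E[x^2] - (E[x])^2 form of the (population)
// variance: since avg == sum / n_d, the middle term 2.0 * avg * sum / n_d
// equals 2.0 * avg * avg, so the whole expression reduces to
// sum_of_squares / n_d - avg * avg.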

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
