src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

Thu, 17 Nov 2011 12:40:15 -0800

author
johnc
date
Thu, 17 Nov 2011 12:40:15 -0800
changeset 3296
dc467e8b2c5e
parent 3295
00dd86e542eb
child 3337
41406797186b
permissions
-rw-r--r--

7112743: G1: Reduce overhead of marking closure during evacuation pauses
Summary: Parallelize the serial code that was used to mark objects reachable from survivor objects in the collection set. Some minor improvements in the timers used to track the freeing of the collection set along with some tweaks to PrintGCDetails.
Reviewed-by: tonyp, brutisso

     1 /*
     2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
    28 #include "gc_implementation/g1/collectionSetChooser.hpp"
    29 #include "gc_implementation/g1/g1MMUTracker.hpp"
    30 #include "memory/collectorPolicy.hpp"
    32 // A G1CollectorPolicy makes policy decisions that determine the
    33 // characteristics of the collector.  Examples include:
    34 //   * choice of collection set.
    35 //   * when to collect.
    37 class HeapRegion;
    38 class CollectionSetChooser;
// Yes, this is a bit unpleasant... but it saves replicating the same thing
// over and over again and introducing subtle problems through small typos and
// cutting and pasting mistakes. The macro below introduces a number
// sequence into the following two classes and the methods that access it.
// Expands to: a private NumberSeq member _all_<name>_times_ms, a public
// recorder that appends one sample, and a public accessor returning the
// sequence. NOTE: each expansion leaves the enclosing class in "public"
// access mode. (No comments inside the macro: a // would swallow the
// line-continuation backslash.)
#define define_num_seq(name)                                                  \
private:                                                                      \
  NumberSeq _all_##name##_times_ms;                                           \
public:                                                                       \
  void record_##name##_time_ms(double ms) {                                   \
    _all_##name##_times_ms.add(ms);                                           \
  }                                                                           \
  NumberSeq* get_##name##_seq() {                                             \
    return &_all_##name##_times_ms;                                           \
  }
    56 class MainBodySummary;
// Per-pause timing summary: the total pause time and the residual
// "other" (unaccounted-for) time. C-heap allocated.
class PauseSummary: public CHeapObj {
  define_num_seq(total)
    define_num_seq(other)
public:
  // Overridden by Summary to expose the main-body phase timings; this
  // base class carries none, hence NULL.
  virtual MainBodySummary*    main_body_summary()    { return NULL; }
};
// Phase-by-phase timing sequences for the main body of an evacuation
// pause. Entries marked "parallel only" are recorded only when GC work
// is done by parallel workers; the indentation appears to mark the
// indented entries as sub-phases of "parallel" (NOTE(review): inferred
// from layout — confirm against the recording code).
class MainBodySummary: public CHeapObj {
  define_num_seq(satb_drain) // optional
  define_num_seq(parallel) // parallel only
    define_num_seq(ext_root_scan)
    define_num_seq(mark_stack_scan)
    define_num_seq(update_rs)
    define_num_seq(scan_rs)
    define_num_seq(obj_copy)
    define_num_seq(termination) // parallel only
    define_num_seq(parallel_other) // parallel only
  define_num_seq(mark_closure)
  define_num_seq(clear_ct)
};
// Combines PauseSummary and MainBodySummary for pauses that record the
// full set of phase timings; main_body_summary() therefore returns this.
class Summary: public PauseSummary,
               public MainBodySummary {
public:
  virtual MainBodySummary*    main_body_summary()    { return this; }
};
    86 class G1CollectorPolicy: public CollectorPolicy {
    87 private:
    88   // either equal to the number of parallel threads, if ParallelGCThreads
    89   // has been set, or 1 otherwise
    90   int _parallel_gc_threads;
    92   // The number of GC threads currently active.
    93   uintx _no_of_gc_threads;
    95   enum SomePrivateConstants {
    96     NumPrevPausesForHeuristics = 10
    97   };
    99   G1MMUTracker* _mmu_tracker;
   101   void initialize_flags();
  // Runs the full policy initialization sequence: flag processing
  // first, then size info, then the permanent generation (G1 always
  // collects perm with mark-sweep-compact). Order matters.
  void initialize_all() {
    initialize_flags();
    initialize_size_info();
    initialize_perm_generation(PermGen::MarkSweepCompact);
  }
   109   CollectionSetChooser* _collectionSetChooser;
   111   double _cur_collection_start_sec;
   112   size_t _cur_collection_pause_used_at_start_bytes;
   113   size_t _cur_collection_pause_used_regions_at_start;
   114   size_t _prev_collection_pause_used_at_end_bytes;
   115   double _cur_collection_par_time_ms;
   116   double _cur_satb_drain_time_ms;
   117   double _cur_clear_ct_time_ms;
   118   double _cur_ref_proc_time_ms;
   119   double _cur_ref_enq_time_ms;
   121 #ifndef PRODUCT
   122   // Card Table Count Cache stats
   123   double _min_clear_cc_time_ms;         // min
   124   double _max_clear_cc_time_ms;         // max
   125   double _cur_clear_cc_time_ms;         // clearing time during current pause
  double _cum_clear_cc_time_ms;         // cumulative clearing time
   127   jlong  _num_cc_clears;                // number of times the card count cache has been cleared
   128 #endif
   130   // These exclude marking times.
   131   TruncatedSeq* _recent_gc_times_ms;
   133   TruncatedSeq* _concurrent_mark_remark_times_ms;
   134   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
   136   Summary*           _summary;
   138   NumberSeq* _all_pause_times_ms;
   139   NumberSeq* _all_full_gc_times_ms;
   140   double _stop_world_start;
   141   NumberSeq* _all_stop_world_times_ms;
   142   NumberSeq* _all_yield_times_ms;
   144   int        _aux_num;
   145   NumberSeq* _all_aux_times_ms;
   146   double*    _cur_aux_start_times_ms;
   147   double*    _cur_aux_times_ms;
   148   bool*      _cur_aux_times_set;
   150   double* _par_last_gc_worker_start_times_ms;
   151   double* _par_last_ext_root_scan_times_ms;
   152   double* _par_last_mark_stack_scan_times_ms;
   153   double* _par_last_update_rs_times_ms;
   154   double* _par_last_update_rs_processed_buffers;
   155   double* _par_last_scan_rs_times_ms;
   156   double* _par_last_obj_copy_times_ms;
   157   double* _par_last_termination_times_ms;
   158   double* _par_last_termination_attempts;
   159   double* _par_last_gc_worker_end_times_ms;
   160   double* _par_last_gc_worker_times_ms;
   162   // Each workers 'other' time i.e. the elapsed time of the parallel
   163   // phase of the pause minus the sum of the individual sub-phase
   164   // times for a given worker thread.
   165   double* _par_last_gc_worker_other_times_ms;
   167   // indicates whether we are in full young or partially young GC mode
   168   bool _full_young_gcs;
   170   // if true, then it tries to dynamically adjust the length of the
   171   // young list
   172   bool _adaptive_young_list_length;
   173   size_t _young_list_target_length;
   174   size_t _young_list_fixed_length;
   175   size_t _prev_eden_capacity; // used for logging
   177   // The max number of regions we can extend the eden by while the GC
   178   // locker is active. This should be >= _young_list_target_length;
   179   size_t _young_list_max_length;
   181   bool   _last_young_gc_full;
   183   unsigned              _full_young_pause_num;
   184   unsigned              _partial_young_pause_num;
   186   bool                  _during_marking;
   187   bool                  _in_marking_window;
   188   bool                  _in_marking_window_im;
   190   SurvRateGroup*        _short_lived_surv_rate_group;
   191   SurvRateGroup*        _survivor_surv_rate_group;
   192   // add here any more surv rate groups
   194   double                _gc_overhead_perc;
   196   double _reserve_factor;
   197   size_t _reserve_regions;
  // Whether a concurrent marking cycle is currently in progress.
  bool during_marking() {
    return _during_marking;
  }
   203 private:
   204   enum PredictionConstants {
   205     TruncatedSeqLength = 10
   206   };
   208   TruncatedSeq* _alloc_rate_ms_seq;
   209   double        _prev_collection_pause_end_ms;
   211   TruncatedSeq* _pending_card_diff_seq;
   212   TruncatedSeq* _rs_length_diff_seq;
   213   TruncatedSeq* _cost_per_card_ms_seq;
   214   TruncatedSeq* _fully_young_cards_per_entry_ratio_seq;
   215   TruncatedSeq* _partially_young_cards_per_entry_ratio_seq;
   216   TruncatedSeq* _cost_per_entry_ms_seq;
   217   TruncatedSeq* _partially_young_cost_per_entry_ms_seq;
   218   TruncatedSeq* _cost_per_byte_ms_seq;
   219   TruncatedSeq* _constant_other_time_ms_seq;
   220   TruncatedSeq* _young_other_cost_per_region_ms_seq;
   221   TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
   223   TruncatedSeq* _pending_cards_seq;
   224   TruncatedSeq* _rs_lengths_seq;
   226   TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
   228   TruncatedSeq* _young_gc_eff_seq;
   230   bool   _using_new_ratio_calculations;
   231   size_t _min_desired_young_length; // as set on the command line or default calculations
   232   size_t _max_desired_young_length; // as set on the command line or default calculations
   234   size_t _eden_cset_region_length;
   235   size_t _survivor_cset_region_length;
   236   size_t _old_cset_region_length;
   238   void init_cset_region_lengths(size_t eden_cset_region_length,
   239                                 size_t survivor_cset_region_length);
   241   size_t eden_cset_region_length()     { return _eden_cset_region_length;     }
   242   size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
   243   size_t old_cset_region_length()      { return _old_cset_region_length;      }
   245   size_t _free_regions_at_end_of_collection;
   247   size_t _recorded_rs_lengths;
   248   size_t _max_rs_lengths;
   250   double _recorded_young_free_cset_time_ms;
   251   double _recorded_non_young_free_cset_time_ms;
   253   double _sigma;
   254   double _expensive_region_limit_ms;
   256   size_t _rs_lengths_prediction;
   258   size_t _known_garbage_bytes;
   259   double _known_garbage_ratio;
  // Confidence parameter used to pad predictions; larger sigma means
  // more conservative (more padded) predictions.
  double sigma() {
    return _sigma;
  }

  // A function that prevents us putting too much stock in small sample
  // sets.  5 or more samples yields a factor of one; fewer scales
  // linearly up to 1.0 + 2*sigma() at a single sample.
  // NOTE(review): the original comment claimed a fixed 2.0..1.0 range,
  // which only holds when sigma() == 0.5 — the actual factor is
  // 1.0 + sigma() * (5 - samples) / 2.0.
  double confidence_factor(int samples) {
    if (samples > 4) return 1.0;
    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
  }

  // Low-side (pessimistic) prediction: decaying average minus sigma
  // decaying standard deviations.
  double get_new_neg_prediction(TruncatedSeq* seq) {
    return seq->davg() - sigma() * seq->dsd();
  }
   278 #ifndef PRODUCT
   279   bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
   280 #endif // PRODUCT
   282   void adjust_concurrent_refinement(double update_rs_time,
   283                                     double update_rs_processed_buffers,
   284                                     double goal_ms);
   286   uintx no_of_gc_threads() { return _no_of_gc_threads; }
   287   void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }
   289   double _pause_time_target_ms;
   290   double _recorded_young_cset_choice_time_ms;
   291   double _recorded_non_young_cset_choice_time_ms;
   292   size_t _pending_cards;
   293   size_t _max_pending_cards;
   295 public:
   296   // Accessors
  // Tags hr as an eden region: marks it young, installs the short-lived
  // survivor-rate group on it, and records its index in the collection
  // set.
  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
    hr->set_young();
    hr->install_surv_rate_group(_short_lived_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }

  // Re-tags an existing survivor region (must already be young and a
  // survivor): installs the survivor rate group and records its
  // collection-set index.
  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
    assert(hr->is_young() && hr->is_survivor(), "pre-condition");
    hr->install_surv_rate_group(_survivor_surv_rate_group);
    hr->set_young_index_in_cset(young_index_in_cset);
  }
   310 #ifndef PRODUCT
   311   bool verify_young_ages();
   312 #endif // PRODUCT
  // High-side (conservative) prediction: the larger of avg + sigma*sd
  // and the average scaled by the small-sample confidence factor.
  double get_new_prediction(TruncatedSeq* seq) {
    return MAX2(seq->davg() + sigma() * seq->dsd(),
                seq->davg() * confidence_factor(seq->num()));
  }

  // Records the maximum RSet length observed for the current pause.
  void record_max_rs_lengths(size_t rs_lengths) {
    _max_rs_lengths = rs_lengths;
  }
   323   size_t predict_pending_card_diff() {
   324     double prediction = get_new_neg_prediction(_pending_card_diff_seq);
   325     if (prediction < 0.00001)
   326       return 0;
   327     else
   328       return (size_t) prediction;
   329   }
   331   size_t predict_pending_cards() {
   332     size_t max_pending_card_num = _g1->max_pending_card_num();
   333     size_t diff = predict_pending_card_diff();
   334     size_t prediction;
   335     if (diff > max_pending_card_num)
   336       prediction = max_pending_card_num;
   337     else
   338       prediction = max_pending_card_num - diff;
   340     return prediction;
   341   }
  // High-side prediction of the RSet length diff.
  size_t predict_rs_length_diff() {
    return (size_t) get_new_prediction(_rs_length_diff_seq);
  }

  // High-side prediction of the allocation rate (per ms).
  double predict_alloc_rate_ms() {
    return get_new_prediction(_alloc_rate_ms_seq);
  }

  // High-side prediction of the cost (ms) of processing one card.
  double predict_cost_per_card_ms() {
    return get_new_prediction(_cost_per_card_ms_seq);
  }

  // Predicted time (ms) to update the remembered sets for the given
  // number of pending cards.
  double predict_rs_update_time_ms(size_t pending_cards) {
    return (double) pending_cards * predict_cost_per_card_ms();
  }

  // High-side prediction of the cards-per-RSet-entry ratio during
  // fully-young GCs.
  double predict_fully_young_cards_per_entry_ratio() {
    return get_new_prediction(_fully_young_cards_per_entry_ratio_seq);
  }
  // Cards-per-entry ratio for partially-young GCs; falls back to the
  // fully-young ratio until at least 2 partially-young samples exist.
  double predict_partially_young_cards_per_entry_ratio() {
    if (_partially_young_cards_per_entry_ratio_seq->num() < 2)
      return predict_fully_young_cards_per_entry_ratio();
    else
      return get_new_prediction(_partially_young_cards_per_entry_ratio_seq);
  }

  // Predicted card count for an RSet of the given length (fully-young
  // ratio).
  size_t predict_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_fully_young_cards_per_entry_ratio());
  }

  // Predicted card count for an RSet of the given length
  // (partially-young ratio).
  size_t predict_non_young_card_num(size_t rs_length) {
    return (size_t) ((double) rs_length *
                     predict_partially_young_cards_per_entry_ratio());
  }
  // Predicted time (ms) to scan the given number of cards; dispatches
  // on the current GC mode (fully vs. partially young).
  double predict_rs_scan_time_ms(size_t card_num) {
    if (full_young_gcs())
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return predict_partially_young_rs_scan_time_ms(card_num);
  }

  // Partially-young variant; uses the fully-young cost-per-entry
  // sequence until at least 3 partially-young samples exist.
  double predict_partially_young_rs_scan_time_ms(size_t card_num) {
    if (_partially_young_cost_per_entry_ms_seq->num() < 3)
      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
    else
      return (double) card_num *
        get_new_prediction(_partially_young_cost_per_entry_ms_seq);
  }
  // Predicted copy time (ms) while concurrent marking is running; until
  // 3 marking-time samples exist, pads the non-marking prediction by
  // 10% instead.
  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) {
    if (_cost_per_byte_ms_during_cm_seq->num() < 3)
      return 1.1 * (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_during_cm_seq);
  }

  // Predicted time (ms) to copy the given number of bytes; uses the
  // during-marking prediction when inside a marking window (but not an
  // initial-mark window).
  double predict_object_copy_time_ms(size_t bytes_to_copy) {
    if (_in_marking_window && !_in_marking_window_im)
      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
    else
      return (double) bytes_to_copy *
        get_new_prediction(_cost_per_byte_ms_seq);
  }
  // Predicted fixed "other" overhead (ms) of a pause.
  double predict_constant_other_time_ms() {
    return get_new_prediction(_constant_other_time_ms_seq);
  }

  // Predicted per-region "other" overhead (ms) for young_num young
  // regions.
  double predict_young_other_time_ms(size_t young_num) {
    return
      (double) young_num *
      get_new_prediction(_young_other_cost_per_region_ms_seq);
  }

  // Predicted per-region "other" overhead (ms) for non_young_num
  // non-young regions.
  double predict_non_young_other_time_ms(size_t non_young_num) {
    return
      (double) non_young_num *
      get_new_prediction(_non_young_other_cost_per_region_ms_seq);
  }
   428   void check_if_region_is_too_expensive(double predicted_time_ms);
   430   double predict_young_collection_elapsed_time_ms(size_t adjustment);
   431   double predict_base_elapsed_time_ms(size_t pending_cards);
   432   double predict_base_elapsed_time_ms(size_t pending_cards,
   433                                       size_t scanned_cards);
   434   size_t predict_bytes_to_copy(HeapRegion* hr);
   435   double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
   437   void set_recorded_rs_lengths(size_t rs_lengths);
  // Total collection-set length: young (eden + survivor) plus old.
  size_t cset_region_length()       { return young_cset_region_length() +
                                             old_cset_region_length(); }
  // Young portion of the collection set: eden plus survivor regions.
  size_t young_cset_region_length() { return eden_cset_region_length() +
                                             survivor_cset_region_length(); }

  // Records the time (ms) spent freeing the young regions of the cset.
  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  // Records the time (ms) spent freeing the non-young regions of the
  // cset.
  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  // Low-side prediction of young GC efficiency.
  double predict_young_gc_eff() {
    return get_new_neg_prediction(_young_gc_eff_seq);
  }
   456   double predict_survivor_regions_evac_time();
  // Called once the collection-set regions have been freed; tells every
  // survivor-rate group that all surviving words for this pause are
  // recorded. Statistics are propagated only for a full-young GC
  // outside a marking window.
  void cset_regions_freed() {
    bool propagate = _last_young_gc_full && !_in_marking_window;
    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
    // also call it on any more surv rate groups
  }
  // Sets the known-garbage byte count and recomputes its ratio against
  // the current heap capacity.
  void set_known_garbage_bytes(size_t known_garbage_bytes) {
    _known_garbage_bytes = known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }

  // Subtracts reclaimed garbage from the known-garbage count (must not
  // underflow) and recomputes the ratio.
  void decrease_known_garbage_bytes(size_t known_garbage_bytes) {
    guarantee( _known_garbage_bytes >= known_garbage_bytes, "invariant" );

    _known_garbage_bytes -= known_garbage_bytes;
    size_t heap_bytes = _g1->capacity();
    _known_garbage_ratio = (double) _known_garbage_bytes / (double) heap_bytes;
  }
  // The MMU (minimum mutator utilization) tracker for this policy.
  G1MMUTracker* mmu_tracker() {
    return _mmu_tracker;
  }

  // Maximum allowed GC time per MMU window, converted from seconds to
  // milliseconds.
  double max_pause_time_ms() {
    return _mmu_tracker->max_gc_time() * 1000.0;
  }

  // High-side prediction of the next remark pause duration (ms).
  double predict_remark_time_ms() {
    return get_new_prediction(_concurrent_mark_remark_times_ms);
  }

  // High-side prediction of the next cleanup pause duration (ms).
  double predict_cleanup_time_ms() {
    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
  }
  // Returns an estimate of the survival rate of the region at yg-age
  // "age" in the given survivor-rate group, clamped to at most 1.0.
  // Requires at least one recorded sample for that age (guaranteed).
  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
    TruncatedSeq* seq = surv_rate_group->get_seq(age);
    if (seq->num() == 0)
      // Diagnostic output just before the guarantee below fails.
      gclog_or_tty->print("BARF! age is %d", age);
    guarantee( seq->num() > 0, "invariant" );
    double pred = get_new_prediction(seq);
    if (pred > 1.0)
      pred = 1.0;
    return pred;
  }

  // Convenience overload: survival rate at "age" in the short-lived
  // (eden) rate group.
  double predict_yg_surv_rate(int age) {
    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
  }

  // Accumulated predicted survival rate up to and including "age" for
  // the short-lived rate group.
  double accum_yg_surv_rate_pred(int age) {
    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
  }
   516 private:
   517   void print_stats(int level, const char* str, double value);
   518   void print_stats(int level, const char* str, int value);
   520   void print_par_stats(int level, const char* str, double* data);
   521   void print_par_sizes(int level, const char* str, double* data);
   523   void check_other_times(int level,
   524                          NumberSeq* other_times_ms,
   525                          NumberSeq* calc_other_times_ms) const;
   527   void print_summary (PauseSummary* stats) const;
   529   void print_summary (int level, const char* str, NumberSeq* seq) const;
   530   void print_summary_sd (int level, const char* str, NumberSeq* seq) const;
   532   double avg_value (double* data);
   533   double max_value (double* data);
   534   double sum_of_values (double* data);
   535   double max_sum (double* data1, double* data2);
   537   double _last_pause_time_ms;
   539   size_t _bytes_in_collection_set_before_gc;
   540   size_t _bytes_copied_during_gc;
   542   // Used to count used bytes in CS.
   543   friend class CountCSClosure;
   545   // Statistics kept per GC stoppage, pause or full.
   546   TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
   548   // Add a new GC of the given duration and end time to the record.
   549   void update_recent_gc_times(double end_time_sec, double elapsed_ms);
   551   // The head of the list (via "next_in_collection_set()") representing the
   552   // current collection set. Set from the incrementally built collection
   553   // set at the start of the pause.
   554   HeapRegion* _collection_set;
   556   // The number of bytes in the collection set before the pause. Set from
   557   // the incrementally built collection set at the start of an evacuation
   558   // pause.
   559   size_t _collection_set_bytes_used_before;
   561   // The associated information that is maintained while the incremental
   562   // collection set is being built with young regions. Used to populate
   563   // the recorded info for the evacuation pause.
   565   enum CSetBuildType {
   566     Active,             // We are actively building the collection set
   567     Inactive            // We are not actively building the collection set
   568   };
   570   CSetBuildType _inc_cset_build_state;
   572   // The head of the incrementally built collection set.
   573   HeapRegion* _inc_cset_head;
   575   // The tail of the incrementally built collection set.
   576   HeapRegion* _inc_cset_tail;
   578   // The number of bytes in the incrementally built collection set.
   579   // Used to set _collection_set_bytes_used_before at the start of
   580   // an evacuation pause.
   581   size_t _inc_cset_bytes_used_before;
   583   // Used to record the highest end of heap region in collection set
   584   HeapWord* _inc_cset_max_finger;
   586   // The RSet lengths recorded for regions in the collection set
   587   // (updated by the periodic sampling of the regions in the
   588   // young list/collection set).
   589   size_t _inc_cset_recorded_rs_lengths;
   591   // The predicted elapsed time it will take to collect the regions
   592   // in the collection set (updated by the periodic sampling of the
   593   // regions in the young list/collection set).
   594   double _inc_cset_predicted_elapsed_time_ms;
   596   // Stash a pointer to the g1 heap.
   597   G1CollectedHeap* _g1;
   599   // The ratio of gc time to elapsed time, computed over recent pauses.
   600   double _recent_avg_pause_time_ratio;
   602   double recent_avg_pause_time_ratio() {
   603     return _recent_avg_pause_time_ratio;
   604   }
   606   // At the end of a pause we check the heap occupancy and we decide
   607   // whether we will start a marking cycle during the next pause. If
   608   // we decide that we want to do that, we will set this parameter to
   609   // true. So, this parameter will stay true between the end of a
   610   // pause and the beginning of a subsequent pause (not necessarily
   611   // the next one, see the comments on the next field) when we decide
   612   // that we will indeed start a marking cycle and do the initial-mark
   613   // work.
   614   volatile bool _initiate_conc_mark_if_possible;
   616   // If initiate_conc_mark_if_possible() is set at the beginning of a
   617   // pause, it is a suggestion that the pause should start a marking
   618   // cycle by doing the initial-mark work. However, it is possible
   619   // that the concurrent marking thread is still finishing up the
   620   // previous marking cycle (e.g., clearing the next marking
   621   // bitmap). If that is the case we cannot start a new cycle and
   622   // we'll have to wait for the concurrent marking thread to finish
   623   // what it is doing. In this case we will postpone the marking cycle
   624   // initiation decision for the next pause. When we eventually decide
   625   // to start a cycle, we will set _during_initial_mark_pause which
   626   // will stay true until the end of the initial-mark pause and it's
   627   // the condition that indicates that a pause is doing the
   628   // initial-mark work.
   629   volatile bool _during_initial_mark_pause;
   631   bool _should_revert_to_full_young_gcs;
   632   bool _last_full_young_gc;
   634   // This set of variables tracks the collector efficiency, in order to
   635   // determine whether we should initiate a new marking.
   636   double _cur_mark_stop_world_time_ms;
   637   double _mark_remark_start_sec;
   638   double _mark_cleanup_start_sec;
   639   double _mark_closure_time_ms;
   641   // Update the young list target length either by setting it to the
   642   // desired fixed value or by calculating it using G1's pause
   643   // prediction model. If no rs_lengths parameter is passed, predict
   644   // the RS lengths using the prediction model, otherwise use the
   645   // given rs_lengths as the prediction.
   646   void update_young_list_target_length(size_t rs_lengths = (size_t) -1);
   648   // Calculate and return the minimum desired young list target
   649   // length. This is the minimum desired young list length according
   650   // to the user's inputs.
   651   size_t calculate_young_list_desired_min_length(size_t base_min_length);
   653   // Calculate and return the maximum desired young list target
   654   // length. This is the maximum desired young list length according
   655   // to the user's inputs.
   656   size_t calculate_young_list_desired_max_length();
   658   // Calculate and return the maximum young list target length that
   659   // can fit into the pause time goal. The parameters are: rs_lengths
   660   // represent the prediction of how large the young RSet lengths will
  // be, base_min_length is the already existing number of regions in
   662   // the young list, min_length and max_length are the desired min and
   663   // max young list length according to the user's inputs.
   664   size_t calculate_young_list_target_length(size_t rs_lengths,
   665                                             size_t base_min_length,
   666                                             size_t desired_min_length,
   667                                             size_t desired_max_length);
   669   // Check whether a given young length (young_length) fits into the
   670   // given target pause time and whether the prediction for the amount
   671   // of objects to be copied for the given length will fit into the
   672   // given free space (expressed by base_free_regions).  It is used by
   673   // calculate_young_list_target_length().
   674   bool predict_will_fit(size_t young_length, double base_time_ms,
   675                         size_t base_free_regions, double target_pause_time_ms);
   677   // Count the number of bytes used in the CS.
   678   void count_CS_bytes_used();
   680   void update_young_list_size_using_newratio(size_t number_of_heap_regions);
   682 public:
   684   G1CollectorPolicy();
   686   virtual G1CollectorPolicy* as_g1_policy() { return this; }
   688   virtual CollectorPolicy::Name kind() {
   689     return CollectorPolicy::G1CollectorPolicyKind;
   690   }
   692   // Check the current value of the young list RSet lengths and
   693   // compare it against the last prediction. If the current value is
   694   // higher, recalculate the young list target length prediction.
   695   void revise_young_list_target_length_if_necessary();
  // Number of bytes that were in the collection set before the GC.
  size_t bytes_in_collection_set() {
    return _bytes_in_collection_set_before_gc;
  }

  // Time stamp for GC allocation regions: one past the number of pauses
  // recorded so far.
  unsigned calc_gc_alloc_time_stamp() {
    return _all_pause_times_ms->num() + 1;
  }
   705   // This should be called after the heap is resized.
   706   void record_new_heap_size(size_t new_number_of_regions);
   708 public:
   710   void init();
   712   // Create jstat counters for the policy.
   713   virtual void initialize_gc_policy_counters();
   715   virtual HeapWord* mem_allocate_work(size_t size,
   716                                       bool is_tlab,
   717                                       bool* gc_overhead_limit_was_exceeded);
   719   // This method controls how a collector handles one or more
   720   // of its generations being fully allocated.
   721   virtual HeapWord* satisfy_failed_allocation(size_t size,
   722                                               bool is_tlab);
   724   BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; }
   726   GenRemSet::Name  rem_set_name()     { return GenRemSet::CardTable; }
   728   // Update the heuristic info to record a collection pause of the given
   729   // start time, where the given number of bytes were used at the start.
   730   // This may involve changing the desired size of a collection set.
   732   void record_stop_world_start();
   734   void record_collection_pause_start(double start_time_sec, size_t start_used);
   736   // Must currently be called while the world is stopped.
   737   void record_concurrent_mark_init_end(double
   738                                            mark_init_elapsed_time_ms);
   740   void record_mark_closure_time(double mark_closure_time_ms) {
   741     _mark_closure_time_ms = mark_closure_time_ms;
   742   }
   744   void record_concurrent_mark_remark_start();
   745   void record_concurrent_mark_remark_end();
   747   void record_concurrent_mark_cleanup_start();
   748   void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
   749   void record_concurrent_mark_cleanup_completed();
   751   void record_concurrent_pause();
   752   void record_concurrent_pause_end();
   754   void record_collection_pause_end(int no_of_gc_threads);
   755   void print_heap_transition();
   757   // Record the fact that a full collection occurred.
   758   void record_full_collection_start();
   759   void record_full_collection_end();
  // Per-worker timers for the phases of an evacuation pause. Each
  // setter stores the elapsed time (in ms) reported by GC worker
  // 'worker_i' / 'thread' into the corresponding per-worker array.

  void record_gc_worker_start_time(int worker_i, double ms) {
    _par_last_gc_worker_start_times_ms[worker_i] = ms;
  }

  void record_ext_root_scan_time(int worker_i, double ms) {
    _par_last_ext_root_scan_times_ms[worker_i] = ms;
  }

  void record_mark_stack_scan_time(int worker_i, double ms) {
    _par_last_mark_stack_scan_times_ms[worker_i] = ms;
  }

  // SATB buffer draining only happens while marking is in progress.
  void record_satb_drain_time(double ms) {
    assert(_g1->mark_in_progress(), "shouldn't be here otherwise");
    _cur_satb_drain_time_ms = ms;
  }

  void record_update_rs_time(int thread, double ms) {
    _par_last_update_rs_times_ms[thread] = ms;
  }

  void record_update_rs_processed_buffers (int thread,
                                           double processed_buffers) {
    _par_last_update_rs_processed_buffers[thread] = processed_buffers;
  }

  void record_scan_rs_time(int thread, double ms) {
    _par_last_scan_rs_times_ms[thread] = ms;
  }

  void reset_obj_copy_time(int thread) {
    _par_last_obj_copy_times_ms[thread] = 0.0;
  }

  // Convenience overload for the single-threaded (worker 0) case.
  void reset_obj_copy_time() {
    reset_obj_copy_time(0);
  }

  // Note the "+=": object copy time accumulates across multiple calls
  // within the same pause (see reset_obj_copy_time).
  void record_obj_copy_time(int thread, double ms) {
    _par_last_obj_copy_times_ms[thread] += ms;
  }

  // Termination time and the number of termination attempts for the
  // given worker (attempts stored as double for uniform reporting).
  void record_termination(int thread, double ms, size_t attempts) {
    _par_last_termination_times_ms[thread] = ms;
    _par_last_termination_attempts[thread] = (double) attempts;
  }

  void record_gc_worker_end_time(int worker_i, double ms) {
    _par_last_gc_worker_end_times_ms[worker_i] = ms;
  }
  // Total elapsed time of the last pause, in ms.
  void record_pause_time_ms(double ms) {
    _last_pause_time_ms = ms;
  }

  // Time spent clearing the card table after the pause.
  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  // Time spent in the parallel part of the pause.
  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }
   824   void record_aux_start_time(int i) {
   825     guarantee(i < _aux_num, "should be within range");
   826     _cur_aux_start_times_ms[i] = os::elapsedTime() * 1000.0;
   827   }
   829   void record_aux_end_time(int i) {
   830     guarantee(i < _aux_num, "should be within range");
   831     double ms = os::elapsedTime() * 1000.0 - _cur_aux_start_times_ms[i];
   832     _cur_aux_times_set[i] = true;
   833     _cur_aux_times_ms[i] += ms;
   834   }
  // Time spent processing discovered references during the pause.
  void record_ref_proc_time(double ms) {
    _cur_ref_proc_time_ms = ms;
  }

  // Time spent enqueueing references during the pause.
  void record_ref_enq_time(double ms) {
    _cur_ref_enq_time_ms = ms;
  }
   844 #ifndef PRODUCT
   845   void record_cc_clear_time(double ms) {
   846     if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
   847       _min_clear_cc_time_ms = ms;
   848     if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
   849       _max_clear_cc_time_ms = ms;
   850     _cur_clear_cc_time_ms = ms;
   851     _cum_clear_cc_time_ms += ms;
   852     _num_cc_clears++;
   853   }
   854 #endif
  // Record how much space we copied during a GC. This is typically
  // called when a GC alloc region is being retired. Note that the
  // counter accumulates across calls within the same GC.
  void record_bytes_copied_during_gc(size_t bytes) {
    _bytes_copied_during_gc += bytes;
  }

  // The amount of space we copied during a GC.
  size_t bytes_copied_during_gc() {
    return _bytes_copied_during_gc;
  }
  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set", and links them together.  The head and number of
  // the collection set are available via access methods.
  void choose_collection_set(double target_pause_time_ms);

  // The head of the list (via "next_in_collection_set()") representing the
  // current collection set.
  HeapRegion* collection_set() { return _collection_set; }

  void clear_collection_set() { _collection_set = NULL; }

  // Add old region "hr" to the CSet.
  void add_old_region_to_cset(HeapRegion* hr);

  // Incremental CSet Support

  // The head of the incrementally built collection set.
  HeapRegion* inc_cset_head() { return _inc_cset_head; }

  // The tail of the incrementally built collection set.
  HeapRegion* inc_set_tail() { return _inc_cset_tail; }

  // Initialize incremental collection set info.
  void start_incremental_cset_building();

  // Forget the incremental CSet list (head and tail only; does not
  // touch the regions themselves).
  void clear_incremental_cset() {
    _inc_cset_head = NULL;
    _inc_cset_tail = NULL;
  }

  // Stop adding regions to the incremental collection set
  void stop_incremental_cset_building() { _inc_cset_build_state = Inactive; }

  // Add/remove information about hr to the aggregated information
  // for the incrementally built collection set.
  void add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length);
  void remove_from_incremental_cset_info(HeapRegion* hr);

  // Update information about hr in the aggregated information for
  // the incrementally built collection set.
  void update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length);
private:
  // Update the incremental cset information when adding a region
  // (should not be called directly).
  void add_region_to_incremental_cset_common(HeapRegion* hr);

public:
  // Add hr to the LHS (head end) of the incremental collection set.
  void add_region_to_incremental_cset_lhs(HeapRegion* hr);

  // Add hr to the RHS (tail end) of the incremental collection set.
  void add_region_to_incremental_cset_rhs(HeapRegion* hr);

#ifndef PRODUCT
  void print_collection_set(HeapRegion* list_head, outputStream* st);
#endif // !PRODUCT
  // Flag: a concurrent marking cycle should be initiated at the next
  // opportunity, if possible.
  bool initiate_conc_mark_if_possible()       { return _initiate_conc_mark_if_possible;  }
  void set_initiate_conc_mark_if_possible()   { _initiate_conc_mark_if_possible = true;  }
  void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; }

  // Flag: the current evacuation pause is also doing the initial-mark
  // work of a marking cycle.
  bool during_initial_mark_pause()      { return _during_initial_mark_pause;  }
  void set_during_initial_mark_pause()  { _during_initial_mark_pause = true;  }
  void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }

  // This sets the initiate_conc_mark_if_possible() flag to start a
  // new cycle, as long as we are not already in one. It's best if it
  // is called during a safepoint when the test whether a cycle is in
  // progress or not is stable.
  bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause);

  // This is called at the very beginning of an evacuation pause (it
  // has to be the first thing that the pause does). If
  // initiate_conc_mark_if_possible() is true, and the concurrent
  // marking thread has completed its work during the previous cycle,
  // it will set during_initial_mark_pause() so that the pause does
  // the initial-mark work and start a marking cycle.
  void decide_on_conc_mark_initiation();
  // If an expansion would be appropriate, because recent GC overhead had
  // exceeded the desired limit, return an amount to expand by.
  size_t expansion_amount();

#ifndef PRODUCT
  // Check any appropriate marked bytes info, asserting false if
  // something's wrong, else returning "true".
  bool assertMarkedBytesDataOK();
#endif

  // Print tracing information.
  void print_tracing_info() const;

  // Print stats on young survival ratio
  void print_yg_surv_rate_info() const;
   963   void finished_recalculating_age_indexes(bool is_survivors) {
   964     if (is_survivors) {
   965       _survivor_surv_rate_group->finished_recalculating_age_indexes();
   966     } else {
   967       _short_lived_surv_rate_group->finished_recalculating_age_indexes();
   968     }
   969     // do that for any other surv rate groups
   970   }
   972   bool is_young_list_full() {
   973     size_t young_list_length = _g1->young_list()->length();
   974     size_t young_list_target_length = _young_list_target_length;
   975     return young_list_length >= young_list_target_length;
   976   }
   978   bool can_expand_young_list() {
   979     size_t young_list_length = _g1->young_list()->length();
   980     size_t young_list_max_length = _young_list_max_length;
   981     return young_list_length < young_list_max_length;
   982   }
  // The maximum length the young list is allowed to grow to.
  size_t young_list_max_length() {
    return _young_list_max_length;
  }

  // Whether we are currently in fully-young GC mode (as opposed to
  // partially-young).
  bool full_young_gcs() {
    return _full_young_gcs;
  }
  void set_full_young_gcs(bool full_young_gcs) {
    _full_young_gcs = full_young_gcs;
  }

  // Whether the young list target length is computed adaptively
  // (rather than being fixed).
  bool adaptive_young_list_length() {
    return _adaptive_young_list_length;
  }
  void set_adaptive_young_list_length(bool adaptive_young_list_length) {
    _adaptive_young_list_length = adaptive_young_list_length;
  }
  // Returns a weighting factor derived from the known garbage ratio:
  // 9 * ratio^2 + 1, i.e. in [1.0, 10.0) for ratios in [0.0, 1.0).
  inline double get_gc_eff_factor() {
    double ratio = _known_garbage_ratio;

    double square = ratio * ratio;
    // square = square * square;
    double ret = square * 9.0 + 1.0;
#if 0
    gclog_or_tty->print_cr("ratio = %1.2lf, ret = %1.2lf", ratio, ret);
#endif // 0
    // NOTE(review): ret < 10.0 implicitly assumes
    // _known_garbage_ratio < 1.0 — confirm that invariant holds where
    // the ratio is computed.
    guarantee(0.0 <= ret && ret < 10.0, "invariant!");
    return ret;
  }
private:
  //
  // Survivor regions policy.
  //

  // Current tenuring threshold, set to 0 if the collector reaches the
  // maximum amount of survivor regions.
  int _tenuring_threshold;

  // The limit on the number of regions allocated for survivors.
  size_t _max_survivor_regions;

  // For reporting purposes.
  size_t _eden_bytes_before_gc;
  size_t _survivor_bytes_before_gc;
  size_t _capacity_before_gc;

  // The number of survivor regions after a collection.
  size_t _recorded_survivor_regions;
  // List of survivor regions (head and tail).
  HeapRegion* _recorded_survivor_head;
  HeapRegion* _recorded_survivor_tail;

  // Per-pause age table, merged from the GC workers' thread-local
  // tables (see record_thread_age_table).
  ageTable _survivors_age_table;
public:

  // Decide whether an object of the given age being copied out of
  // src_region should go to survivor or tenured space. Objects below
  // the tenuring threshold that come from a young region survive;
  // everything else is tenured. (word_sz is currently unused here.)
  inline GCAllocPurpose
    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
      if (age < _tenuring_threshold && src_region->is_young()) {
        return GCAllocForSurvived;
      } else {
        return GCAllocForTenured;
      }
  }

  // Only objects copied to survivor space have their age tracked.
  inline bool track_object_age(GCAllocPurpose purpose) {
    return purpose == GCAllocForSurvived;
  }

  static const size_t REGIONS_UNLIMITED = ~(size_t)0;

  // The maximum number of regions for the given allocation purpose
  // (REGIONS_UNLIMITED if there is no limit).
  size_t max_regions(int purpose);

  // The limit on regions for a particular purpose is reached.
  void note_alloc_region_limit_reached(int purpose) {
    if (purpose == GCAllocForSurvived) {
      // No survivor regions left: stop aging objects into survivors.
      _tenuring_threshold = 0;
    }
  }

  void note_start_adding_survivor_regions() {
    _survivor_surv_rate_group->start_adding_regions();
  }

  void note_stop_adding_survivor_regions() {
    _survivor_surv_rate_group->stop_adding_regions();
  }

  // Record the survivor region list (head/tail) and its length after
  // a collection.
  void record_survivor_regions(size_t      regions,
                               HeapRegion* head,
                               HeapRegion* tail) {
    _recorded_survivor_regions = regions;
    _recorded_survivor_head    = head;
    _recorded_survivor_tail    = tail;
  }

  size_t recorded_survivor_regions() {
    return _recorded_survivor_regions;
  }

  // Merge a GC worker's thread-local age table into the policy's
  // per-pause survivor age table.
  void record_thread_age_table(ageTable* age_table)
  {
    _survivors_age_table.merge_par(age_table);
  }

  void update_max_gc_locker_expansion();

  // Calculates survivor space parameters.
  void update_survivors_policy();
  1096 };
  1098 // This should move to some place more general...
  1100 // If we have "n" measurements, and we've kept track of their "sum" and the
  1101 // "sum_of_squares" of the measurements, this returns the variance of the
  1102 // sequence.
  1103 inline double variance(int n, double sum_of_squares, double sum) {
  1104   double n_d = (double)n;
  1105   double avg = sum/n_d;
  1106   return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
  1109 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP

mercurial