src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp

author       johnc
date         Tue, 21 Aug 2012 14:10:39 -0700
changeset    3998:7383557659bd
parent       3956:db823a892a55
child        5014:5c93c1f61226
permissions  -rw-r--r--

7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso
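
A minimal sketch of the efficiency calculation the summary refers to (editor's illustration; hypothetical names, the real code lives in HeapRegion::calc_gc_efficiency() and the collector policy): a region's GC efficiency is the number of bytes it would free divided by the predicted time to evacuate it, with the prediction driven by the region's actual live bytes and by timing history gathered during mixed GCs.

    #include <cstddef>

    // Illustrative only -- not the HotSpot implementation.  "predicted_time_ms"
    // stands for the policy's prediction, seeded with historical mixed-GC
    // timings; "live_bytes" is the marked-live byte count for the region.
    static double calc_gc_efficiency_sketch(size_t capacity_bytes,
                                            size_t live_bytes,
                                            double predicted_time_ms) {
      size_t reclaimable_bytes = capacity_bytes - live_bytes;
      // A higher bytes-reclaimed-per-millisecond ratio makes the region a
      // more attractive candidate for the collection set.
      return (double) reclaimable_bytes / predicted_time_ms;
    }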

     1 /*
     2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
    28 #include "gc_implementation/g1/sparsePRT.hpp"
    30 // Remembered set for a heap region.  Represents a set of "cards" that
    31 // contain pointers into the owner heap region.  Cards are defined somewhat
    32 // abstractly, in terms of what the "BlockOffsetTable" in use can parse.
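// Editor's note (illustrative, not part of the original header): a "card"
// covers a fixed, power-of-two sized chunk of the heap, so mapping a "from"
// address to a card index is a subtraction and a shift.  A minimal sketch,
// assuming the usual 512-byte card size (card_shift == 9):
//
//   size_t card_index_for(const void* addr, const void* heap_bottom) {
//     const int card_shift = 9;   // log2(512)
//     return ((uintptr_t)addr - (uintptr_t)heap_bottom) >> card_shift;
//   }
//
// The remembered set below stores such card indices in one of three
// representations: sparse, fine, or coarse.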
    34 class G1CollectedHeap;
    35 class G1BlockOffsetSharedArray;
    36 class HeapRegion;
    37 class HeapRegionRemSetIterator;
    38 class PerRegionTable;
    39 class SparsePRT;
    41 // Essentially a wrapper around SparsePRTCleanupTask. See
    42 // sparsePRT.hpp for more details.
    43 class HRRSCleanupTask : public SparsePRTCleanupTask {
    44 };
    46 // The "_coarse_map" is a bitmap with one bit for each region, where set
    47 // bits indicate that the corresponding region may contain some pointer
    48 // into the owning region.
    50 // The "_fine_grain_entries" array is an open hash table of PerRegionTables
    51 // (PRTs), indicating regions for which we're keeping the RS as a set of
    52 // cards.  The strategy is to cap the size of the fine-grain table,
    53 // deleting an entry and setting the corresponding coarse-grained bit when
    54 // we would overflow this cap.
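// Editor's note (illustrative pseudo-code, hypothetical helper names): the
// overflow policy described above, as seen by the code that needs to add a
// PRT when the fine-grain table is already full:
//
//   if (_n_fine_entries == _max_fine_entries) {
//     // Evict some PRT; delete_region_table() also sets the victim
//     // region's bit in _coarse_map, so all of its cards stay covered.
//     PerRegionTable* victim = delete_region_table();
//     recycle(victim);   // hypothetical: keep the PRT around for reuse
//   }
//   // ...now there is room to install a PRT for the new from-region...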
    56 // We use a mixture of locking and lock-free techniques here.  We allow
    57 // threads to locate PRTs without locking, but threads attempting to alter
    58 // a bucket list obtain a lock.  This means that any failing attempt to
    59 // find a PRT must be retried with the lock.  It might seem dangerous that
    60 // a read can find a PRT that is concurrently deleted.  This is all right,
    61 // because:
    62 //
    63 //   1) We only actually free PRTs at safe points (though we reuse them at
    64 //      other times).
    65 //   2) We find PRTs in an attempt to add entries.  If a PRT is deleted,
    66 //      its _coarse_map bit is set, so the card that we were attempting to add
    67 //      is represented.  If a deleted PRT is re-used, a thread adding a bit,
    68 //      thinking the PRT is for a different region, does no harm.
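// Editor's note (illustrative pseudo-code): the "lock-free find, retry under
// the lock" pattern described above, roughly as a writer adding a card uses it:
//
//   PerRegionTable* prt = find_region_table(ind, from_hr);    // no lock
//   if (prt == NULL) {
//     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);  // writers lock
//     prt = find_region_table(ind, from_hr);                  // retry
//     if (prt == NULL) {
//       // still absent: use the sparse table, or install a new PRT
//     }
//   }
//
// A reader that races with a concurrent deletion is safe for reasons (1)
// and (2) above.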
    70 class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
    71   friend class HeapRegionRemSetIterator;
    73   G1CollectedHeap* _g1h;
    74   Mutex            _m;
    75   HeapRegion*      _hr;
    77   // These are protected by "_m".
    78   BitMap      _coarse_map;
    79   size_t      _n_coarse_entries;
    80   static jint _n_coarsenings;
    82   PerRegionTable** _fine_grain_regions;
    83   size_t           _n_fine_entries;
    85   // The fine grain remembered sets are doubly linked together using
    86   // their 'next' and 'prev' fields.
    87   // This allows fast bulk freeing of all the fine grain remembered
    88   // set entries, and fast finding of all of them without iterating
    89   // over the _fine_grain_regions table.
    90   PerRegionTable * _first_all_fine_prts;
    91   PerRegionTable * _last_all_fine_prts;
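  // Editor's note (illustrative pseudo-code): the "all" list makes bulk
  // clearing independent of the hash table's bucket structure:
  //
  //   PerRegionTable* cur = _first_all_fine_prts;
  //   while (cur != NULL) {
  //     PerRegionTable* nxt = cur->next();
  //     release_prt(cur);   // hypothetical: return the PRT to the free list
  //     cur = nxt;
  //   }
  //   _first_all_fine_prts = _last_all_fine_prts = NULL;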
    93   // Used to sample a subset of the fine grain PRTs to determine which
    94   // PRT to evict and coarsen.
    95   size_t        _fine_eviction_start;
    96   static size_t _fine_eviction_stride;
    97   static size_t _fine_eviction_sample_size;
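  // Editor's note (illustrative pseudo-code): eviction does not scan the whole
  // fine-grain table; it samples a strided subset starting at a rotating index:
  //
  //   size_t i = _fine_eviction_start;
  //   for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
  //     // remember the least-occupied PRT seen so far as the eviction victim
  //     consider_victim(_fine_grain_regions[i]);   // hypothetical helper
  //     i = (i + _fine_eviction_stride) % _max_fine_entries;
  //   }
  //   _fine_eviction_start++;   // rotate the start point for next time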
    99   SparsePRT   _sparse_table;
   101   // These are static after init.
   102   static size_t _max_fine_entries;
   103   static size_t _mod_max_fine_entries_mask;
   105   // Requires "prt" to be the first element of the bucket list appropriate
   106   // for "hr".  If this list contains an entry for "hr", return it,
   107   // otherwise return "NULL".
   108   PerRegionTable* find_region_table(size_t ind, HeapRegion* hr) const;
   110   // Find, delete, and return a candidate PerRegionTable, if any exists,
   111   // adding the deleted region to the coarse bitmap.  Requires the caller
   112   // to hold _m, and the fine-grain table to be full.
   113   PerRegionTable* delete_region_table();
   115   // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
   116   // be the correct index for "hr"), delete it and return true; else return
   117   // false.
   118   bool del_single_region_table(size_t ind, HeapRegion* hr);
   120   // Indexed by thread X heap region, to minimize thread contention.
   121   static int** _from_card_cache;
   122   static size_t _from_card_cache_max_regions;
   123   static size_t _from_card_cache_mem_size;
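  // Editor's note (illustrative pseudo-code): the cache filters out the very
  // common case of a thread repeatedly recording the same card for the same
  // from-region:
  //
  //   int card = from_card_index;                        // card of "from"
  //   if (_from_card_cache[tid][hr_index] == card) {
  //     return;                                          // duplicate, done
  //   }
  //   _from_card_cache[tid][hr_index] = card;
  //   // ...fall through to the sparse/fine/coarse insertion path...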
   125   // link/add the given fine grain remembered set into the "all" list
   126   void link_to_all(PerRegionTable * prt);
    127   // unlink/remove the given fine grain remembered set from the "all" list
   128   void unlink_from_all(PerRegionTable * prt);
   130 public:
   131   OtherRegionsTable(HeapRegion* hr);
   133   HeapRegion* hr() const { return _hr; }
   135   // For now.  Could "expand" some tables in the future, so that this made
   136   // sense.
   137   void add_reference(OopOrNarrowOopStar from, int tid);
   139   // Removes any entries shown by the given bitmaps to contain only dead
   140   // objects.
   141   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
    143   // Note: takes a lock internally, even though declared const.
   144   size_t occupied() const;
   145   size_t occ_fine() const;
   146   size_t occ_coarse() const;
   147   size_t occ_sparse() const;
   149   static jint n_coarsenings() { return _n_coarsenings; }
   151   // Returns size in bytes.
    152   // Note: takes a lock internally, even though declared const.
   153   size_t mem_size() const;
   154   static size_t static_mem_size();
   155   static size_t fl_mem_size();
   157   bool contains_reference(OopOrNarrowOopStar from) const;
   158   bool contains_reference_locked(OopOrNarrowOopStar from) const;
   160   void clear();
   162   // Specifically clear the from_card_cache.
   163   void clear_fcc();
   165   // "from_hr" is being cleared; remove any entries from it.
   166   void clear_incoming_entry(HeapRegion* from_hr);
   168   void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
   170   // Declare the heap size (in # of regions) to the OtherRegionsTable.
   171   // (Uses it to initialize from_card_cache).
   172   static void init_from_card_cache(size_t max_regions);
   174   // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
   175   // Make sure any entries for higher regions are invalid.
   176   static void shrink_from_card_cache(size_t new_n_regs);
   178   static void print_from_card_cache();
   179 };
   181 class HeapRegionRemSet : public CHeapObj<mtGC> {
   182   friend class VMStructs;
   183   friend class HeapRegionRemSetIterator;
   185 public:
   186   enum Event {
   187     Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
   188   };
   190 private:
   191   G1BlockOffsetSharedArray* _bosa;
   192   G1BlockOffsetSharedArray* bosa() const { return _bosa; }
   194   OtherRegionsTable _other_regions;
   196   enum ParIterState { Unclaimed, Claimed, Complete };
   197   volatile ParIterState _iter_state;
   198   volatile jlong _iter_claimed;
   200   // Unused unless G1RecordHRRSOops is true.
   202   static const int MaxRecorded = 1000000;
   203   static OopOrNarrowOopStar* _recorded_oops;
   204   static HeapWord**          _recorded_cards;
   205   static HeapRegion**        _recorded_regions;
   206   static int                 _n_recorded;
   208   static const int MaxRecordedEvents = 1000;
   209   static Event*       _recorded_events;
   210   static int*         _recorded_event_index;
   211   static int          _n_recorded_events;
   213   static void print_event(outputStream* str, Event evnt);
   215 public:
   216   HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
   217                    HeapRegion* hr);
   219   static int num_par_rem_sets();
   220   static void setup_remset_size();
   222   HeapRegion* hr() const {
   223     return _other_regions.hr();
   224   }
   226   size_t occupied() const {
   227     return _other_regions.occupied();
   228   }
   229   size_t occ_fine() const {
   230     return _other_regions.occ_fine();
   231   }
   232   size_t occ_coarse() const {
   233     return _other_regions.occ_coarse();
   234   }
   235   size_t occ_sparse() const {
   236     return _other_regions.occ_sparse();
   237   }
   239   static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }
   241   // Used in the sequential case.
   242   void add_reference(OopOrNarrowOopStar from) {
   243     _other_regions.add_reference(from, 0);
   244   }
   246   // Used in the parallel case.
   247   void add_reference(OopOrNarrowOopStar from, int tid) {
   248     _other_regions.add_reference(from, tid);
   249   }
   251   // Removes any entries shown by the given bitmaps to contain only dead
   252   // objects.
   253   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
   255   // The region is being reclaimed; clear its remset, and any mention of
   256   // entries for this region in other remsets.
   257   void clear();
   259   // Attempt to claim the region.  Returns true iff this call caused an
   260   // atomic transition from Unclaimed to Claimed.
   261   bool claim_iter();
   262   // Sets the iteration state to "complete".
   263   void set_iter_complete();
   264   // Returns "true" iff the region's iteration is complete.
   265   bool iter_is_complete();
   267   // Support for claiming blocks of cards during iteration
   268   size_t iter_claimed() const { return (size_t)_iter_claimed; }
   269   // Claim the next block of cards
   270   size_t iter_claimed_next(size_t step) {
   271     size_t current, next;
   272     do {
   273       current = iter_claimed();
   274       next = current + step;
   275     } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
   276     return current;
   277   }
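  // Editor's note (illustrative, hypothetical worker code): claim_iter() lets
  // exactly one worker perform the Unclaimed -> Claimed transition for a
  // region's remembered set, while iter_claimed_next() lets any number of
  // workers carve the card scan into blocks:
  //
  //   const size_t block_size = 64;                  // cards per claimed block
  //   size_t start = hrrs->iter_claimed_next(block_size);
  //   while (start < hrrs->occupied()) {
  //     // scan cards [start, start + block_size) of this remembered set
  //     start = hrrs->iter_claimed_next(block_size);
  //   }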
   278   void reset_for_par_iteration();
   280   bool verify_ready_for_par_iteration() {
   281     return (_iter_state == Unclaimed) && (_iter_claimed == 0);
   282   }
   284   // Initialize the given iterator to iterate over this rem set.
   285   void init_iterator(HeapRegionRemSetIterator* iter) const;
   287   // The actual # of bytes this hr_remset takes up.
    288   size_t mem_size() {
    289     return _other_regions.mem_size()
    290       // Add this object's own footprint, but subtract the embedded
    291       // OtherRegionsTable, which _other_regions.mem_size() already counts.
    292       + sizeof(HeapRegionRemSet) - sizeof(OtherRegionsTable);
    293   }
   295   // Returns the memory occupancy of all static data structures associated
   296   // with remembered sets.
   297   static size_t static_mem_size() {
   298     return OtherRegionsTable::static_mem_size();
   299   }
   301   // Returns the memory occupancy of all free_list data structures associated
   302   // with remembered sets.
   303   static size_t fl_mem_size() {
   304     return OtherRegionsTable::fl_mem_size();
   305   }
   307   bool contains_reference(OopOrNarrowOopStar from) const {
   308     return _other_regions.contains_reference(from);
   309   }
   310   void print() const;
   312   // Called during a stop-world phase to perform any deferred cleanups.
   313   static void cleanup();
   315   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   316   // (Uses it to initialize from_card_cache).
   317   static void init_heap(uint max_regions) {
   318     OtherRegionsTable::init_from_card_cache((size_t) max_regions);
   319   }
   321   // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
   322   static void shrink_heap(uint new_n_regs) {
   323     OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
   324   }
   326 #ifndef PRODUCT
   327   static void print_from_card_cache() {
   328     OtherRegionsTable::print_from_card_cache();
   329   }
   330 #endif
   332   static void record(HeapRegion* hr, OopOrNarrowOopStar f);
   333   static void print_recorded();
   334   static void record_event(Event evnt);
   336   // These are wrappers for the similarly-named methods on
   337   // SparsePRT. Look at sparsePRT.hpp for more details.
   338   static void reset_for_cleanup_tasks();
   339   void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
   340   static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task);
   342   // Run unit tests.
   343 #ifndef PRODUCT
   344   static void test();
   345 #endif
   346 };
   348 class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
    350   // The remembered set over which we're iterating.
   351   const HeapRegionRemSet* _hrrs;
   353   // Local caching of HRRS fields.
   354   const BitMap*             _coarse_map;
   355   PerRegionTable**          _fine_grain_regions;
   357   G1BlockOffsetSharedArray* _bosa;
   358   G1CollectedHeap*          _g1h;
   360   // The number yielded since initialization.
   361   size_t _n_yielded_fine;
   362   size_t _n_yielded_coarse;
   363   size_t _n_yielded_sparse;
    365   // Which of the three tables (sparse, fine or coarse) we are
    366   // currently iterating over.
   367   enum IterState {
   368     Sparse,
   369     Fine,
   370     Coarse
   371   };
   372   IterState _is;
    374   // In the fine and coarse iterations, the heap offset of the first card
    375   // of the current region.
   376   size_t _cur_region_card_offset;
   377   // Card offset within cur region.
   378   size_t _cur_region_cur_card;
   380   // Coarse table iteration fields:
   382   // Current region index;
   383   int    _coarse_cur_region_index;
   384   size_t _coarse_cur_region_cur_card;
   386   bool coarse_has_next(size_t& card_index);
   388   // Fine table iteration fields:
   390   // Index of bucket-list we're working on.
   391   int _fine_array_index;
    394   // The PerRegionTable we're currently processing within the current bucket list.
   394   PerRegionTable* _fine_cur_prt;
   396   /* SparsePRT::*/ SparsePRTIter _sparse_iter;
   398   void fine_find_next_non_null_prt();
   400   bool fine_has_next();
   401   bool fine_has_next(size_t& card_index);
   403 public:
   404   // We require an iterator to be initialized before use, so the
   405   // constructor does little.
   406   HeapRegionRemSetIterator();
   408   void initialize(const HeapRegionRemSet* hrrs);
    410   // If one or more cards remain to be yielded, returns true and sets
    411   // "card_index" to one of those cards (which is then considered
    412   // yielded).  Otherwise, returns false (and leaves "card_index"
    413   // undefined).
   414   bool has_next(size_t& card_index);
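  // Editor's note (illustrative): typical use, given a HeapRegionRemSet* hrrs:
  //
  //   HeapRegionRemSetIterator iter;
  //   hrrs->init_iterator(&iter);
  //   size_t card_index;
  //   while (iter.has_next(card_index)) {
  //     // process the card with this index
  //   }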
   416   size_t n_yielded_fine() { return _n_yielded_fine; }
   417   size_t n_yielded_coarse() { return _n_yielded_coarse; }
   418   size_t n_yielded_sparse() { return _n_yielded_sparse; }
   419   size_t n_yielded() {
   420     return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
   421   }
   422 };
   424 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
