src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp

Tue, 25 Jan 2011 17:58:19 -0500

author
tonyp
date
Tue, 25 Jan 2011 17:58:19 -0500
changeset 2493
97ba643ea3ed
parent 2314
f95d63e2154a
child 2974
e8b0b0392037
permissions
-rw-r--r--

7014261: G1: RSet-related failures
Summary: A race between the concurrent cleanup thread and the VM thread while it is processing the "expanded sparse table list" causes both threads to try to free the same sparse table entry and either causes one of the threads to fail or leaves the entry in an inconsistent state. The solution is to purge all entries on the expanded list that correspond to regions that are being cleaned up.
Reviewed-by: brutisso, johnc

     1 /*
     2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
    28 #include "gc_implementation/g1/sparsePRT.hpp"
    30 // Remembered set for a heap region.  Represent a set of "cards" that
    31 // contain pointers into the owner heap region.  Cards are defined somewhat
    32 // abstractly, in terms of what the "BlockOffsetTable" in use can parse.
    34 class G1CollectedHeap;
    35 class G1BlockOffsetSharedArray;
    36 class HeapRegion;
    37 class HeapRegionRemSetIterator;
    38 class PosParPRT;
    39 class SparsePRT;
// Essentially a wrapper around SparsePRTCleanupTask. See
// sparsePRT.hpp for more details.
class HRRSCleanupTask : public SparsePRTCleanupTask {
  // Intentionally empty: inherits all state and behavior from
  // SparsePRTCleanupTask. The alias exists so HeapRegionRemSet clients
  // can name the task type without depending on SparsePRT internals.
};
    46 // The "_coarse_map" is a bitmap with one bit for each region, where set
    47 // bits indicate that the corresponding region may contain some pointer
    48 // into the owning region.
    50 // The "_fine_grain_entries" array is an open hash table of PerRegionTables
    51 // (PRTs), indicating regions for which we're keeping the RS as a set of
    52 // cards.  The strategy is to cap the size of the fine-grain table,
    53 // deleting an entry and setting the corresponding coarse-grained bit when
    54 // we would overflow this cap.
    56 // We use a mixture of locking and lock-free techniques here.  We allow
    57 // threads to locate PRTs without locking, but threads attempting to alter
    58 // a bucket list obtain a lock.  This means that any failing attempt to
    59 // find a PRT must be retried with the lock.  It might seem dangerous that
    60 // a read can find a PRT that is concurrently deleted.  This is all right,
    61 // because:
    62 //
    63 //   1) We only actually free PRT's at safe points (though we reuse them at
    64 //      other times).
    65 //   2) We find PRT's in an attempt to add entries.  If a PRT is deleted,
    66 //      it's _coarse_map bit is set, so the that we were attempting to add
    67 //      is represented.  If a deleted PRT is re-used, a thread adding a bit,
    68 //      thinking the PRT is for a different region, does no harm.
// Tracks, for one heap region, the cards in *other* regions that may
// contain pointers into it, at three granularities: a sparse per-region
// table, fine-grained per-region card tables (PRTs), and a coarse bitmap
// with one bit per source region.
class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex            _m;   // Protects the coarse map and fine-grain table below.
  HeapRegion*      _hr;  // The region whose incoming references we track.

  // These are protected by "_m".
  BitMap      _coarse_map;        // One bit per region; set => that region may point in.
  size_t      _n_coarse_entries;
  static jint _n_coarsenings;     // Global count of fine->coarse transitions.

  // Open hash table of PRTs, capped at _max_fine_entries entries.
  PosParPRT** _fine_grain_regions;
  size_t      _n_fine_entries;

#define SAMPLE_FOR_EVICTION 1
#if SAMPLE_FOR_EVICTION
  // State for sampling-based eviction when the fine-grain table is full:
  // a rotating start index plus a global stride and sample size.
  size_t        _fine_eviction_start;
  static size_t _fine_eviction_stride;
  static size_t _fine_eviction_sample_size;
#endif

  SparsePRT   _sparse_table;

  // These are static after init.
  static size_t _max_fine_entries;
  static size_t _mod_max_fine_entries_mask;

  // Requires "prt" to be the first element of the bucket list appropriate
  // for "hr".  If this list contains an entry for "hr", return it,
  // otherwise return "NULL".
  PosParPRT* find_region_table(size_t ind, HeapRegion* hr) const;

  // Find, delete, and return a candidate PosParPRT, if any exists,
  // adding the deleted region to the coarse bitmap.  Requires the caller
  // to hold _m, and the fine-grain table to be full.
  PosParPRT* delete_region_table();

  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
  // be the correct index for "hr"), delete it and return true; else return
  // false.
  bool del_single_region_table(size_t ind, HeapRegion* hr);

  // Statistics for the from-card cache below.
  static jint _cache_probes;
  static jint _cache_hits;

  // Indexed by thread X heap region, to minimize thread contention.
  static int** _from_card_cache;
  static size_t _from_card_cache_max_regions;
  static size_t _from_card_cache_mem_size;

public:
  OtherRegionsTable(HeapRegion* hr);

  HeapRegion* hr() const { return _hr; }

  // For now.  Could "expand" some tables in the future, so that this made
  // sense.
  void add_reference(OopOrNarrowOopStar from, int tid);
  // Sequential-case convenience overload; delegates with tid 0.
  void add_reference(OopOrNarrowOopStar from) {
    return add_reference(from, 0);
  }

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // Occupancy (in cards), total and per component.
  // NOTE(review): the original comment said "Not const because it takes a
  // lock", yet these are declared const — presumably they acquire _m
  // internally; confirm against the .cpp before relying on const-ness.
  size_t occupied() const;
  size_t occ_fine() const;
  size_t occ_coarse() const;
  size_t occ_sparse() const;

  static jint n_coarsenings() { return _n_coarsenings; }

  // Returns size in bytes.
  // NOTE(review): declared const despite the original "takes a lock" note
  // above — same caveat applies.
  size_t mem_size() const;
  static size_t static_mem_size();
  static size_t fl_mem_size();

  bool contains_reference(OopOrNarrowOopStar from) const;
  bool contains_reference_locked(OopOrNarrowOopStar from) const;

  void clear();

  // Specifically clear the from_card_cache.
  void clear_fcc();

  // "from_hr" is being cleared; remove any entries from it.
  void clear_incoming_entry(HeapRegion* from_hr);

  void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);

  // Declare the heap size (in # of regions) to the OtherRegionsTable.
  // (Uses it to initialize from_card_cache).
  static void init_from_card_cache(size_t max_regions);

  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  // Make sure any entries for higher regions are invalid.
  static void shrink_from_card_cache(size_t new_n_regs);

  static void print_from_card_cache();
};
   176 class HeapRegionRemSet : public CHeapObj {
   177   friend class VMStructs;
   178   friend class HeapRegionRemSetIterator;
   180 public:
   181   enum Event {
   182     Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
   183   };
   185 private:
   186   G1BlockOffsetSharedArray* _bosa;
   187   G1BlockOffsetSharedArray* bosa() const { return _bosa; }
   189   OtherRegionsTable _other_regions;
   191   enum ParIterState { Unclaimed, Claimed, Complete };
   192   volatile ParIterState _iter_state;
   193   volatile jlong _iter_claimed;
   195   // Unused unless G1RecordHRRSOops is true.
   197   static const int MaxRecorded = 1000000;
   198   static OopOrNarrowOopStar* _recorded_oops;
   199   static HeapWord**          _recorded_cards;
   200   static HeapRegion**        _recorded_regions;
   201   static int                 _n_recorded;
   203   static const int MaxRecordedEvents = 1000;
   204   static Event*       _recorded_events;
   205   static int*         _recorded_event_index;
   206   static int          _n_recorded_events;
   208   static void print_event(outputStream* str, Event evnt);
   210 public:
   211   HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
   212                    HeapRegion* hr);
   214   static int num_par_rem_sets();
   215   static void setup_remset_size();
   217   HeapRegion* hr() const {
   218     return _other_regions.hr();
   219   }
   221   size_t occupied() const {
   222     return _other_regions.occupied();
   223   }
   224   size_t occ_fine() const {
   225     return _other_regions.occ_fine();
   226   }
   227   size_t occ_coarse() const {
   228     return _other_regions.occ_coarse();
   229   }
   230   size_t occ_sparse() const {
   231     return _other_regions.occ_sparse();
   232   }
   234   static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }
   236   /* Used in the sequential case.  Returns "true" iff this addition causes
   237      the size limit to be reached. */
   238   void add_reference(OopOrNarrowOopStar from) {
   239     _other_regions.add_reference(from);
   240   }
   242   /* Used in the parallel case.  Returns "true" iff this addition causes
   243      the size limit to be reached. */
   244   void add_reference(OopOrNarrowOopStar from, int tid) {
   245     _other_regions.add_reference(from, tid);
   246   }
   248   // Removes any entries shown by the given bitmaps to contain only dead
   249   // objects.
   250   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
   252   // The region is being reclaimed; clear its remset, and any mention of
   253   // entries for this region in other remsets.
   254   void clear();
   256   // Forget any entries due to pointers from "from_hr".
   257   void clear_incoming_entry(HeapRegion* from_hr) {
   258     _other_regions.clear_incoming_entry(from_hr);
   259   }
   261 #if 0
   262   virtual void cleanup() = 0;
   263 #endif
   265   // Should be called from single-threaded code.
   266   void init_for_par_iteration();
   267   // Attempt to claim the region.  Returns true iff this call caused an
   268   // atomic transition from Unclaimed to Claimed.
   269   bool claim_iter();
   270   // Sets the iteration state to "complete".
   271   void set_iter_complete();
   272   // Returns "true" iff the region's iteration is complete.
   273   bool iter_is_complete();
   275   // Support for claiming blocks of cards during iteration
   276   void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
   277   size_t iter_claimed() const { return (size_t)_iter_claimed; }
   278   // Claim the next block of cards
   279   size_t iter_claimed_next(size_t step) {
   280     size_t current, next;
   281     do {
   282       current = iter_claimed();
   283       next = current + step;
   284     } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
   285     return current;
   286   }
   288   // Initialize the given iterator to iterate over this rem set.
   289   void init_iterator(HeapRegionRemSetIterator* iter) const;
   291 #if 0
   292   // Apply the "do_card" method to the start address of every card in the
   293   // rem set.  Returns false if some application of the closure aborted.
   294   virtual bool card_iterate(CardClosure* iter) = 0;
   295 #endif
   297   // The actual # of bytes this hr_remset takes up.
   298   size_t mem_size() {
   299     return _other_regions.mem_size()
   300       // This correction is necessary because the above includes the second
   301       // part.
   302       + sizeof(this) - sizeof(OtherRegionsTable);
   303   }
   305   // Returns the memory occupancy of all static data structures associated
   306   // with remembered sets.
   307   static size_t static_mem_size() {
   308     return OtherRegionsTable::static_mem_size();
   309   }
   311   // Returns the memory occupancy of all free_list data structures associated
   312   // with remembered sets.
   313   static size_t fl_mem_size() {
   314     return OtherRegionsTable::fl_mem_size();
   315   }
   317   bool contains_reference(OopOrNarrowOopStar from) const {
   318     return _other_regions.contains_reference(from);
   319   }
   320   void print() const;
   322   // Called during a stop-world phase to perform any deferred cleanups.
   323   // The second version may be called by parallel threads after then finish
   324   // collection work.
   325   static void cleanup();
   326   static void par_cleanup();
   328   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   329   // (Uses it to initialize from_card_cache).
   330   static void init_heap(size_t max_regions) {
   331     OtherRegionsTable::init_from_card_cache(max_regions);
   332   }
   334   // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
   335   static void shrink_heap(size_t new_n_regs) {
   336     OtherRegionsTable::shrink_from_card_cache(new_n_regs);
   337   }
   339 #ifndef PRODUCT
   340   static void print_from_card_cache() {
   341     OtherRegionsTable::print_from_card_cache();
   342   }
   343 #endif
   345   static void record(HeapRegion* hr, OopOrNarrowOopStar f);
   346   static void print_recorded();
   347   static void record_event(Event evnt);
   349   // These are wrappers for the similarly-named methods on
   350   // SparsePRT. Look at sparsePRT.hpp for more details.
   351   static void reset_for_cleanup_tasks();
   352   void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
   353   static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task);
   355   // Run unit tests.
   356 #ifndef PRODUCT
   357   static void test();
   358 #endif
   359 };
// Iterator over the card indices of one HeapRegionRemSet. Yields the
// sparse, fine, and coarse components in that order (see IterState).
class HeapRegionRemSetIterator : public CHeapObj {

  // The region over which we're iterating.
  const HeapRegionRemSet* _hrrs;

  // Local caching of HRRS fields.
  const BitMap*             _coarse_map;
  PosParPRT**               _fine_grain_regions;

  G1BlockOffsetSharedArray* _bosa;
  G1CollectedHeap*          _g1h;

  // The number yielded since initialization.
  size_t _n_yielded_fine;
  size_t _n_yielded_coarse;
  size_t _n_yielded_sparse;

  // Which component we are currently iterating over: the sparse table
  // first, then the fine-grain table, then the coarse bitmap.
  // (The original comment described an obsolete two-state bool.)
  enum IterState {
    Sparse,
    Fine,
    Coarse
  };
  IterState _is;

  // In both kinds of iteration, heap offset of first card of current
  // region.
  size_t _cur_region_card_offset;
  // Card offset within cur region.
  size_t _cur_region_cur_card;

  // Coarse table iteration fields:

  // Current region index;
  int _coarse_cur_region_index;
  int _coarse_cur_region_cur_card;

  bool coarse_has_next(size_t& card_index);

  // Fine table iteration fields:

  // Index of bucket-list we're working on.
  int _fine_array_index;
  // Per Region Table we're doing within current bucket list.
  PosParPRT* _fine_cur_prt;

  /* SparsePRT::*/ SparsePRTIter _sparse_iter;

  // Advance _fine_cur_prt/_fine_array_index to the next non-NULL PRT.
  void fine_find_next_non_null_prt();

  bool fine_has_next();
  bool fine_has_next(size_t& card_index);

public:
  // We require an iterator to be initialized before use, so the
  // constructor does little.
  HeapRegionRemSetIterator();

  void initialize(const HeapRegionRemSet* hrrs);

  // If there remains one or more cards to be yielded, returns true and
  // sets "card_index" to one of those cards (which is then considered
  // yielded.)   Otherwise, returns false (and leaves "card_index"
  // undefined.)
  bool has_next(size_t& card_index);

  // Per-component and total counts of cards yielded so far.
  size_t n_yielded_fine() { return _n_yielded_fine; }
  size_t n_yielded_coarse() { return _n_yielded_coarse; }
  size_t n_yielded_sparse() { return _n_yielded_sparse; }
  size_t n_yielded() {
    return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
  }
};
#if 0
// Dead code: closure interface for per-card iteration, disabled along
// with HeapRegionRemSet::card_iterate above. Kept for reference only.
class CardClosure: public Closure {
public:
  virtual void do_card(HeapWord* card_start) = 0;
};

#endif
   444 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP

mercurial