src/share/vm/memory/cardTableModRefBS.hpp

author:      apetrusenko
date:        Mon, 31 Aug 2009 05:27:29 -0700
changeset:   1375:8624da129f0b
parent:      1280:df6caf649ff7
child:       1696:0414c1049f15
permissions: -rw-r--r--

6841313: G1: dirty cards of survivor regions in parallel
Reviewed-by: tonyp, iveresov

/*
 * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

// As it currently stands, this barrier is *imprecise*: when a ref field in
// an object "o" is modified, the card table entry for the card containing
// the head of "o" is dirtied, not necessarily the card containing the
// modified field itself.  For object arrays, however, the barrier *is*
// precise; only the card containing the modified element is dirtied.
// Any MemRegionClosures used to scan dirty cards should take these
// considerations into account.
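//
// An illustrative sketch (with the default 512-byte cards): if a large
// object "o" straddles a card boundary and the modified ref field lies on
// the second card, the barrier dirties byte_for(o), the card of o's
// header, so scanning must consider the header card even though the
// modified field sits on the next one.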

class Generation;
class OopsInGenClosure;
class DirtyCardToOopClosure;

class CardTableModRefBS: public ModRefBarrierSet {
  // Some classes get to look at some private stuff.
  friend class BytecodeInterpreter;
  friend class VMStructs;
  friend class CardTableRS;
  friend class CheckForUnmarkedOops; // Needs access to raw card bytes.
#ifndef PRODUCT
  // For debugging.
  friend class GuaranteeNotModClosure;
#endif
 protected:

  enum CardValues {
    clean_card                  = -1,
    // The mask contains zeros in places for all other values.
    clean_card_mask             = clean_card - 31,

    dirty_card                  =  0,
    precleaned_card             =  1,
    claimed_card                =  2,
    deferred_card               =  4,
    last_card                   =  8,
    CT_MR_BS_last_reserved      = 16
  };
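
  // Worked example (illustrative): as an unsigned byte clean_card is 0xff
  // and clean_card_mask is 0xe0, so the mask keeps only the top three
  // bits.  All the non-clean values above fit in the low five bits, which
  // is why a test like
  //
  //   (val & (clean_card_mask | claimed_card)) == claimed_card
  //
  // holds for a claimed card (0x02) but not for a clean card, whose high
  // bits survive the mask.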

  // dirty and precleaned are equivalent wrt younger_refs_iter.
  static bool card_is_dirty_wrt_gen_iter(jbyte cv) {
    return cv == dirty_card || cv == precleaned_card;
  }

  // Returns "true" iff the value "cv" will cause the card containing it
  // to be scanned in the current traversal.  May be overridden by
  // subtypes.
  virtual bool card_will_be_scanned(jbyte cv) {
    return CardTableModRefBS::card_is_dirty_wrt_gen_iter(cv);
  }

  // Returns "true" iff the value "cv" may have represented a dirty card at
  // some point.
  virtual bool card_may_have_been_dirty(jbyte cv) {
    return card_is_dirty_wrt_gen_iter(cv);
  }

  // The declaration order of these const fields is important; see the
  // constructor before changing.
  const MemRegion _whole_heap;       // the region covered by the card table
  const size_t    _guard_index;      // index of very last element in the card
                                     // table; it is set to a guard value
                                     // (last_card) and should never be modified
  const size_t    _last_valid_index; // index of the last valid element
  const size_t    _page_size;        // page size used when mapping _byte_map
  const size_t    _byte_map_size;    // in bytes
  jbyte*          _byte_map;         // the card marking array

  int _cur_covered_regions;
  // The covered regions should be in address order.
  MemRegion* _covered;
  // The committed regions correspond one-to-one to the covered regions.
  // They represent the card-table memory that has been committed to service
  // the corresponding covered region.  It may be that the committed region
  // for one covered region corresponds to a larger region because of
  // page-size roundings.  Thus, a committed region for one covered region
  // may actually extend onto the card-table space for the next covered
  // region.
  MemRegion* _committed;

  // The last card is a guard card, and we commit the page for it so
  // we can use the card for verification purposes. We make sure we never
  // uncommit the MemRegion for that page.
  MemRegion _guard_region;

 protected:
  // Initialization utilities; covered_words is the size of the covered region
  // in, um, words.
  inline size_t cards_required(size_t covered_words);
  inline size_t compute_byte_map_size();

  // Finds and returns the index of the region, if any, to which the given
  // region would be contiguous.  If none exists, assigns a new region and
  // returns its index.  Requires that no more than the maximum number of
  // covered regions defined in the constructor are ever in use.
  int find_covering_region_by_base(HeapWord* base);

  // Same as above, but finds the region containing the given address
  // instead of starting at a given base address.
  int find_covering_region_containing(HeapWord* addr);

  // Resize one of the regions covered by the remembered set.
  void resize_covered_region(MemRegion new_region);

  // Returns the leftmost end of a committed region corresponding to a
  // covered region before covered region "ind", or else "NULL" if "ind" is
  // the first covered region.
  HeapWord* largest_prev_committed_end(int ind) const;

  // Returns the part of the region mr that doesn't intersect with
  // any committed region other than self.  Used to prevent uncommitting
  // regions that are also committed by other regions.  Also protects
  // against uncommitting the guard region.
  MemRegion committed_unique_to_self(int self, MemRegion mr) const;

  // Mapping from address to card marking array entry
  jbyte* byte_for(const void* p) const {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    jbyte* result = &byte_map_base[uintptr_t(p) >> card_shift];
    assert(result >= _byte_map && result < _byte_map + _byte_map_size,
           "out of bounds accessor for card marking array");
    return result;
  }
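
  // Worked example (illustrative): with card_shift == 9 a card covers 512
  // bytes, so address 0x100200 maps to byte_map_base + 0x801, and every
  // address in 0x100200..0x1003ff shares that same card byte.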

  // The card table byte one after the card marking array
  // entry for argument address. Typically used for higher bounds
  // for loops iterating through the card table.
  jbyte* byte_after(const void* p) const {
    return byte_for(p) + 1;
  }

  // Iterate over the portion of the card-table which covers the given
  // region mr in the given space and apply cl to any dirty sub-regions
  // of mr. cl and dcto_cl must either be the same closure or cl must
  // wrap dcto_cl. Both are required - neither may be NULL. Also, dcto_cl
  // may be modified. Note that this function will operate in a parallel
  // mode if worker threads are available.
  void non_clean_card_iterate(Space* sp, MemRegion mr,
                              DirtyCardToOopClosure* dcto_cl,
                              MemRegionClosure* cl,
                              bool clear);

  // Utility function used to implement the other versions below.
  void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl,
                                   bool clear);

  void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
                                       DirtyCardToOopClosure* dcto_cl,
                                       MemRegionClosure* cl,
                                       bool clear,
                                       int n_threads);

  // Dirty the bytes corresponding to "mr" (not all of which must be
  // covered.)
  void dirty_MemRegion(MemRegion mr);

  // Clear (to clean_card) the bytes entirely contained within "mr" (not
  // all of which must be covered.)
  void clear_MemRegion(MemRegion mr);

  // *** Support for parallel card scanning.

  enum SomeConstantsForParallelism {
    StridesPerThread    = 2,
    CardsPerStrideChunk = 256
  };
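
  // Illustrative numbers: a stride chunk spans CardsPerStrideChunk cards,
  // i.e. 256 * 512 bytes = 128K of heap, and a parallel scan with n
  // workers would walk the space in n * StridesPerThread interleaved
  // strides of such chunks.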

  // This is an array, one element per covered region of the card table.
  // Each entry is itself an array, with one element per chunk in the
  // covered region.  Each entry of these arrays is the lowest non-clean
  // card of the corresponding chunk containing part of an object from the
  // previous chunk, or else NULL.
  typedef jbyte*  CardPtr;
  typedef CardPtr* CardArr;
  CardArr* _lowest_non_clean;
  size_t*  _lowest_non_clean_chunk_size;
  uintptr_t* _lowest_non_clean_base_chunk_index;
  int* _last_LNC_resizing_collection;

  // Initializes "lowest_non_clean" to point to the array for the region
  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
  // index corresponding to the first element of that array.
  // Ensures that these arrays are of sufficient size, allocating if necessary.
  // May be called by several threads concurrently.
  void get_LNC_array_for_space(Space* sp,
                               jbyte**& lowest_non_clean,
                               uintptr_t& lowest_non_clean_base_chunk_index,
                               size_t& lowest_non_clean_chunk_size);

  // Returns the number of chunks necessary to cover "mr".
  size_t chunks_to_cover(MemRegion mr) {
    return (size_t)(addr_to_chunk_index(mr.last()) -
                    addr_to_chunk_index(mr.start()) + 1);
  }

  // Returns the index of the chunk in a stride which
  // covers the given address.
  uintptr_t addr_to_chunk_index(const void* addr) {
    uintptr_t card = (uintptr_t) byte_for(addr);
    return card / CardsPerStrideChunk;
  }
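
  // Example (illustrative): two addresses land in the same chunk exactly
  // when their card bytes fall in the same CardsPerStrideChunk-sized
  // block of the card table, i.e. when
  //   (uintptr_t)byte_for(a) / 256 == (uintptr_t)byte_for(b) / 256.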

  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
  // to the cards in the stride (of n_strides) within the given space.
  void process_stride(Space* sp,
                      MemRegion used,
                      jint stride, int n_strides,
                      DirtyCardToOopClosure* dcto_cl,
                      MemRegionClosure* cl,
                      bool clear,
                      jbyte** lowest_non_clean,
                      uintptr_t lowest_non_clean_base_chunk_index,
                      size_t lowest_non_clean_chunk_size);

  // Makes sure that chunk boundaries are handled appropriately, by
  // adjusting the min_done of dcto_cl, and by using a special card-table
  // value to indicate how min_done should be set.
  void process_chunk_boundaries(Space* sp,
                                DirtyCardToOopClosure* dcto_cl,
                                MemRegion chunk_mr,
                                MemRegion used,
                                jbyte** lowest_non_clean,
                                uintptr_t lowest_non_clean_base_chunk_index,
                                size_t    lowest_non_clean_chunk_size);

public:
  // Constants
  enum SomePublicConstants {
    card_shift                  = 9,
    card_size                   = 1 << card_shift,
    card_size_in_words          = card_size / sizeof(HeapWord)
  };
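
  // Illustrative arithmetic: card_shift == 9 gives card_size == 512 bytes,
  // so card_size_in_words is 64 with 8-byte HeapWords (or 128 with 4-byte
  // HeapWords on 32-bit builds).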

  static int clean_card_val()      { return clean_card; }
  static int clean_card_mask_val() { return clean_card_mask; }
  static int dirty_card_val()      { return dirty_card; }
  static int claimed_card_val()    { return claimed_card; }
  static int precleaned_card_val() { return precleaned_card; }
  static int deferred_card_val()   { return deferred_card; }

  // For RTTI simulation.
  bool is_a(BarrierSet::Name bsn) {
    return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
  }

  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);

  // *** Barrier set functions.

  bool has_write_ref_pre_barrier() { return false; }

  inline bool write_ref_needs_barrier(void* field, oop new_val) {
    // Note that this assumes the perm gen is the highest generation
    // in the address space
    return new_val != NULL && !new_val->is_perm();
  }

  // Record a reference update. Note that these versions are precise!
  // The scanning code has to handle the fact that the write barrier may be
  // either precise or imprecise. We make non-virtual inline variants of
  // these functions here for performance.
protected:
  void write_ref_field_work(oop obj, size_t offset, oop newVal);
  virtual void write_ref_field_work(void* field, oop newVal);
public:

  bool has_write_ref_array_opt() { return true; }
  bool has_write_region_opt() { return true; }

  inline void inline_write_region(MemRegion mr) {
    dirty_MemRegion(mr);
  }
protected:
  void write_region_work(MemRegion mr) {
    inline_write_region(mr);
  }
public:

  inline void inline_write_ref_array(MemRegion mr) {
    dirty_MemRegion(mr);
  }
protected:
  void write_ref_array_work(MemRegion mr) {
    inline_write_ref_array(mr);
  }
public:

  bool is_aligned(HeapWord* addr) {
    return is_card_aligned(addr);
  }

  // *** Card-table-barrier-specific things.

  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}

  template <class T> inline void inline_write_ref_field(T* field, oop newVal) {
    jbyte* byte = byte_for((void*)field);
    *byte = dirty_card;
  }
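
  // A minimal sketch (an assumption about the inlined form, not the
  // generated code verbatim) of what this barrier boils down to:
  //
  //   void post_write_barrier(void* field) {
  //     // one byte store: dirty the card covering 'field'
  //     byte_map_base[uintptr_t(field) >> card_shift] = dirty_card;
  //   }
  //
  // which is why interpreters and compilers can emit it inline cheaply.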

  // These are used by G1, when it uses the card table as a temporary data
  // structure for card claiming.
  bool is_card_dirty(size_t card_index) {
    return _byte_map[card_index] == dirty_card_val();
  }

  void mark_card_dirty(size_t card_index) {
    _byte_map[card_index] = dirty_card_val();
  }

  bool is_card_claimed(size_t card_index) {
    jbyte val = _byte_map[card_index];
    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
  }

  bool claim_card(size_t card_index);

  bool is_card_clean(size_t card_index) {
    return _byte_map[card_index] == clean_card_val();
  }

  bool is_card_deferred(size_t card_index) {
    jbyte val = _byte_map[card_index];
    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
  }

  bool mark_card_deferred(size_t card_index);

  // Card marking array base (adjusted for heap low boundary)
  // This would be the 0th element of _byte_map, if the heap started at 0x0.
  // But since the heap starts at some higher address, this points to somewhere
  // before the beginning of the actual _byte_map.
  jbyte* byte_map_base;

  // Return true if "p" is at the start of a card.
  bool is_card_aligned(HeapWord* p) {
    jbyte* pcard = byte_for(p);
    return (addr_for(pcard) == p);
  }

  // The kinds of precision a CardTableModRefBS may offer.
  enum PrecisionStyle {
    Precise,
    ObjHeadPreciseArray
  };

  // Tells what style of precision this card table offers.
  PrecisionStyle precision() {
    return ObjHeadPreciseArray; // Only one supported for now.
  }

  // ModRefBS functions.
  virtual void invalidate(MemRegion mr, bool whole_heap = false);
  void clear(MemRegion mr);
  void dirty(MemRegion mr);
  void mod_oop_in_space_iterate(Space* sp, OopClosure* cl,
                                bool clear = false,
                                bool before_save_marks = false);

  // *** Card-table-RemSet-specific things.

  // Invoke "cl.do_MemRegion" on a set of MemRegions that collectively
  // includes all the modified cards (expressing each card as a
  // MemRegion).  Thus, several modified cards may be lumped into one
  // region.  The regions are non-overlapping, and are visited in
  // *decreasing* address order.  (This order aids with imprecise card
  // marking, where a dirty card may cause scanning, and summarization
  // marking, of objects that extend onto subsequent cards.)
  // If "clear" is true, the card is (conceptually) marked unmodified before
  // applying the closure.
  void mod_card_iterate(MemRegionClosure* cl, bool clear = false) {
    non_clean_card_iterate_work(_whole_heap, cl, clear);
  }

  // Like the "mod_card_iterate" above, except only invokes the closure
  // for cards within the MemRegion "mr" (which is required to be
  // card-aligned and sized.)
  void mod_card_iterate(MemRegion mr, MemRegionClosure* cl,
                        bool clear = false) {
    non_clean_card_iterate_work(mr, cl, clear);
  }
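
  // Usage sketch (CountDirtyClosure is hypothetical, for illustration):
  //
  //   class CountDirtyClosure: public MemRegionClosure {
  //     size_t _cards;
  //   public:
  //     CountDirtyClosure() : _cards(0) {}
  //     void do_MemRegion(MemRegion mr) {
  //       // each visited region is a run of non-clean cards
  //       _cards += mr.word_size() / CardTableModRefBS::card_size_in_words;
  //     }
  //     size_t cards() const { return _cards; }
  //   };
  //
  //   CountDirtyClosure blk;
  //   ct->mod_card_iterate(&blk);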

  static uintx ct_max_alignment_constraint();

  // Apply closure "cl" to the dirty cards containing some part of
  // MemRegion "mr".
  void dirty_card_iterate(MemRegion mr, MemRegionClosure* cl);

  // Return the MemRegion corresponding to the first maximal run
  // of dirty cards lying completely within MemRegion mr.
  // If reset is "true", then sets those card table entries to the given
  // value.
  MemRegion dirty_card_range_after_reset(MemRegion mr, bool reset,
                                         int reset_val);

  // Set all the dirty cards in the given region to precleaned state.
  void preclean_dirty_cards(MemRegion mr);

  // Provide read-only access to the card table array.
  const jbyte* byte_for_const(const void* p) const {
    return byte_for(p);
  }
  const jbyte* byte_after_const(const void* p) const {
    return byte_after(p);
  }

  // Mapping from card marking array entry to address of first word
  HeapWord* addr_for(const jbyte* p) const {
    assert(p >= _byte_map && p < _byte_map + _byte_map_size,
           "out of bounds access to card marking array");
    size_t delta = pointer_delta(p, byte_map_base, sizeof(jbyte));
    HeapWord* result = (HeapWord*) (delta << card_shift);
    assert(_whole_heap.contains(result),
           "out of bounds accessor from card marking array");
    return result;
  }
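
  // Round-trip note (illustrative): for any heap pointer p,
  // addr_for(byte_for(p)) is p rounded down to its card (512-byte)
  // boundary, and index_for(p) below is equivalently byte_for(p) - _byte_map.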

  // Mapping from address to card marking array index.
  size_t index_for(void* p) {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    return byte_for(p) - _byte_map;
  }

  const jbyte* byte_for_index(const size_t card_index) const {
    return _byte_map + card_index;
  }

  void verify();
  void verify_guard();

  void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;

  static size_t par_chunk_heapword_alignment() {
    return CardsPerStrideChunk * card_size_in_words;
  }

};

class CardTableRS;

// A specialization for the CardTableRS gen rem set.
class CardTableModRefBSForCTRS: public CardTableModRefBS {
  CardTableRS* _rs;
protected:
  bool card_will_be_scanned(jbyte cv);
  bool card_may_have_been_dirty(jbyte cv);
public:
  CardTableModRefBSForCTRS(MemRegion whole_heap,
                           int max_covered_regions) :
    CardTableModRefBS(whole_heap, max_covered_regions) {}

  void set_CTRS(CardTableRS* rs) { _rs = rs; }
};
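
// A hedged wiring sketch (the real call sites live in the collector setup
// code, not in this header):
//
//   CardTableModRefBSForCTRS* bs =
//     new CardTableModRefBSForCTRS(heap_mr, max_covered_regions);
//   CardTableRS* rs = ...;  // the generational remembered set
//   bs->set_CTRS(rs);       // lets the subclass consult the rem set when
//                           // deciding which card values will be scanned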
