src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

author      johnc
date        Sat, 06 Oct 2012 01:17:44 -0700
changeset   4173:8a5ea0a9ccc4
parent      4061:859cd1a76f8a
child       4962:6f817ce50129
child       5014:5c93c1f61226
permissions -rw-r--r--

7127708: G1: change task num types from int to uint in concurrent mark
Summary: Change the type of various task num fields, parameters, etc. to unsigned and rename them to be more consistent with the other collectors. Code changes were also reviewed by Vitaly Davidovich.
Reviewed-by: johnc
Contributed-by: Kaushik Srenevasan <kaushik@twitter.com>

     1 /*
     2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
    28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    31 #include "memory/allocation.hpp"
    32 #include "memory/space.inline.hpp"
    33 #include "oops/oop.inline.hpp"
    34 #include "utilities/bitMap.inline.hpp"
    35 #include "utilities/globalDefinitions.hpp"
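        // A PerRegionTable (PRT) records, for a single "from" region, which cards
        // in that region contain pointers into the region owning this table: one
        // bit per card in the "from" region. Retired PRTs are recycled through a
        // lock-free global free list (_free_list below).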
    37 class PerRegionTable: public CHeapObj<mtGC> {
    38   friend class OtherRegionsTable;
    39   friend class HeapRegionRemSetIterator;
    41   HeapRegion*     _hr;
    42   BitMap          _bm;
    43   jint            _occupied;
    45   // next pointer for free/allocated 'all' list
    46   PerRegionTable* _next;
    48   // prev pointer for the allocated 'all' list
    49   PerRegionTable* _prev;
    51   // next pointer in collision list
    52   PerRegionTable * _collision_list_next;
    54   // Global free list of PRTs
    55   static PerRegionTable* _free_list;
    57 protected:
    58   // We need access in order to union things into the base table.
    59   BitMap* bm() { return &_bm; }
    61   void recount_occupied() {
    62     _occupied = (jint) bm()->count_one_bits();
    63   }
    65   PerRegionTable(HeapRegion* hr) :
    66     _hr(hr),
    67     _occupied(0),
    68     _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    69     _collision_list_next(NULL), _next(NULL), _prev(NULL)
    70   {}
    72   void add_card_work(CardIdx_t from_card, bool par) {
    73     if (!_bm.at(from_card)) {
    74       if (par) {
    75         if (_bm.par_at_put(from_card, 1)) {
    76           Atomic::inc(&_occupied);
    77         }
    78       } else {
    79         _bm.at_put(from_card, 1);
    80         _occupied++;
    81       }
    82     }
    83   }
    85   void add_reference_work(OopOrNarrowOopStar from, bool par) {
    86     // Must make this robust in case "from" is not in "_hr", because of
    87     // concurrency.
    89     if (G1TraceHeapRegionRememberedSet) {
    90       gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
    91                              from,
    92                              UseCompressedOops
    93                              ? oopDesc::load_decode_heap_oop((narrowOop*)from)
    94                              : oopDesc::load_decode_heap_oop((oop*)from));
    95     }
    97     HeapRegion* loc_hr = hr();
    98     // If the test below fails, then this table was reused concurrently
    99     // with this operation.  This is OK, since the old table was coarsened,
   100     // and adding a bit to the new table is never incorrect.
   101     // If the table used to belong to a continues humongous region and is
   102     // now reused for the corresponding start humongous region, we need to
   103     // make sure that we detect this. Thus, we call is_in_reserved_raw()
   104     // instead of just is_in_reserved() here.
   105     if (loc_hr->is_in_reserved_raw(from)) {
   106       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
   107       CardIdx_t from_card = (CardIdx_t)
   108           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
   110       assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
   111              "Must be in range.");
   112       add_card_work(from_card, par);
   113     }
   114   }
   116 public:
   118   HeapRegion* hr() const { return _hr; }
   120   jint occupied() const {
   121     // Overkill, but if we ever need it...
   122     // guarantee(_occupied == _bm.count_one_bits(), "Check");
   123     return _occupied;
   124   }
   126   void init(HeapRegion* hr, bool clear_links_to_all_list) {
   127     if (clear_links_to_all_list) {
   128       set_next(NULL);
   129       set_prev(NULL);
   130     }
   131     _hr = hr;
   132     _collision_list_next = NULL;
   133     _occupied = 0;
   134     _bm.clear();
   135   }
   137   void add_reference(OopOrNarrowOopStar from) {
   138     add_reference_work(from, /*parallel*/ true);
   139   }
   141   void seq_add_reference(OopOrNarrowOopStar from) {
   142     add_reference_work(from, /*parallel*/ false);
   143   }
   145   void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
   146     HeapWord* hr_bot = hr()->bottom();
   147     size_t hr_first_card_index = ctbs->index_for(hr_bot);
   148     bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
   149     recount_occupied();
   150   }
   152   void add_card(CardIdx_t from_card_index) {
   153     add_card_work(from_card_index, /*parallel*/ true);
   154   }
   156   void seq_add_card(CardIdx_t from_card_index) {
   157     add_card_work(from_card_index, /*parallel*/ false);
   158   }
   160   // (Destructively) union the bitmap of the current table into the given
   161   // bitmap (which is assumed to be of the same size.)
   162   void union_bitmap_into(BitMap* bm) {
   163     bm->set_union(_bm);
   164   }
   166   // Mem size in bytes.
   167   size_t mem_size() const {
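        // Note that sizeof(this) below is the size of the PerRegionTable pointer,
        // not of the PerRegionTable object itself.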
   168     return sizeof(this) + _bm.size_in_words() * HeapWordSize;
   169   }
   171   // Requires "from" to be in "hr()".
   172   bool contains_reference(OopOrNarrowOopStar from) const {
   173     assert(hr()->is_in_reserved(from), "Precondition.");
   174     size_t card_ind = pointer_delta(from, hr()->bottom(),
   175                                     CardTableModRefBS::card_size);
   176     return _bm.at(card_ind);
   177   }
   179   // Bulk-free the PRTs from prt to last, assumes that they are
   180   // linked together using their _next field.
   181   static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
   182     while (true) {
   183       PerRegionTable* fl = _free_list;
   184       last->set_next(fl);
   185       PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
   186       if (res == fl) {
   187         return;
   188       }
   189     }
   190     ShouldNotReachHere();
   191   }
   193   static void free(PerRegionTable* prt) {
   194     bulk_free(prt, prt);
   195   }
   197   // Returns an initialized PerRegionTable instance.
   198   static PerRegionTable* alloc(HeapRegion* hr) {
   199     PerRegionTable* fl = _free_list;
   200     while (fl != NULL) {
   201       PerRegionTable* nxt = fl->next();
   202       PerRegionTable* res =
   203         (PerRegionTable*)
   204         Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
   205       if (res == fl) {
   206         fl->init(hr, true);
   207         return fl;
   208       } else {
   209         fl = _free_list;
   210       }
   211     }
   212     assert(fl == NULL, "Loop condition.");
   213     return new PerRegionTable(hr);
   214   }
   216   PerRegionTable* next() const { return _next; }
   217   void set_next(PerRegionTable* next) { _next = next; }
   218   PerRegionTable* prev() const { return _prev; }
   219   void set_prev(PerRegionTable* prev) { _prev = prev; }
   221   // Accessor and Modification routines for the pointer for the
   222   // singly linked collision list that links the PRTs within the
   223   // OtherRegionsTable::_fine_grain_regions hash table.
   224   //
   225   // It might be useful to also make the collision list doubly linked
   226   // to avoid iteration over the collisions list during scrubbing/deletion.
   227   // OTOH there might not be many collisions.
   229   PerRegionTable* collision_list_next() const {
   230     return _collision_list_next;
   231   }
   233   void set_collision_list_next(PerRegionTable* next) {
   234     _collision_list_next = next;
   235   }
   237   PerRegionTable** collision_list_next_addr() {
   238     return &_collision_list_next;
   239   }
   241   static size_t fl_mem_size() {
   242     PerRegionTable* cur = _free_list;
   243     size_t res = 0;
   244     while (cur != NULL) {
   245       res += sizeof(PerRegionTable);
   246       cur = cur->next();
   247     }
   248     return res;
   249   }
   250 };
   252 PerRegionTable* PerRegionTable::_free_list = NULL;
   254 size_t OtherRegionsTable::_max_fine_entries = 0;
   255 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
   256 size_t OtherRegionsTable::_fine_eviction_stride = 0;
   257 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
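        // An OtherRegionsTable tracks incoming references at three granularities:
        // a sparse table (_sparse_table) holding short per-region card lists for
        // regions that contribute only a few cards, a hash table
        // (_fine_grain_regions) of PerRegionTables for regions that contribute
        // many cards, and a coarse bitmap (_coarse_map) with one bit per region
        // for regions whose fine-grain table has been evicted ("coarsened").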
   259 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
   260   _g1h(G1CollectedHeap::heap()),
   261   _m(Mutex::leaf, "An OtherRegionsTable lock", true),
   262   _hr(hr),
   263   _coarse_map(G1CollectedHeap::heap()->max_regions(),
   264               false /* in-resource-area */),
   265   _fine_grain_regions(NULL),
   266   _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
   267   _n_fine_entries(0), _n_coarse_entries(0),
   268   _fine_eviction_start(0),
   269   _sparse_table(hr)
   270 {
   271   typedef PerRegionTable* PerRegionTablePtr;
   273   if (_max_fine_entries == 0) {
   274     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
   275     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
   276     _max_fine_entries = (size_t)1 << max_entries_log;
   277     _mod_max_fine_entries_mask = _max_fine_entries - 1;
   279     assert(_fine_eviction_sample_size == 0
   280            && _fine_eviction_stride == 0, "All init at same time.");
   281     _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
   282     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
   283   }
   285   _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries];
   287   if (_fine_grain_regions == NULL) {
   288     vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
   289                           "Failed to allocate _fine_grain_entries.");
   290   }
   292   for (size_t i = 0; i < _max_fine_entries; i++) {
   293     _fine_grain_regions[i] = NULL;
   294   }
   295 }
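        // The 'all' list maintained below chains every fine-grain PRT owned by
        // this table (through the PRT _prev/_next fields) so that occ_fine(),
        // mem_size() and clear() can visit the PRTs without scanning the whole
        // _fine_grain_regions hash table.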
   297 void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
   298   // We always append to the beginning of the list for convenience;
   299   // the order of entries in this list does not matter.
   300   if (_first_all_fine_prts != NULL) {
   301     assert(_first_all_fine_prts->prev() == NULL, "invariant");
   302     _first_all_fine_prts->set_prev(prt);
   303     prt->set_next(_first_all_fine_prts);
   304   } else {
   305     // this is the first element we insert. Adjust the "last" pointer
   306     _last_all_fine_prts = prt;
   307     assert(prt->next() == NULL, "just checking");
   308   }
   309   // the new element is always the first element without a predecessor
   310   prt->set_prev(NULL);
   311   _first_all_fine_prts = prt;
   313   assert(prt->prev() == NULL, "just checking");
   314   assert(_first_all_fine_prts == prt, "just checking");
   315   assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
   316          (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
   317          "just checking");
   318   assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
   319          "just checking");
   320   assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
   321          "just checking");
   322 }
   324 void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
   325   if (prt->prev() != NULL) {
   326     assert(_first_all_fine_prts != prt, "just checking");
   327     prt->prev()->set_next(prt->next());
   328     // removing the last element in the list?
   329     if (_last_all_fine_prts == prt) {
   330       _last_all_fine_prts = prt->prev();
   331     }
   332   } else {
   333     assert(_first_all_fine_prts == prt, "just checking");
   334     _first_all_fine_prts = prt->next();
   335     // list is empty now?
   336     if (_first_all_fine_prts == NULL) {
   337       _last_all_fine_prts = NULL;
   338     }
   339   }
   341   if (prt->next() != NULL) {
   342     prt->next()->set_prev(prt->prev());
   343   }
   345   prt->set_next(NULL);
   346   prt->set_prev(NULL);
   348   assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
   349          (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
   350          "just checking");
   351   assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
   352          "just checking");
   353   assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
   354          "just checking");
   355 }
   357 int**  OtherRegionsTable::_from_card_cache = NULL;
   358 size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
   359 size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
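        // The from-card cache (FCC) remembers, per worker id and per region, the
        // index of the card most recently added to that region's remembered set;
        // -1 means "no cached card". It filters back-to-back additions of
        // references from the same card in add_reference() below.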
   361 void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
   362   _from_card_cache_max_regions = max_regions;
   364   int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
   365   _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs, mtGC);
   366   for (int i = 0; i < n_par_rs; i++) {
   367     _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions, mtGC);
   368     for (size_t j = 0; j < max_regions; j++) {
   369       _from_card_cache[i][j] = -1;  // An invalid value.
   370     }
   371   }
   372   _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int);
   373 }
   375 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
   376   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   377     assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
   378     for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
   379       _from_card_cache[i][j] = -1;  // An invalid value.
   380     }
   381   }
   382 }
   384 #ifndef PRODUCT
   385 void OtherRegionsTable::print_from_card_cache() {
   386   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   387     for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
   388       gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
   389                     i, j, _from_card_cache[i][j]);
   390     }
   391   }
   392 }
   393 #endif
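        // Record a reference located at 'from' that points into this table's
        // region. Duplicates are filtered via the from-card cache and the coarse
        // map. The card is then added to an existing fine-grain PRT for the
        // "from" region if there is one, otherwise to the sparse table; when a
        // sparse entry overflows, its cards are promoted into a fine-grain PRT,
        // which may require evicting (coarsening) another PRT if the fine-grain
        // table is full.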
   395 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
   396   size_t cur_hrs_ind = (size_t) hr()->hrs_index();
   398   if (G1TraceHeapRegionRememberedSet) {
   399     gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
   400                                                     from,
   401                                                     UseCompressedOops
   402                                                     ? oopDesc::load_decode_heap_oop((narrowOop*)from)
   403                                                     : oopDesc::load_decode_heap_oop((oop*)from));
   404   }
   406   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
   408   if (G1TraceHeapRegionRememberedSet) {
   409     gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
   410                   hr()->bottom(), from_card,
   411                   _from_card_cache[tid][cur_hrs_ind]);
   412   }
   414   if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
   415     if (G1TraceHeapRegionRememberedSet) {
   416       gclog_or_tty->print_cr("  from-card cache hit.");
   417     }
   418     assert(contains_reference(from), "We just added it!");
   419     return;
   420   } else {
   421     _from_card_cache[tid][cur_hrs_ind] = from_card;
   422   }
   424   // Note that this may be a continued H region.
   425   HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
   426   RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
   428   // If the region is already coarsened, return.
   429   if (_coarse_map.at(from_hrs_ind)) {
   430     if (G1TraceHeapRegionRememberedSet) {
   431       gclog_or_tty->print_cr("  coarse map hit.");
   432     }
   433     assert(contains_reference(from), "We just added it!");
   434     return;
   435   }
   437   // Otherwise find a per-region table to add it to.
   438   size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
   439   PerRegionTable* prt = find_region_table(ind, from_hr);
   440   if (prt == NULL) {
   441     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   442     // Confirm that it's really not there...
   443     prt = find_region_table(ind, from_hr);
   444     if (prt == NULL) {
   446       uintptr_t from_hr_bot_card_index =
   447         uintptr_t(from_hr->bottom())
   448           >> CardTableModRefBS::card_shift;
   449       CardIdx_t card_index = from_card - from_hr_bot_card_index;
   450       assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
   451              "Must be in range.");
   452       if (G1HRRSUseSparseTable &&
   453           _sparse_table.add_card(from_hrs_ind, card_index)) {
   454         if (G1RecordHRRSOops) {
   455           HeapRegionRemSet::record(hr(), from);
   456           if (G1TraceHeapRegionRememberedSet) {
   457             gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
   458                                 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
   459                                 align_size_down(uintptr_t(from),
   460                                                 CardTableModRefBS::card_size),
   461                                 hr()->bottom(), from);
   462           }
   463         }
   464         if (G1TraceHeapRegionRememberedSet) {
   465           gclog_or_tty->print_cr("   added card to sparse table.");
   466         }
   467         assert(contains_reference_locked(from), "We just added it!");
   468         return;
   469       } else {
   470         if (G1TraceHeapRegionRememberedSet) {
   471           gclog_or_tty->print_cr("   [tid %d] sparse table entry "
   472                         "overflow(f: %d, t: %d)",
   473                         tid, from_hrs_ind, cur_hrs_ind);
   474         }
   475       }
   477       if (_n_fine_entries == _max_fine_entries) {
   478         prt = delete_region_table();
   479         // There is no need to clear the links to the 'all' list here:
   480         // prt will be reused immediately, i.e. remain in the 'all' list.
   481         prt->init(from_hr, false /* clear_links_to_all_list */);
   482       } else {
   483         prt = PerRegionTable::alloc(from_hr);
   484         link_to_all(prt);
   485       }
   487       PerRegionTable* first_prt = _fine_grain_regions[ind];
   488       prt->set_collision_list_next(first_prt);
   489       _fine_grain_regions[ind] = prt;
   490       _n_fine_entries++;
   492       if (G1HRRSUseSparseTable) {
   493         // Transfer from sparse to fine-grain.
   494         SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
   495         assert(sprt_entry != NULL, "There should have been an entry");
   496         for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
   497           CardIdx_t c = sprt_entry->card(i);
   498           if (c != SparsePRTEntry::NullEntry) {
   499             prt->add_card(c);
   500           }
   501         }
   502         // Now we can delete the sparse entry.
   503         bool res = _sparse_table.delete_entry(from_hrs_ind);
   504         assert(res, "It should have been there.");
   505       }
   506     }
   507     assert(prt != NULL && prt->hr() == from_hr, "consequence");
   508   }
   509   // Note that we can't assert "prt->hr() == from_hr", because of the
   510   // possibility of concurrent reuse.  But see head comment of
   511   // OtherRegionsTable for why this is OK.
   512   assert(prt != NULL, "Inv");
   514   prt->add_reference(from);
   516   if (G1RecordHRRSOops) {
   517     HeapRegionRemSet::record(hr(), from);
   518     if (G1TraceHeapRegionRememberedSet) {
   519       gclog_or_tty->print("Added card " PTR_FORMAT " to region "
   520                           "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
   521                           align_size_down(uintptr_t(from),
   522                                           CardTableModRefBS::card_size),
   523                           hr()->bottom(), from);
   524     }
   525   }
   526   assert(contains_reference(from), "We just added it!");
   527 }
   529 PerRegionTable*
   530 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
   531   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
   532   PerRegionTable* prt = _fine_grain_regions[ind];
   533   while (prt != NULL && prt->hr() != hr) {
   534     prt = prt->collision_list_next();
   535   }
   536   // Loop postcondition is the method postcondition.
   537   return prt;
   538 }
   540 jint OtherRegionsTable::_n_coarsenings = 0;
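        // Evict a fine-grain PRT to make room for a new one: sample
        // _fine_eviction_sample_size hash buckets, pick the PRT with the highest
        // occupancy, mark its region in the coarse map ("coarsening" it), unlink
        // it from the hash table and return it for reuse by the caller.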
   542 PerRegionTable* OtherRegionsTable::delete_region_table() {
   543   assert(_m.owned_by_self(), "Precondition");
   544   assert(_n_fine_entries == _max_fine_entries, "Precondition");
   545   PerRegionTable* max = NULL;
   546   jint max_occ = 0;
   547   PerRegionTable** max_prev;
   548   size_t max_ind;
   550   size_t i = _fine_eviction_start;
   551   for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
   552     size_t ii = i;
   553     // Make sure we get a non-NULL sample.
   554     while (_fine_grain_regions[ii] == NULL) {
   555       ii++;
   556       if (ii == _max_fine_entries) ii = 0;
   557       guarantee(ii != i, "We must find one.");
   558     }
   559     PerRegionTable** prev = &_fine_grain_regions[ii];
   560     PerRegionTable* cur = *prev;
   561     while (cur != NULL) {
   562       jint cur_occ = cur->occupied();
   563       if (max == NULL || cur_occ > max_occ) {
   564         max = cur;
   565         max_prev = prev;
   566         max_ind = i;
   567         max_occ = cur_occ;
   568       }
   569       prev = cur->collision_list_next_addr();
   570       cur = cur->collision_list_next();
   571     }
   572     i = i + _fine_eviction_stride;
   573     if (i >= _n_fine_entries) i = i - _n_fine_entries;
   574   }
   576   _fine_eviction_start++;
   578   if (_fine_eviction_start >= _n_fine_entries) {
   579     _fine_eviction_start -= _n_fine_entries;
   580   }
   582   guarantee(max != NULL, "Since _n_fine_entries > 0");
   584   // Set the corresponding coarse bit.
   585   size_t max_hrs_index = (size_t) max->hr()->hrs_index();
   586   if (!_coarse_map.at(max_hrs_index)) {
   587     _coarse_map.at_put(max_hrs_index, true);
   588     _n_coarse_entries++;
   589     if (G1TraceHeapRegionRememberedSet) {
   590       gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
   591                  "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
   592                  hr()->bottom(),
   593                  max->hr()->bottom(),
   594                  _n_coarse_entries);
   595     }
   596   }
   598   // Unsplice.
   599   *max_prev = max->collision_list_next();
   600   Atomic::inc(&_n_coarsenings);
   601   _n_fine_entries--;
   602   return max;
   603 }
   606 // At present, this must be called stop-world single-threaded.
   607 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
   608                               BitMap* region_bm, BitMap* card_bm) {
    609   // First eliminate garbage regions from the coarse map.
   610   if (G1RSScrubVerbose) {
   611     gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
   612   }
   614   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   615   if (G1RSScrubVerbose) {
   616     gclog_or_tty->print("   Coarse map: before = "SIZE_FORMAT"...",
   617                         _n_coarse_entries);
   618   }
   619   _coarse_map.set_intersection(*region_bm);
   620   _n_coarse_entries = _coarse_map.count_one_bits();
   621   if (G1RSScrubVerbose) {
   622     gclog_or_tty->print_cr("   after = "SIZE_FORMAT".", _n_coarse_entries);
   623   }
   625   // Now do the fine-grained maps.
   626   for (size_t i = 0; i < _max_fine_entries; i++) {
   627     PerRegionTable* cur = _fine_grain_regions[i];
   628     PerRegionTable** prev = &_fine_grain_regions[i];
   629     while (cur != NULL) {
   630       PerRegionTable* nxt = cur->collision_list_next();
   631       // If the entire region is dead, eliminate.
   632       if (G1RSScrubVerbose) {
   633         gclog_or_tty->print_cr("     For other region %u:",
   634                                cur->hr()->hrs_index());
   635       }
   636       if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
   637         *prev = nxt;
   638         cur->set_collision_list_next(NULL);
   639         _n_fine_entries--;
   640         if (G1RSScrubVerbose) {
   641           gclog_or_tty->print_cr("          deleted via region map.");
   642         }
   643         unlink_from_all(cur);
   644         PerRegionTable::free(cur);
   645       } else {
   646         // Do fine-grain elimination.
   647         if (G1RSScrubVerbose) {
   648           gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
   649         }
   650         cur->scrub(ctbs, card_bm);
   651         if (G1RSScrubVerbose) {
   652           gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
   653         }
   654         // Did that empty the table completely?
   655         if (cur->occupied() == 0) {
   656           *prev = nxt;
   657           cur->set_collision_list_next(NULL);
   658           _n_fine_entries--;
   659           unlink_from_all(cur);
   660           PerRegionTable::free(cur);
   661         } else {
   662           prev = cur->collision_list_next_addr();
   663         }
   664       }
   665       cur = nxt;
   666     }
   667   }
   668   // Since we may have deleted a from_card_cache entry from the RS, clear
   669   // the FCC.
   670   clear_fcc();
   671 }
   674 size_t OtherRegionsTable::occupied() const {
   675   // Cast away const in this case.
   676   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   677   size_t sum = occ_fine();
   678   sum += occ_sparse();
   679   sum += occ_coarse();
   680   return sum;
   681 }
   683 size_t OtherRegionsTable::occ_fine() const {
   684   size_t sum = 0;
   686   size_t num = 0;
   687   PerRegionTable * cur = _first_all_fine_prts;
   688   while (cur != NULL) {
   689     sum += cur->occupied();
   690     cur = cur->next();
   691     num++;
   692   }
   693   guarantee(num == _n_fine_entries, "just checking");
   694   return sum;
   695 }
   697 size_t OtherRegionsTable::occ_coarse() const {
   698   return (_n_coarse_entries * HeapRegion::CardsPerRegion);
   699 }
   701 size_t OtherRegionsTable::occ_sparse() const {
   702   return _sparse_table.occupied();
   703 }
   705 size_t OtherRegionsTable::mem_size() const {
   706   // Cast away const in this case.
   707   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   708   size_t sum = 0;
   709   PerRegionTable * cur = _first_all_fine_prts;
   710   while (cur != NULL) {
   711     sum += cur->mem_size();
   712     cur = cur->next();
   713   }
   714   sum += (sizeof(PerRegionTable*) * _max_fine_entries);
   715   sum += (_coarse_map.size_in_words() * HeapWordSize);
   716   sum += (_sparse_table.mem_size());
   717   sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
   718   return sum;
   719 }
   721 size_t OtherRegionsTable::static_mem_size() {
   722   return _from_card_cache_mem_size;
   723 }
   725 size_t OtherRegionsTable::fl_mem_size() {
   726   return PerRegionTable::fl_mem_size();
   727 }
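        // Invalidate every worker's from-card cache entry for this region.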
   729 void OtherRegionsTable::clear_fcc() {
   730   size_t hrs_idx = hr()->hrs_index();
   731   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   732     _from_card_cache[i][hrs_idx] = -1;
   733   }
   734 }
   736 void OtherRegionsTable::clear() {
   737   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   738   // if there are no entries, skip this step
   739   if (_first_all_fine_prts != NULL) {
   740     guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
   741     PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
   742     memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
   743   } else {
   744     guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
   745   }
   747   _first_all_fine_prts = _last_all_fine_prts = NULL;
   748   _sparse_table.clear();
   749   _coarse_map.clear();
   750   _n_fine_entries = 0;
   751   _n_coarse_entries = 0;
   753   clear_fcc();
   754 }
   756 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   757   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   758   size_t hrs_ind = (size_t) from_hr->hrs_index();
   759   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   760   if (del_single_region_table(ind, from_hr)) {
   761     assert(!_coarse_map.at(hrs_ind), "Inv");
   762   } else {
   763     _coarse_map.par_at_put(hrs_ind, 0);
   764   }
   765   // Check to see if any of the fcc entries come from here.
   766   size_t hr_ind = (size_t) hr()->hrs_index();
   767   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
   768     int fcc_ent = _from_card_cache[tid][hr_ind];
   769     if (fcc_ent != -1) {
   770       HeapWord* card_addr = (HeapWord*)
   771         (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
   772       if (hr()->is_in_reserved(card_addr)) {
   773         // Clear the from card cache.
   774         _from_card_cache[tid][hr_ind] = -1;
   775       }
   776     }
   777   }
   778 }
   780 bool OtherRegionsTable::del_single_region_table(size_t ind,
   781                                                 HeapRegion* hr) {
   782   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
   783   PerRegionTable** prev_addr = &_fine_grain_regions[ind];
   784   PerRegionTable* prt = *prev_addr;
   785   while (prt != NULL && prt->hr() != hr) {
   786     prev_addr = prt->collision_list_next_addr();
   787     prt = prt->collision_list_next();
   788   }
   789   if (prt != NULL) {
   790     assert(prt->hr() == hr, "Loop postcondition.");
   791     *prev_addr = prt->collision_list_next();
   792     unlink_from_all(prt);
   793     PerRegionTable::free(prt);
   794     _n_fine_entries--;
   795     return true;
   796   } else {
   797     return false;
   798   }
   799 }
   801 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
   802   // Cast away const in this case.
   803   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   804   return contains_reference_locked(from);
   805 }
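        // Look the reference up in the coarse map first, then in the fine-grain
        // PRT for the "from" region, and finally in the sparse table.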
   807 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
   808   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
   809   if (hr == NULL) return false;
   810   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
   811   // Is this region in the coarse map?
   812   if (_coarse_map.at(hr_ind)) return true;
   814   PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
   815                                      hr);
   816   if (prt != NULL) {
   817     return prt->contains_reference(from);
   819   } else {
   820     uintptr_t from_card =
   821       (uintptr_t(from) >> CardTableModRefBS::card_shift);
   822     uintptr_t hr_bot_card_index =
   823       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
   824     assert(from_card >= hr_bot_card_index, "Inv");
   825     CardIdx_t card_index = from_card - hr_bot_card_index;
   826     assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
   827            "Must be in range.");
   828     return _sparse_table.contains_card(hr_ind, card_index);
   829   }
   832 }
   834 void
   835 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
   836   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
   837 }
   839 // Determines how many threads can add records to an rset in parallel.
   840 // This can be done by either mutator threads together with the
   841 // concurrent refinement threads or GC threads.
   842 int HeapRegionRemSet::num_par_rem_sets() {
   843   return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
   844 }
   846 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
   847                                    HeapRegion* hr)
   848   : _bosa(bosa), _other_regions(hr) {
   849   reset_for_par_iteration();
   850 }
   852 void HeapRegionRemSet::setup_remset_size() {
   853   // Setup sparse and fine-grain tables sizes.
   854   // table_size = base * (log(region_size / 1M) + 1)
   855   const int LOG_M = 20;
   856   int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
   857   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
   858     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
   859   }
   860   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
   861     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
   862   }
   863   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
   864 }
   866 bool HeapRegionRemSet::claim_iter() {
   867   if (_iter_state != Unclaimed) return false;
   868   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
   869   return (res == Unclaimed);
   870 }
   872 void HeapRegionRemSet::set_iter_complete() {
   873   _iter_state = Complete;
   874 }
   876 bool HeapRegionRemSet::iter_is_complete() {
   877   return _iter_state == Complete;
   878 }
   880 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
   881   iter->initialize(this);
   882 }
   884 #ifndef PRODUCT
   885 void HeapRegionRemSet::print() const {
   886   HeapRegionRemSetIterator iter;
   887   init_iterator(&iter);
   888   size_t card_index;
   889   while (iter.has_next(card_index)) {
   890     HeapWord* card_start =
   891       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
   892     gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
   893   }
   894   if (iter.n_yielded() != occupied()) {
   895     gclog_or_tty->print_cr("Yielded disagrees with occupied:");
   896     gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
   897                   iter.n_yielded(),
   898                   iter.n_yielded_coarse(), iter.n_yielded_fine());
   899     gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
   900                   occupied(), occ_coarse(), occ_fine());
   901   }
   902   guarantee(iter.n_yielded() == occupied(),
   903             "We should have yielded all the represented cards.");
   904 }
   905 #endif
   907 void HeapRegionRemSet::cleanup() {
   908   SparsePRT::cleanup_all();
   909 }
   911 void HeapRegionRemSet::clear() {
   912   _other_regions.clear();
   913   assert(occupied() == 0, "Should be clear.");
   914   reset_for_par_iteration();
   915 }
   917 void HeapRegionRemSet::reset_for_par_iteration() {
   918   _iter_state = Unclaimed;
   919   _iter_claimed = 0;
   920   // It's good to check this to make sure that the two methods are in sync.
   921   assert(verify_ready_for_par_iteration(), "post-condition");
   922 }
   924 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
   925                              BitMap* region_bm, BitMap* card_bm) {
   926   _other_regions.scrub(ctbs, region_bm, card_bm);
   927 }
   929 //-------------------- Iteration --------------------
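        // The iterator yields card indices from the sparse table first, then from
        // the fine-grain PRT bitmaps, and finally from the coarse map, where every
        // card of each coarsened region is yielded.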
   931 HeapRegionRemSetIterator::
   932 HeapRegionRemSetIterator() :
   933   _hrrs(NULL),
   934   _g1h(G1CollectedHeap::heap()),
   935   _bosa(NULL),
   936   _sparse_iter() { }
   938 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
   939   _hrrs = hrrs;
   940   _coarse_map = &_hrrs->_other_regions._coarse_map;
   941   _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
   942   _bosa = _hrrs->bosa();
   944   _is = Sparse;
   945   // Set these values so that we increment to the first region.
   946   _coarse_cur_region_index = -1;
   947   _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
   949   _cur_region_cur_card = 0;
   951   _fine_array_index = -1;
   952   _fine_cur_prt = NULL;
   954   _n_yielded_coarse = 0;
   955   _n_yielded_fine = 0;
   956   _n_yielded_sparse = 0;
   958   _sparse_iter.init(&hrrs->_other_regions._sparse_table);
   959 }
   961 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
   962   if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
   963   // Go to the next card.
   964   _coarse_cur_region_cur_card++;
    965   // Was that the last card in the current region?
   966   if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
   967     // Yes: find the next region.  This may leave _coarse_cur_region_index
    968     // set to the last index, in which case there are no more coarse
   969     // regions.
   970     _coarse_cur_region_index =
   971       (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
   972     if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
   973       _coarse_cur_region_cur_card = 0;
   974       HeapWord* r_bot =
   975         _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
   976       _cur_region_card_offset = _bosa->index_for(r_bot);
   977     } else {
   978       return false;
   979     }
   980   }
   981   // If we didn't return false above, then we can yield a card.
   982   card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
   983   return true;
   984 }
   986 void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
   987   // Otherwise, find the next bucket list in the array.
   988   _fine_array_index++;
   989   while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
   990     _fine_cur_prt = _fine_grain_regions[_fine_array_index];
   991     if (_fine_cur_prt != NULL) return;
   992     else _fine_array_index++;
   993   }
   994   assert(_fine_cur_prt == NULL, "Loop post");
   995 }
   997 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
   998   if (fine_has_next()) {
   999     _cur_region_cur_card =
   1000       _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
   1001   }
  1002   while (!fine_has_next()) {
  1003     if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
  1004       _cur_region_cur_card = 0;
   1005       _fine_cur_prt = _fine_cur_prt->collision_list_next();
   1006     }
  1007     if (_fine_cur_prt == NULL) {
  1008       fine_find_next_non_null_prt();
   1009       if (_fine_cur_prt == NULL) return false;
   1010     }
  1011     assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
  1012            "inv.");
  1013     HeapWord* r_bot =
  1014       _fine_cur_prt->hr()->bottom();
  1015     _cur_region_card_offset = _bosa->index_for(r_bot);
   1016     _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
   1017   }
  1018   assert(fine_has_next(), "Or else we exited the loop via the return.");
  1019   card_index = _cur_region_card_offset + _cur_region_cur_card;
   1020   return true;
   1021 }
  1023 bool HeapRegionRemSetIterator::fine_has_next() {
  1024   return
  1025     _fine_cur_prt != NULL &&
   1026     _cur_region_cur_card < HeapRegion::CardsPerRegion;
   1027 }
  1029 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  1030   switch (_is) {
  1031   case Sparse:
  1032     if (_sparse_iter.has_next(card_index)) {
  1033       _n_yielded_sparse++;
   1034       return true;
   1035     }
  1036     // Otherwise, deliberate fall-through
  1037     _is = Fine;
  1038   case Fine:
  1039     if (fine_has_next(card_index)) {
  1040       _n_yielded_fine++;
   1041       return true;
   1042     }
  1043     // Otherwise, deliberate fall-through
  1044     _is = Coarse;
  1045   case Coarse:
  1046     if (coarse_has_next(card_index)) {
  1047       _n_yielded_coarse++;
   1048       return true;
   1049     }
  1050     // Otherwise...
   1051     break;
   1052   }
  1053   assert(ParallelGCThreads > 1 ||
  1054          n_yielded() == _hrrs->occupied(),
  1055          "Should have yielded all the cards in the rem set "
  1056          "(in the non-par case).");
   1057   return false;
   1058 }
  1062 OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
  1063 HeapWord**          HeapRegionRemSet::_recorded_cards = NULL;
  1064 HeapRegion**        HeapRegionRemSet::_recorded_regions = NULL;
  1065 int                 HeapRegionRemSet::_n_recorded = 0;
  1067 HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
  1068 int*         HeapRegionRemSet::_recorded_event_index = NULL;
  1069 int          HeapRegionRemSet::_n_recorded_events = 0;
  1071 void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  1072   if (_recorded_oops == NULL) {
  1073     assert(_n_recorded == 0
  1074            && _recorded_cards == NULL
  1075            && _recorded_regions == NULL,
  1076            "Inv");
  1077     _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
  1078     _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded, mtGC);
   1079     _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded, mtGC);
   1080   }
  1081   if (_n_recorded == MaxRecorded) {
  1082     gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  1083   } else {
  1084     _recorded_cards[_n_recorded] =
  1085       (HeapWord*)align_size_down(uintptr_t(f),
  1086                                  CardTableModRefBS::card_size);
  1087     _recorded_oops[_n_recorded] = f;
  1088     _recorded_regions[_n_recorded] = hr;
   1089     _n_recorded++;
   1090   }
   1091 }
  1093 void HeapRegionRemSet::record_event(Event evnt) {
  1094   if (!G1RecordHRRSEvents) return;
  1096   if (_recorded_events == NULL) {
  1097     assert(_n_recorded_events == 0
  1098            && _recorded_event_index == NULL,
  1099            "Inv");
  1100     _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
   1101     _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
   1102   }
  1103   if (_n_recorded_events == MaxRecordedEvents) {
  1104     gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  1105   } else {
  1106     _recorded_events[_n_recorded_events] = evnt;
  1107     _recorded_event_index[_n_recorded_events] = _n_recorded;
   1108     _n_recorded_events++;
   1109   }
   1110 }
  1112 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
  1113   switch (evnt) {
  1114   case Event_EvacStart:
  1115     str->print("Evac Start");
  1116     break;
  1117   case Event_EvacEnd:
  1118     str->print("Evac End");
  1119     break;
  1120   case Event_RSUpdateEnd:
  1121     str->print("RS Update End");
   1122     break;
   1123   }
   1124 }
  1126 void HeapRegionRemSet::print_recorded() {
  1127   int cur_evnt = 0;
  1128   Event cur_evnt_kind;
  1129   int cur_evnt_ind = 0;
  1130   if (_n_recorded_events > 0) {
  1131     cur_evnt_kind = _recorded_events[cur_evnt];
   1132     cur_evnt_ind = _recorded_event_index[cur_evnt];
   1133   }
  1135   for (int i = 0; i < _n_recorded; i++) {
  1136     while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
  1137       gclog_or_tty->print("Event: ");
  1138       print_event(gclog_or_tty, cur_evnt_kind);
  1139       gclog_or_tty->print_cr("");
  1140       cur_evnt++;
  1141       if (cur_evnt < MaxRecordedEvents) {
  1142         cur_evnt_kind = _recorded_events[cur_evnt];
   1143         cur_evnt_ind = _recorded_event_index[cur_evnt];
   1144       }
   1145     }
  1146     gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
  1147                         " for ref " PTR_FORMAT ".\n",
  1148                         _recorded_cards[i], _recorded_regions[i]->bottom(),
   1149                         _recorded_oops[i]);
   1150   }
   1151 }
  1153 void HeapRegionRemSet::reset_for_cleanup_tasks() {
   1154   SparsePRT::reset_for_cleanup_tasks();
   1155 }
  1157 void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
   1158   _other_regions.do_cleanup_work(hrrs_cleanup_task);
   1159 }
  1161 void
  1162 HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
   1163   SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
   1164 }
  1166 #ifndef PRODUCT
  1167 void HeapRegionRemSet::test() {
  1168   os::sleep(Thread::current(), (jlong)5000, false);
  1169   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1171   // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  1172   // hash bucket.
  1173   HeapRegion* hr0 = g1h->region_at(0);
  1174   HeapRegion* hr1 = g1h->region_at(1);
  1175   HeapRegion* hr2 = g1h->region_at(5);
  1176   HeapRegion* hr3 = g1h->region_at(6);
  1177   HeapRegion* hr4 = g1h->region_at(7);
  1178   HeapRegion* hr5 = g1h->region_at(8);
  1180   HeapWord* hr1_start = hr1->bottom();
  1181   HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  1182   HeapWord* hr1_last = hr1->end() - 1;
  1184   HeapWord* hr2_start = hr2->bottom();
  1185   HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  1186   HeapWord* hr2_last = hr2->end() - 1;
  1188   HeapWord* hr3_start = hr3->bottom();
  1189   HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  1190   HeapWord* hr3_last = hr3->end() - 1;
  1192   HeapRegionRemSet* hrrs = hr0->rem_set();
  1194   // Make three references from region 0x101...
  1195   hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  1196   hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  1197   hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
  1199   hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  1200   hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  1201   hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
  1203   hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  1204   hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  1205   hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
  1207   // Now cause a coarsening.
  1208   hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  1209   hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
  1211   // Now, does iteration yield these three?
  1212   HeapRegionRemSetIterator iter;
  1213   hrrs->init_iterator(&iter);
  1214   size_t sum = 0;
  1215   size_t card_index;
  1216   while (iter.has_next(card_index)) {
  1217     HeapWord* card_start =
  1218       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
  1219     gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
   1220     sum++;
   1221   }
  1222   guarantee(sum == 11 - 3 + 2048, "Failure");
  1223   guarantee(sum == hrrs->occupied(), "Failure");
  1225 #endif
