src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

author:      tonyp
date:        Fri, 09 Sep 2011 05:20:58 -0400
changeset:   3121:3bddbf0f57d6
parent:      3028:f44782f04dd4
child:       3182:65a8ff39a6da
permissions: -rw-r--r--

7087717: G1: make the G1PrintRegionLivenessInfo parameter diagnostic
Reviewed-by: brutisso, ysr

     1 /*
     2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
    28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    31 #include "memory/allocation.hpp"
    32 #include "memory/space.inline.hpp"
    33 #include "utilities/bitMap.inline.hpp"
    34 #include "utilities/globalDefinitions.hpp"
    36 #define HRRS_VERBOSE 0
    38 #define PRT_COUNT_OCCUPIED 1
    40 // OtherRegionsTable
    42 class PerRegionTable: public CHeapObj {
    43   friend class OtherRegionsTable;
    44   friend class HeapRegionRemSetIterator;
    46   HeapRegion*     _hr;
    47   BitMap          _bm;
    48 #if PRT_COUNT_OCCUPIED
    49   jint            _occupied;
    50 #endif
    51   PerRegionTable* _next_free;
    53   PerRegionTable* next_free() { return _next_free; }
    54   void set_next_free(PerRegionTable* prt) { _next_free = prt; }
    57   static PerRegionTable* _free_list;
    59 #ifdef _MSC_VER
    60   // For some reason, even though the classes above are declared as friends,
    61   // they are unable to access CardsPerRegion when it is private/protected.
    62   // Only the Windows C++ compiler complains; Sun CC and Linux gcc allow it.
    64   public:
    66 #endif // _MSC_VER
    68 protected:
    69   // We need access in order to union things into the base table.
    70   BitMap* bm() { return &_bm; }
    72 #if PRT_COUNT_OCCUPIED
    73   void recount_occupied() {
    74     _occupied = (jint) bm()->count_one_bits();
    75   }
    76 #endif
    78   PerRegionTable(HeapRegion* hr) :
    79     _hr(hr),
    80 #if PRT_COUNT_OCCUPIED
    81     _occupied(0),
    82 #endif
    83     _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
    84   {}
    86   static void free(PerRegionTable* prt) {
    87     while (true) {
    88       PerRegionTable* fl = _free_list;
    89       prt->set_next_free(fl);
    90       PerRegionTable* res =
    91         (PerRegionTable*)
    92         Atomic::cmpxchg_ptr(prt, &_free_list, fl);
    93       if (res == fl) return;
    94     }
    95     ShouldNotReachHere();
    96   }
    98   static PerRegionTable* alloc(HeapRegion* hr) {
    99     PerRegionTable* fl = _free_list;
   100     while (fl != NULL) {
   101       PerRegionTable* nxt = fl->next_free();
   102       PerRegionTable* res =
   103         (PerRegionTable*)
   104         Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
   105       if (res == fl) {
   106         fl->init(hr);
   107         return fl;
   108       } else {
   109         fl = _free_list;
   110       }
   111     }
   112     assert(fl == NULL, "Loop condition.");
   113     return new PerRegionTable(hr);
   114   }
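// For illustration (not code from this file): free() and alloc() above
// implement the classic lock-free LIFO free list -- push and pop the list
// head with a CAS, retrying on contention.  A minimal stand-alone sketch of
// the same pattern, using std::atomic in place of the VM's Atomic::cmpxchg_ptr
// (the names below are illustrative only):
//
//   #include <atomic>
//   struct Node { Node* next; };
//   std::atomic<Node*> free_list{nullptr};
//
//   void push(Node* n) {                      // cf. PerRegionTable::free()
//     Node* head = free_list.load();
//     do { n->next = head; }
//     while (!free_list.compare_exchange_weak(head, n));
//   }
//
//   Node* pop() {                             // cf. PerRegionTable::alloc()
//     Node* head = free_list.load();
//     while (head != nullptr &&
//            !free_list.compare_exchange_weak(head, head->next)) {
//       // a failed CAS reloads 'head'; retry with the new list head
//     }
//     return head;  // nullptr: list was empty, caller allocates a new node
//   }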
   116   void add_card_work(CardIdx_t from_card, bool par) {
   117     if (!_bm.at(from_card)) {
   118       if (par) {
   119         if (_bm.par_at_put(from_card, 1)) {
   120 #if PRT_COUNT_OCCUPIED
   121           Atomic::inc(&_occupied);
   122 #endif
   123         }
   124       } else {
   125         _bm.at_put(from_card, 1);
   126 #if PRT_COUNT_OCCUPIED
   127         _occupied++;
   128 #endif
   129       }
   130     }
   131   }
   133   void add_reference_work(OopOrNarrowOopStar from, bool par) {
   134     // Must make this robust in case "from" is not in "_hr", because of
   135     // concurrency.
   137 #if HRRS_VERBOSE
   138     gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
   139                            from, *from);
   140 #endif
   142     HeapRegion* loc_hr = hr();
   143     // If the test below fails, then this table was reused concurrently
   144     // with this operation.  This is OK, since the old table was coarsened,
   145     // and adding a bit to the new table is never incorrect.
   146     if (loc_hr->is_in_reserved(from)) {
   147       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
   148       CardIdx_t from_card = (CardIdx_t)
   149           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
   151       assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
   152              "Must be in range.");
   153       add_card_work(from_card, par);
   154     }
   155   }
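// For illustration: the card index computed above is simply the word offset
// of "from" within the region, scaled to card granularity.  Assuming the
// usual 512-byte cards (card_shift == 9) and 8-byte HeapWords on a 64-bit VM
// (LogHeapWordSize == 3), each card covers 64 words:
//
//   size_t    hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
//   CardIdx_t from_card = (CardIdx_t)(hw_offset >> (9 - 3));  // offset / 64
//
// e.g. a reference 40 KB past the region bottom is 5120 words in, and
// 5120 >> 6 == 80, i.e. card 80 of the region.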
   157 public:
   159   HeapRegion* hr() const { return _hr; }
   161 #if PRT_COUNT_OCCUPIED
   162   jint occupied() const {
   163     // Overkill, but if we ever need it...
   164     // guarantee(_occupied == _bm.count_one_bits(), "Check");
   165     return _occupied;
   166   }
   167 #else
   168   jint occupied() const {
   169     return _bm.count_one_bits();
   170   }
   171 #endif
   173   void init(HeapRegion* hr) {
   174     _hr = hr;
   175 #if PRT_COUNT_OCCUPIED
   176     _occupied = 0;
   177 #endif
   178     _bm.clear();
   179   }
   181   void add_reference(OopOrNarrowOopStar from) {
   182     add_reference_work(from, /*parallel*/ true);
   183   }
   185   void seq_add_reference(OopOrNarrowOopStar from) {
   186     add_reference_work(from, /*parallel*/ false);
   187   }
   189   void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
   190     HeapWord* hr_bot = hr()->bottom();
   191     size_t hr_first_card_index = ctbs->index_for(hr_bot);
   192     bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
   193 #if PRT_COUNT_OCCUPIED
   194     recount_occupied();
   195 #endif
   196   }
   198   void add_card(CardIdx_t from_card_index) {
   199     add_card_work(from_card_index, /*parallel*/ true);
   200   }
   202   void seq_add_card(CardIdx_t from_card_index) {
   203     add_card_work(from_card_index, /*parallel*/ false);
   204   }
   206   // (Destructively) union the bitmap of the current table into the given
   207   // bitmap (which is assumed to be of the same size.)
   208   void union_bitmap_into(BitMap* bm) {
   209     bm->set_union(_bm);
   210   }
   212   // Mem size in bytes.
   213   size_t mem_size() const {
   214     return sizeof(this) + _bm.size_in_words() * HeapWordSize;
   215   }
   217   static size_t fl_mem_size() {
   218     PerRegionTable* cur = _free_list;
   219     size_t res = 0;
   220     while (cur != NULL) {
   221       res += sizeof(PerRegionTable);
   222       cur = cur->next_free();
   223     }
   224     return res;
   225   }
   227   // Requires "from" to be in "hr()".
   228   bool contains_reference(OopOrNarrowOopStar from) const {
   229     assert(hr()->is_in_reserved(from), "Precondition.");
   230     size_t card_ind = pointer_delta(from, hr()->bottom(),
   231                                     CardTableModRefBS::card_size);
   232     return _bm.at(card_ind);
   233   }
   234 };
   236 PerRegionTable* PerRegionTable::_free_list = NULL;
   239 #define COUNT_PAR_EXPANDS 0
   241 #if COUNT_PAR_EXPANDS
   242 static jint n_par_expands = 0;
   243 static jint n_par_contracts = 0;
   244 static jint par_expand_list_len = 0;
   245 static jint max_par_expand_list_len = 0;
   247 static void print_par_expand() {
   248   Atomic::inc(&n_par_expands);
   249   Atomic::inc(&par_expand_list_len);
   250   if (par_expand_list_len > max_par_expand_list_len) {
   251     max_par_expand_list_len = par_expand_list_len;
   252   }
   253   if ((n_par_expands % 10) == 0) {
   254     gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, "
   255                   "len = %d, max_len = %d\n.",
   256                   n_par_expands, n_par_contracts, par_expand_list_len,
   257                   max_par_expand_list_len);
   258   }
   259 }
   260 #endif
   262 class PosParPRT: public PerRegionTable {
   263   PerRegionTable** _par_tables;
   265   enum SomePrivateConstants {
   266     ReserveParTableExpansion = 1
   267   };
   269   void par_contract() {
   270     assert(_par_tables != NULL, "Precondition.");
   271     int n = HeapRegionRemSet::num_par_rem_sets()-1;
   272     for (int i = 0; i < n; i++) {
   273       _par_tables[i]->union_bitmap_into(bm());
   274       PerRegionTable::free(_par_tables[i]);
   275       _par_tables[i] = NULL;
   276     }
   277 #if PRT_COUNT_OCCUPIED
   278     // We must recount the "occupied."
   279     recount_occupied();
   280 #endif
   281     FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables);
   282     _par_tables = NULL;
   283 #if COUNT_PAR_EXPANDS
   284     Atomic::inc(&n_par_contracts);
   285     Atomic::dec(&par_expand_list_len);
   286 #endif
   287   }
   289   static PerRegionTable** _par_table_fl;
   291   PosParPRT* _next;
   293   static PosParPRT* _free_list;
   295   PerRegionTable** par_tables() const {
   296     assert(uintptr_t(NULL) == 0, "Assumption.");
   297     if (uintptr_t(_par_tables) <= ReserveParTableExpansion)
   298       return NULL;
   299     else
   300       return _par_tables;
   301   }
   303   PosParPRT* _next_par_expanded;
   304   PosParPRT* next_par_expanded() { return _next_par_expanded; }
   305   void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; }
   306   static PosParPRT* _par_expanded_list;
   308 public:
   310   PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {}
   312   jint occupied() const {
   313     jint res = PerRegionTable::occupied();
   314     if (par_tables() != NULL) {
   315       for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
   316         res += par_tables()[i]->occupied();
   317       }
   318     }
   319     return res;
   320   }
   322   void init(HeapRegion* hr) {
   323     PerRegionTable::init(hr);
   324     _next = NULL;
   325     if (par_tables() != NULL) {
   326       for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
   327         par_tables()[i]->init(hr);
   328       }
   329     }
   330   }
   332   static void free(PosParPRT* prt) {
   333     while (true) {
   334       PosParPRT* fl = _free_list;
   335       prt->set_next(fl);
   336       PosParPRT* res =
   337         (PosParPRT*)
   338         Atomic::cmpxchg_ptr(prt, &_free_list, fl);
   339       if (res == fl) return;
   340     }
   341     ShouldNotReachHere();
   342   }
   344   static PosParPRT* alloc(HeapRegion* hr) {
   345     PosParPRT* fl = _free_list;
   346     while (fl != NULL) {
   347       PosParPRT* nxt = fl->next();
   348       PosParPRT* res =
   349         (PosParPRT*)
   350         Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
   351       if (res == fl) {
   352         fl->init(hr);
   353         return fl;
   354       } else {
   355         fl = _free_list;
   356       }
   357     }
   358     assert(fl == NULL, "Loop condition.");
   359     return new PosParPRT(hr);
   360   }
   362   PosParPRT* next() const { return _next; }
   363   void set_next(PosParPRT* nxt) { _next = nxt; }
   364   PosParPRT** next_addr() { return &_next; }
   366   bool should_expand(int tid) {
   367     // Given that we now defer RSet updates for after a GC we don't
   368     // really need to expand the tables any more. This code should be
   369     // cleaned up in the future (see CR 6921087).
   370     return false;
   371   }
   373   void par_expand() {
   374     int n = HeapRegionRemSet::num_par_rem_sets()-1;
   375     if (n <= 0) return;
   376     if (_par_tables == NULL) {
   377       PerRegionTable* res =
   378         (PerRegionTable*)
   379         Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
   380                             &_par_tables, NULL);
   381       if (res != NULL) return;
   382       // Otherwise, we reserved the right to do the expansion.
   384       PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
   385       for (int i = 0; i < n; i++) {
   386         PerRegionTable* ptable = PerRegionTable::alloc(hr());
   387         ptables[i] = ptable;
   388       }
   389       // Here we do not need an atomic.
   390       _par_tables = ptables;
   391 #if COUNT_PAR_EXPANDS
   392       print_par_expand();
   393 #endif
   394       // We must put this table on the expanded list.
   395       PosParPRT* exp_head = _par_expanded_list;
   396       while (true) {
   397         set_next_par_expanded(exp_head);
   398         PosParPRT* res =
   399           (PosParPRT*)
   400           Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
   401         if (res == exp_head) return;
   402         // Otherwise.
   403         exp_head = res;
   404       }
   405       ShouldNotReachHere();
   406     }
   407   }
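// For illustration: par_expand() above is a claim-then-publish protocol.  The
// thread that CASes the sentinel ReserveParTableExpansion (1) into
// _par_tables over NULL wins the right to expand; it builds the per-thread
// tables privately and then publishes the real pointer with a plain store,
// which is safe because only the winner ever writes that field.  par_tables()
// cooperates by treating any value <= ReserveParTableExpansion as
// "not expanded yet".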
   409   void add_reference(OopOrNarrowOopStar from, int tid) {
   410     // Expand if necessary.
   411     PerRegionTable** pt = par_tables();
   412     if (pt != NULL) {
   413       // We always have to assume that mods to table 0 are in parallel,
   414       // because of the claiming scheme in parallel expansion.  A thread
   415       // with tid != 0 that finds the table to be NULL, but doesn't succeed
   416       // in claiming the right of expanding it, will end up in the else
   417       // clause of the above if test.  That thread could be delayed, and a
   418       // thread 0 add reference could see the table expanded, and come
   419       // here.  Both threads would be adding in parallel.  But we get to
   420       // not use atomics for tids > 0.
   421       if (tid == 0) {
   422         PerRegionTable::add_reference(from);
   423       } else {
   424         pt[tid-1]->seq_add_reference(from);
   425       }
   426     } else {
   427       // Not expanded -- add to the base table.
   428       PerRegionTable::add_reference(from);
   429     }
   430   }
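// In sketch form (for illustration only), the routing above is:
//
//   par_tables() == NULL            -> base table, atomic add (not expanded)
//   par_tables() != NULL, tid == 0  -> base table, atomic add (may race with
//                                      a thread that lost the expansion CAS)
//   par_tables() != NULL, tid  > 0  -> par_tables()[tid-1], non-atomic add
//                                      (each such table has a single writer)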
   432   void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
   433     assert(_par_tables == NULL, "Precondition");
   434     PerRegionTable::scrub(ctbs, card_bm);
   435   }
   437   size_t mem_size() const {
   438     size_t res =
   439       PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable);
   440     if (_par_tables != NULL) {
   441       for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
   442         res += _par_tables[i]->mem_size();
   443       }
   444     }
   445     return res;
   446   }
   448   static size_t fl_mem_size() {
   449     PosParPRT* cur = _free_list;
   450     size_t res = 0;
   451     while (cur != NULL) {
   452       res += sizeof(PosParPRT);
   453       cur = cur->next();
   454     }
   455     return res;
   456   }
   458   bool contains_reference(OopOrNarrowOopStar from) const {
   459     if (PerRegionTable::contains_reference(from)) return true;
   460     if (_par_tables != NULL) {
   461       for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
   462         if (_par_tables[i]->contains_reference(from)) return true;
   463       }
   464     }
   465     return false;
   466   }
   468   static void par_contract_all();
   469 };
   471 void PosParPRT::par_contract_all() {
   472   PosParPRT* hd = _par_expanded_list;
   473   while (hd != NULL) {
   474     PosParPRT* nxt = hd->next_par_expanded();
   475     PosParPRT* res =
   476       (PosParPRT*)
   477       Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd);
   478     if (res == hd) {
   479       // We claimed the right to contract this table.
   480       hd->set_next_par_expanded(NULL);
   481       hd->par_contract();
   482       hd = _par_expanded_list;
   483     } else {
   484       hd = res;
   485     }
   486   }
   487 }
   489 PosParPRT* PosParPRT::_free_list = NULL;
   490 PosParPRT* PosParPRT::_par_expanded_list = NULL;
   492 jint OtherRegionsTable::_cache_probes = 0;
   493 jint OtherRegionsTable::_cache_hits = 0;
   495 size_t OtherRegionsTable::_max_fine_entries = 0;
   496 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
   497 #if SAMPLE_FOR_EVICTION
   498 size_t OtherRegionsTable::_fine_eviction_stride = 0;
   499 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
   500 #endif
   502 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
   503   _g1h(G1CollectedHeap::heap()),
   504   _m(Mutex::leaf, "An OtherRegionsTable lock", true),
   505   _hr(hr),
   506   _coarse_map(G1CollectedHeap::heap()->max_regions(),
   507               false /* in-resource-area */),
   508   _fine_grain_regions(NULL),
   509   _n_fine_entries(0), _n_coarse_entries(0),
   510 #if SAMPLE_FOR_EVICTION
   511   _fine_eviction_start(0),
   512 #endif
   513   _sparse_table(hr)
   514 {
   515   typedef PosParPRT* PosParPRTPtr;
   516   if (_max_fine_entries == 0) {
   517     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
   518     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
   519     _max_fine_entries = (size_t)(1 << max_entries_log);
   520     _mod_max_fine_entries_mask = _max_fine_entries - 1;
   521 #if SAMPLE_FOR_EVICTION
   522     assert(_fine_eviction_sample_size == 0
   523            && _fine_eviction_stride == 0, "All init at same time.");
   524     _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
   525     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
   526 #endif
   527   }
   528   _fine_grain_regions = new PosParPRTPtr[_max_fine_entries];
   529   if (_fine_grain_regions == NULL)
   530     vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
   531                           "Failed to allocate _fine_grain_entries.");
   532   for (size_t i = 0; i < _max_fine_entries; i++) {
   533     _fine_grain_regions[i] = NULL;
   534   }
   535 }
   537 int** OtherRegionsTable::_from_card_cache = NULL;
   538 size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
   539 size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
   541 void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
   542   _from_card_cache_max_regions = max_regions;
   544   int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
   545   _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs);
   546   for (int i = 0; i < n_par_rs; i++) {
   547     _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions);
   548     for (size_t j = 0; j < max_regions; j++) {
   549       _from_card_cache[i][j] = -1;  // An invalid value.
   550     }
   551   }
   552   _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int);
   553 }
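// For illustration: the from-card cache is a [worker][region] matrix that
// remembers the last card each worker added to each region's remembered set,
// so a burst of writes from the same card is filtered before touching the
// sparse/fine/coarse tables (see add_reference() below):
//
//   _from_card_cache[tid][region] == from_card  -> duplicate, nothing to do
//   otherwise                                   -> store from_card, fall
//                                                  through to the tables
//
// A value of -1 marks an empty slot; clear_fcc() resets this region's column
// for every worker.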
   555 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
   556   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   557     assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
   558     for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
   559       _from_card_cache[i][j] = -1;  // An invalid value.
   560     }
   561   }
   562 }
   564 #ifndef PRODUCT
   565 void OtherRegionsTable::print_from_card_cache() {
   566   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   567     for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
   568       gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
   569                     i, j, _from_card_cache[i][j]);
   570     }
   571   }
   572 }
   573 #endif
   575 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
   576   size_t cur_hrs_ind = hr()->hrs_index();
   578 #if HRRS_VERBOSE
   579   gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
   580                                                   from,
   581                                                   UseCompressedOops
   582                                                   ? oopDesc::load_decode_heap_oop((narrowOop*)from)
   583                                                   : oopDesc::load_decode_heap_oop((oop*)from));
   584 #endif
   586   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
   588 #if HRRS_VERBOSE
   589   gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
   590                 hr()->bottom(), from_card,
   591                 _from_card_cache[tid][cur_hrs_ind]);
   592 #endif
   594 #define COUNT_CACHE 0
   595 #if COUNT_CACHE
   596   jint p = Atomic::add(1, &_cache_probes);
   597   if ((p % 10000) == 0) {
   598     jint hits = _cache_hits;
   599     gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.",
   600                   _cache_hits, p, 100.0* (float)hits/(float)p);
   601   }
   602 #endif
   603   if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
   604 #if HRRS_VERBOSE
   605     gclog_or_tty->print_cr("  from-card cache hit.");
   606 #endif
   607 #if COUNT_CACHE
   608     Atomic::inc(&_cache_hits);
   609 #endif
   610     assert(contains_reference(from), "We just added it!");
   611     return;
   612   } else {
   613     _from_card_cache[tid][cur_hrs_ind] = from_card;
   614   }
   616   // Note that this may be a continued H region.
   617   HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
   618   RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
   620   // If the region is already coarsened, return.
   621   if (_coarse_map.at(from_hrs_ind)) {
   622 #if HRRS_VERBOSE
   623     gclog_or_tty->print_cr("  coarse map hit.");
   624 #endif
   625     assert(contains_reference(from), "We just added it!");
   626     return;
   627   }
   629   // Otherwise find a per-region table to add it to.
   630   size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
   631   PosParPRT* prt = find_region_table(ind, from_hr);
   632   if (prt == NULL) {
   633     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   634     // Confirm that it's really not there...
   635     prt = find_region_table(ind, from_hr);
   636     if (prt == NULL) {
   638       uintptr_t from_hr_bot_card_index =
   639         uintptr_t(from_hr->bottom())
   640           >> CardTableModRefBS::card_shift;
   641       CardIdx_t card_index = from_card - from_hr_bot_card_index;
   642       assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
   643              "Must be in range.");
   644       if (G1HRRSUseSparseTable &&
   645           _sparse_table.add_card(from_hrs_ind, card_index)) {
   646         if (G1RecordHRRSOops) {
   647           HeapRegionRemSet::record(hr(), from);
   648 #if HRRS_VERBOSE
   649           gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
   650                               "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
   651                               align_size_down(uintptr_t(from),
   652                                               CardTableModRefBS::card_size),
   653                               hr()->bottom(), from);
   654 #endif
   655         }
   656 #if HRRS_VERBOSE
   657         gclog_or_tty->print_cr("   added card to sparse table.");
   658 #endif
   659         assert(contains_reference_locked(from), "We just added it!");
   660         return;
   661       } else {
   662 #if HRRS_VERBOSE
   663         gclog_or_tty->print_cr("   [tid %d] sparse table entry "
   664                       "overflow(f: %d, t: %d)",
   665                       tid, from_hrs_ind, cur_hrs_ind);
   666 #endif
   667       }
   669       if (_n_fine_entries == _max_fine_entries) {
   670         prt = delete_region_table();
   671       } else {
   672         prt = PosParPRT::alloc(from_hr);
   673       }
   674       prt->init(from_hr);
   676       PosParPRT* first_prt = _fine_grain_regions[ind];
   677       prt->set_next(first_prt);  // XXX Maybe move to init?
   678       _fine_grain_regions[ind] = prt;
   679       _n_fine_entries++;
   681       if (G1HRRSUseSparseTable) {
   682         // Transfer from sparse to fine-grain.
   683         SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
   684         assert(sprt_entry != NULL, "There should have been an entry");
   685         for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
   686           CardIdx_t c = sprt_entry->card(i);
   687           if (c != SparsePRTEntry::NullEntry) {
   688             prt->add_card(c);
   689           }
   690         }
   691         // Now we can delete the sparse entry.
   692         bool res = _sparse_table.delete_entry(from_hrs_ind);
   693         assert(res, "It should have been there.");
   694       }
   695     }
   696     assert(prt != NULL && prt->hr() == from_hr, "consequence");
   697   }
   698   // Note that we can't assert "prt->hr() == from_hr", because of the
   699   // possibility of concurrent reuse.  But see head comment of
   700   // OtherRegionsTable for why this is OK.
   701   assert(prt != NULL, "Inv");
   703   if (prt->should_expand(tid)) {
   704     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   705     HeapRegion* prt_hr = prt->hr();
   706     if (prt_hr == from_hr) {
   707       // Make sure the table still corresponds to the same region
   708       prt->par_expand();
   709       prt->add_reference(from, tid);
   710     }
   711     // else: The table has been concurrently coarsened, evicted, and
   712     // the table data structure re-used for another table. So, we
   713     // don't need to add the reference any more given that the table
   714     // has been coarsened and the whole region will be scanned anyway.
   715   } else {
   716     prt->add_reference(from, tid);
   717   }
   718   if (G1RecordHRRSOops) {
   719     HeapRegionRemSet::record(hr(), from);
   720 #if HRRS_VERBOSE
   721     gclog_or_tty->print("Added card " PTR_FORMAT " to region "
   722                         "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
   723                         align_size_down(uintptr_t(from),
   724                                         CardTableModRefBS::card_size),
   725                         hr()->bottom(), from);
   726 #endif
   727   }
   728   assert(contains_reference(from), "We just added it!");
   729 }
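// For illustration, a rough sketch of the policy implemented above (not code
// from this file):
//
//   add_reference(from, tid):
//     1. from-card cache hit?           -> done, card already recorded
//     2. source region already coarse?  -> done, whole region is remembered
//     3. fine-grain PRT for the region? -> set the card bit in that PRT
//     4. otherwise try the sparse table -> small per-region list of cards
//     5. sparse entry overflows         -> promote the region to a fine-grain
//                                          PRT, evicting (coarsening) another
//                                          region if the fine table is full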
   731 PosParPRT*
   732 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
   733   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
   734   PosParPRT* prt = _fine_grain_regions[ind];
   735   while (prt != NULL && prt->hr() != hr) {
   736     prt = prt->next();
   737   }
   738   // Loop postcondition is the method postcondition.
   739   return prt;
   740 }
   743 #define DRT_CENSUS 0
   745 #if DRT_CENSUS
   746 static const int HistoSize = 6;
   747 static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
   748 static int coarsenings = 0;
   749 static int occ_sum = 0;
   750 #endif
   752 jint OtherRegionsTable::_n_coarsenings = 0;
   754 PosParPRT* OtherRegionsTable::delete_region_table() {
   755 #if DRT_CENSUS
   756   int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
   757   const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 };
   758 #endif
   760   assert(_m.owned_by_self(), "Precondition");
   761   assert(_n_fine_entries == _max_fine_entries, "Precondition");
   762   PosParPRT* max = NULL;
   763   jint max_occ = 0;
   764   PosParPRT** max_prev;
   765   size_t max_ind;
   767 #if SAMPLE_FOR_EVICTION
   768   size_t i = _fine_eviction_start;
   769   for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
   770     size_t ii = i;
   771     // Make sure we get a non-NULL sample.
   772     while (_fine_grain_regions[ii] == NULL) {
   773       ii++;
   774       if (ii == _max_fine_entries) ii = 0;
   775       guarantee(ii != i, "We must find one.");
   776     }
   777     PosParPRT** prev = &_fine_grain_regions[ii];
   778     PosParPRT* cur = *prev;
   779     while (cur != NULL) {
   780       jint cur_occ = cur->occupied();
   781       if (max == NULL || cur_occ > max_occ) {
   782         max = cur;
   783         max_prev = prev;
   784         max_ind = i;
   785         max_occ = cur_occ;
   786       }
   787       prev = cur->next_addr();
   788       cur = cur->next();
   789     }
   790     i = i + _fine_eviction_stride;
   791     if (i >= _n_fine_entries) i = i - _n_fine_entries;
   792   }
   793   _fine_eviction_start++;
   794   if (_fine_eviction_start >= _n_fine_entries)
   795     _fine_eviction_start -= _n_fine_entries;
   796 #else
   797   for (int i = 0; i < _max_fine_entries; i++) {
   798     PosParPRT** prev = &_fine_grain_regions[i];
   799     PosParPRT* cur = *prev;
   800     while (cur != NULL) {
   801       jint cur_occ = cur->occupied();
   802 #if DRT_CENSUS
   803       for (int k = 0; k < HistoSize; k++) {
   804         if (cur_occ <= histo_limits[k]) {
   805           histo[k]++; global_histo[k]++; break;
   806         }
   807       }
   808 #endif
   809       if (max == NULL || cur_occ > max_occ) {
   810         max = cur;
   811         max_prev = prev;
   812         max_ind = i;
   813         max_occ = cur_occ;
   814       }
   815       prev = cur->next_addr();
   816       cur = cur->next();
   817     }
   818   }
   819 #endif
   820   // XXX
   821   guarantee(max != NULL, "Since _n_fine_entries > 0");
   822 #if DRT_CENSUS
   823   gclog_or_tty->print_cr("In a coarsening: histo of occs:");
   824   for (int k = 0; k < HistoSize; k++) {
   825     gclog_or_tty->print_cr("  <= %4d: %5d.", histo_limits[k], histo[k]);
   826   }
   827   coarsenings++;
   828   occ_sum += max_occ;
   829   if ((coarsenings % 100) == 0) {
   830     gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings);
   831     for (int k = 0; k < HistoSize; k++) {
   832       gclog_or_tty->print_cr("  <= %4d: %5d.", histo_limits[k], global_histo[k]);
   833     }
   834     gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.",
   835                   (float)occ_sum/(float)coarsenings);
   836   }
   837 #endif
   839   // Set the corresponding coarse bit.
   840   size_t max_hrs_index = max->hr()->hrs_index();
   841   if (!_coarse_map.at(max_hrs_index)) {
   842     _coarse_map.at_put(max_hrs_index, true);
   843     _n_coarse_entries++;
   844 #if 0
   845     gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
   846                "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
   847                hr()->bottom(),
   848                max->hr()->bottom(),
   849                _n_coarse_entries);
   850 #endif
   851   }
   853   // Unsplice.
   854   *max_prev = max->next();
   855   Atomic::inc(&_n_coarsenings);
   856   _n_fine_entries--;
   857   return max;
   858 }
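// For illustration: rather than scanning every bucket on each coarsening, the
// SAMPLE_FOR_EVICTION path above probes _fine_eviction_sample_size buckets,
// _fine_eviction_stride apart, starting at a rotating _fine_eviction_start,
// and evicts the most-occupied PRT it finds.  The caller then represents that
// region with a single bit in _coarse_map instead of a per-card bitmap.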
   861 // At present, this must be called stop-world single-threaded.
   862 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
   863                               BitMap* region_bm, BitMap* card_bm) {
   864   // First eliminate garbage regions from the coarse map.
   865   if (G1RSScrubVerbose)
   866     gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
   867                            hr()->hrs_index());
   869   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   870   if (G1RSScrubVerbose)
   871     gclog_or_tty->print("   Coarse map: before = %d...", _n_coarse_entries);
   872   _coarse_map.set_intersection(*region_bm);
   873   _n_coarse_entries = _coarse_map.count_one_bits();
   874   if (G1RSScrubVerbose)
   875     gclog_or_tty->print_cr("   after = %d.", _n_coarse_entries);
   877   // Now do the fine-grained maps.
   878   for (size_t i = 0; i < _max_fine_entries; i++) {
   879     PosParPRT* cur = _fine_grain_regions[i];
   880     PosParPRT** prev = &_fine_grain_regions[i];
   881     while (cur != NULL) {
   882       PosParPRT* nxt = cur->next();
   883       // If the entire region is dead, eliminate.
   884       if (G1RSScrubVerbose)
   885         gclog_or_tty->print_cr("     For other region "SIZE_FORMAT":",
   886                                cur->hr()->hrs_index());
   887       if (!region_bm->at(cur->hr()->hrs_index())) {
   888         *prev = nxt;
   889         cur->set_next(NULL);
   890         _n_fine_entries--;
   891         if (G1RSScrubVerbose)
   892           gclog_or_tty->print_cr("          deleted via region map.");
   893         PosParPRT::free(cur);
   894       } else {
   895         // Do fine-grain elimination.
   896         if (G1RSScrubVerbose)
   897           gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
   898         cur->scrub(ctbs, card_bm);
   899         if (G1RSScrubVerbose)
   900           gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
   901         // Did that empty the table completely?
   902         if (cur->occupied() == 0) {
   903           *prev = nxt;
   904           cur->set_next(NULL);
   905           _n_fine_entries--;
   906           PosParPRT::free(cur);
   907         } else {
   908           prev = cur->next_addr();
   909         }
   910       }
   911       cur = nxt;
   912     }
   913   }
   914   // Since we may have deleted a from_card_cache entry from the RS, clear
   915   // the FCC.
   916   clear_fcc();
   917 }
   920 size_t OtherRegionsTable::occupied() const {
   921   // Cast away const in this case.
   922   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   923   size_t sum = occ_fine();
   924   sum += occ_sparse();
   925   sum += occ_coarse();
   926   return sum;
   927 }
   929 size_t OtherRegionsTable::occ_fine() const {
   930   size_t sum = 0;
   931   for (size_t i = 0; i < _max_fine_entries; i++) {
   932     PosParPRT* cur = _fine_grain_regions[i];
   933     while (cur != NULL) {
   934       sum += cur->occupied();
   935       cur = cur->next();
   936     }
   937   }
   938   return sum;
   939 }
   941 size_t OtherRegionsTable::occ_coarse() const {
   942   return (_n_coarse_entries * HeapRegion::CardsPerRegion);
   943 }
   945 size_t OtherRegionsTable::occ_sparse() const {
   946   return _sparse_table.occupied();
   947 }
   949 size_t OtherRegionsTable::mem_size() const {
   950   // Cast away const in this case.
   951   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   952   size_t sum = 0;
   953   for (size_t i = 0; i < _max_fine_entries; i++) {
   954     PosParPRT* cur = _fine_grain_regions[i];
   955     while (cur != NULL) {
   956       sum += cur->mem_size();
   957       cur = cur->next();
   958     }
   959   }
   960   sum += (sizeof(PosParPRT*) * _max_fine_entries);
   961   sum += (_coarse_map.size_in_words() * HeapWordSize);
   962   sum += (_sparse_table.mem_size());
   963   sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
   964   return sum;
   965 }
   967 size_t OtherRegionsTable::static_mem_size() {
   968   return _from_card_cache_mem_size;
   969 }
   971 size_t OtherRegionsTable::fl_mem_size() {
   972   return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size();
   973 }
   975 void OtherRegionsTable::clear_fcc() {
   976   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   977     _from_card_cache[i][hr()->hrs_index()] = -1;
   978   }
   979 }
   981 void OtherRegionsTable::clear() {
   982   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   983   for (size_t i = 0; i < _max_fine_entries; i++) {
   984     PosParPRT* cur = _fine_grain_regions[i];
   985     while (cur != NULL) {
   986       PosParPRT* nxt = cur->next();
   987       PosParPRT::free(cur);
   988       cur = nxt;
   989     }
   990     _fine_grain_regions[i] = NULL;
   991   }
   992   _sparse_table.clear();
   993   _coarse_map.clear();
   994   _n_fine_entries = 0;
   995   _n_coarse_entries = 0;
   997   clear_fcc();
   998 }
  1000 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
  1001   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  1002   size_t hrs_ind = from_hr->hrs_index();
  1003   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
  1004   if (del_single_region_table(ind, from_hr)) {
  1005     assert(!_coarse_map.at(hrs_ind), "Inv");
  1006   } else {
  1007     _coarse_map.par_at_put(hrs_ind, 0);
  1008   }
  1009   // Check to see if any of the fcc entries come from here.
  1010   size_t hr_ind = hr()->hrs_index();
  1011   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
  1012     int fcc_ent = _from_card_cache[tid][hr_ind];
  1013     if (fcc_ent != -1) {
  1014       HeapWord* card_addr = (HeapWord*)
  1015         (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
  1016       if (hr()->is_in_reserved(card_addr)) {
  1017         // Clear the from card cache.
  1018         _from_card_cache[tid][hr_ind] = -1;
  1019       }
  1020     }
  1021   }
  1022 }
  1024 bool OtherRegionsTable::del_single_region_table(size_t ind,
  1025                                                 HeapRegion* hr) {
  1026   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  1027   PosParPRT** prev_addr = &_fine_grain_regions[ind];
  1028   PosParPRT* prt = *prev_addr;
  1029   while (prt != NULL && prt->hr() != hr) {
  1030     prev_addr = prt->next_addr();
  1031     prt = prt->next();
  1032   }
  1033   if (prt != NULL) {
  1034     assert(prt->hr() == hr, "Loop postcondition.");
  1035     *prev_addr = prt->next();
  1036     PosParPRT::free(prt);
  1037     _n_fine_entries--;
  1038     return true;
  1039   } else {
  1040     return false;
  1041   }
  1042 }
  1044 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  1045   // Cast away const in this case.
  1046   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
  1047   return contains_reference_locked(from);
  1048 }
  1050 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  1051   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  1052   if (hr == NULL) return false;
  1053   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
  1054   // Is this region in the coarse map?
  1055   if (_coarse_map.at(hr_ind)) return true;
  1057   PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
  1058                                      hr);
  1059   if (prt != NULL) {
  1060     return prt->contains_reference(from);
  1062   } else {
  1063     uintptr_t from_card =
  1064       (uintptr_t(from) >> CardTableModRefBS::card_shift);
  1065     uintptr_t hr_bot_card_index =
  1066       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
  1067     assert(from_card >= hr_bot_card_index, "Inv");
  1068     CardIdx_t card_index = from_card - hr_bot_card_index;
  1069     assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
  1070            "Must be in range.");
  1071     return _sparse_table.contains_card(hr_ind, card_index);
  1072   }
  1073 }
  1077 void
  1078 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  1079   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
  1080 }
  1082 // Determines how many threads can add records to an rset in parallel.
  1083 // This can be done by either mutator threads together with the
  1084 // concurrent refinement threads or GC threads.
  1085 int HeapRegionRemSet::num_par_rem_sets() {
  1086   return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
  1087 }
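// Worked example (hypothetical values, for illustration only): with 8 mutator
// dirty-card-queue par ids, 4 concurrent refinement threads and
// ParallelGCThreads == 6, this returns MAX2(8 + 4, 6) == 12, i.e. one
// remembered-set "slot" per thread that may be adding entries in parallel.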
  1089 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
  1090                                    HeapRegion* hr)
  1091   : _bosa(bosa), _other_regions(hr) {
  1092   reset_for_par_iteration();
  1093 }
  1095 void HeapRegionRemSet::setup_remset_size() {
  1096   // Setup sparse and fine-grain tables sizes.
  1097   // table_size = base * (log(region_size / 1M) + 1)
  1098   int region_size_log_mb = MAX2((int)HeapRegion::LogOfHRGrainBytes - (int)LOG_M, 0);
  1099   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
  1100     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  1101   }
  1102   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
  1103     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  1104   }
  1105   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
  1106 }
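// Worked example (for illustration only): with 8 MB regions,
// region_size_log_mb == LogOfHRGrainBytes - LOG_M == 23 - 20 == 3, so the
// default sizes become
//
//   G1RSetRegionEntries       = G1RSetRegionEntriesBase       * 4
//   G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * 4
//
// i.e. both tables grow with the log of the region size, as the comment at
// the top of the function states.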
  1108 bool HeapRegionRemSet::claim_iter() {
  1109   if (_iter_state != Unclaimed) return false;
  1110   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  1111   return (res == Unclaimed);
  1112 }
  1114 void HeapRegionRemSet::set_iter_complete() {
  1115   _iter_state = Complete;
  1116 }
  1118 bool HeapRegionRemSet::iter_is_complete() {
  1119   return _iter_state == Complete;
  1120 }
  1122 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
  1123   iter->initialize(this);
  1124 }
  1126 #ifndef PRODUCT
  1127 void HeapRegionRemSet::print() const {
  1128   HeapRegionRemSetIterator iter;
  1129   init_iterator(&iter);
  1130   size_t card_index;
  1131   while (iter.has_next(card_index)) {
  1132     HeapWord* card_start =
  1133       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
  1134     gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
  1135   }
  1136   // XXX
  1137   if (iter.n_yielded() != occupied()) {
  1138     gclog_or_tty->print_cr("Yielded disagrees with occupied:");
  1139     gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
  1140                   iter.n_yielded(),
  1141                   iter.n_yielded_coarse(), iter.n_yielded_fine());
  1142     gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
  1143                   occupied(), occ_coarse(), occ_fine());
  1144   }
  1145   guarantee(iter.n_yielded() == occupied(),
  1146             "We should have yielded all the represented cards.");
  1147 }
  1148 #endif
  1150 void HeapRegionRemSet::cleanup() {
  1151   SparsePRT::cleanup_all();
  1152 }
  1154 void HeapRegionRemSet::par_cleanup() {
  1155   PosParPRT::par_contract_all();
  1156 }
  1158 void HeapRegionRemSet::clear() {
  1159   _other_regions.clear();
  1160   assert(occupied() == 0, "Should be clear.");
  1161   reset_for_par_iteration();
  1162 }
  1164 void HeapRegionRemSet::reset_for_par_iteration() {
  1165   _iter_state = Unclaimed;
  1166   _iter_claimed = 0;
  1167   // It's good to check this to make sure that the two methods are in sync.
  1168   assert(verify_ready_for_par_iteration(), "post-condition");
  1169 }
  1171 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
  1172                              BitMap* region_bm, BitMap* card_bm) {
  1173   _other_regions.scrub(ctbs, region_bm, card_bm);
  1174 }
  1176 //-------------------- Iteration --------------------
  1178 HeapRegionRemSetIterator::
  1179 HeapRegionRemSetIterator() :
  1180   _hrrs(NULL),
  1181   _g1h(G1CollectedHeap::heap()),
  1182   _bosa(NULL),
  1183   _sparse_iter() { }
  1185 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
  1186   _hrrs = hrrs;
  1187   _coarse_map = &_hrrs->_other_regions._coarse_map;
  1188   _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
  1189   _bosa = _hrrs->bosa();
  1191   _is = Sparse;
  1192   // Set these values so that we increment to the first region.
  1193   _coarse_cur_region_index = -1;
  1194   _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
  1196   _cur_region_cur_card = 0;
  1198   _fine_array_index = -1;
  1199   _fine_cur_prt = NULL;
  1201   _n_yielded_coarse = 0;
  1202   _n_yielded_fine = 0;
  1203   _n_yielded_sparse = 0;
  1205   _sparse_iter.init(&hrrs->_other_regions._sparse_table);
  1206 }
  1208 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  1209   if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  1210   // Go to the next card.
  1211   _coarse_cur_region_cur_card++;
  1212   // Was that the last card in the current region?
  1213   if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
  1214     // Yes: find the next region.  This may leave _coarse_cur_region_index
  1215     // set to the last index, in which case there are no more coarse
  1216     // regions.
  1217     _coarse_cur_region_index =
  1218       (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
  1219     if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
  1220       _coarse_cur_region_cur_card = 0;
  1221       HeapWord* r_bot =
  1222         _g1h->region_at(_coarse_cur_region_index)->bottom();
  1223       _cur_region_card_offset = _bosa->index_for(r_bot);
  1224     } else {
  1225       return false;
  1226     }
  1227   }
  1228   // If we didn't return false above, then we can yield a card.
  1229   card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  1230   return true;
  1231 }
  1233 void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
  1234   // Otherwise, find the next bucket list in the array.
  1235   _fine_array_index++;
  1236   while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
  1237     _fine_cur_prt = _fine_grain_regions[_fine_array_index];
  1238     if (_fine_cur_prt != NULL) return;
  1239     else _fine_array_index++;
  1240   }
  1241   assert(_fine_cur_prt == NULL, "Loop post");
  1242 }
  1244 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  1245   if (fine_has_next()) {
  1246     _cur_region_cur_card =
  1247       _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
  1248   }
  1249   while (!fine_has_next()) {
  1250     if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
  1251       _cur_region_cur_card = 0;
  1252       _fine_cur_prt = _fine_cur_prt->next();
  1253     }
  1254     if (_fine_cur_prt == NULL) {
  1255       fine_find_next_non_null_prt();
  1256       if (_fine_cur_prt == NULL) return false;
  1257     }
  1258     assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
  1259            "inv.");
  1260     HeapWord* r_bot =
  1261       _fine_cur_prt->hr()->bottom();
  1262     _cur_region_card_offset = _bosa->index_for(r_bot);
  1263     _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
  1264   }
  1265   assert(fine_has_next(), "Or else we exited the loop via the return.");
  1266   card_index = _cur_region_card_offset + _cur_region_cur_card;
  1267   return true;
  1268 }
  1270 bool HeapRegionRemSetIterator::fine_has_next() {
  1271   return
  1272     _fine_cur_prt != NULL &&
  1273     _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
  1274 }
  1276 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  1277   switch (_is) {
  1278   case Sparse:
  1279     if (_sparse_iter.has_next(card_index)) {
  1280       _n_yielded_sparse++;
  1281       return true;
  1282     }
  1283     // Otherwise, deliberate fall-through
  1284     _is = Fine;
  1285   case Fine:
  1286     if (fine_has_next(card_index)) {
  1287       _n_yielded_fine++;
  1288       return true;
  1289     }
  1290     // Otherwise, deliberate fall-through
  1291     _is = Coarse;
  1292   case Coarse:
  1293     if (coarse_has_next(card_index)) {
  1294       _n_yielded_coarse++;
  1295       return true;
  1296     }
  1297     // Otherwise...
  1298     break;
  1299   }
  1300   assert(ParallelGCThreads > 1 ||
  1301          n_yielded() == _hrrs->occupied(),
  1302          "Should have yielded all the cards in the rem set "
  1303          "(in the non-par case).");
  1304   return false;
  1305 }
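// For illustration: has_next() above walks the three component tables in a
// fixed order, falling through when each is exhausted:
//
//   Sparse -> cards recorded in the SparsePRT entries
//   Fine   -> card bitmaps of the per-region tables
//   Coarse -> every card of every region whose _coarse_map bit is set
//
// so a single pass yields each remembered card exactly once, which is what
// the n_yielded() == occupied() assertion (non-parallel case) relies on.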
  1309 OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
  1310 HeapWord**          HeapRegionRemSet::_recorded_cards = NULL;
  1311 HeapRegion**        HeapRegionRemSet::_recorded_regions = NULL;
  1312 int                 HeapRegionRemSet::_n_recorded = 0;
  1314 HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
  1315 int*         HeapRegionRemSet::_recorded_event_index = NULL;
  1316 int          HeapRegionRemSet::_n_recorded_events = 0;
  1318 void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  1319   if (_recorded_oops == NULL) {
  1320     assert(_n_recorded == 0
  1321            && _recorded_cards == NULL
  1322            && _recorded_regions == NULL,
  1323            "Inv");
  1324     _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
  1325     _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded);
  1326     _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded);
  1327   }
  1328   if (_n_recorded == MaxRecorded) {
  1329     gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  1330   } else {
  1331     _recorded_cards[_n_recorded] =
  1332       (HeapWord*)align_size_down(uintptr_t(f),
  1333                                  CardTableModRefBS::card_size);
  1334     _recorded_oops[_n_recorded] = f;
  1335     _recorded_regions[_n_recorded] = hr;
  1336     _n_recorded++;
  1337   }
  1338 }
  1340 void HeapRegionRemSet::record_event(Event evnt) {
  1341   if (!G1RecordHRRSEvents) return;
  1343   if (_recorded_events == NULL) {
  1344     assert(_n_recorded_events == 0
  1345            && _recorded_event_index == NULL,
  1346            "Inv");
  1347     _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents);
  1348     _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents);
  1349   }
  1350   if (_n_recorded_events == MaxRecordedEvents) {
  1351     gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  1352   } else {
  1353     _recorded_events[_n_recorded_events] = evnt;
  1354     _recorded_event_index[_n_recorded_events] = _n_recorded;
  1355     _n_recorded_events++;
  1356   }
  1357 }
  1359 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
  1360   switch (evnt) {
  1361   case Event_EvacStart:
  1362     str->print("Evac Start");
  1363     break;
  1364   case Event_EvacEnd:
  1365     str->print("Evac End");
  1366     break;
  1367   case Event_RSUpdateEnd:
  1368     str->print("RS Update End");
  1369     break;
  1370   }
  1371 }
  1373 void HeapRegionRemSet::print_recorded() {
  1374   int cur_evnt = 0;
  1375   Event cur_evnt_kind;
  1376   int cur_evnt_ind = 0;
  1377   if (_n_recorded_events > 0) {
  1378     cur_evnt_kind = _recorded_events[cur_evnt];
  1379     cur_evnt_ind = _recorded_event_index[cur_evnt];
  1380   }
  1382   for (int i = 0; i < _n_recorded; i++) {
  1383     while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
  1384       gclog_or_tty->print("Event: ");
  1385       print_event(gclog_or_tty, cur_evnt_kind);
  1386       gclog_or_tty->print_cr("");
  1387       cur_evnt++;
  1388       if (cur_evnt < MaxRecordedEvents) {
  1389         cur_evnt_kind = _recorded_events[cur_evnt];
  1390         cur_evnt_ind = _recorded_event_index[cur_evnt];
  1391       }
  1392     }
  1393     gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
  1394                         " for ref " PTR_FORMAT ".\n",
  1395                         _recorded_cards[i], _recorded_regions[i]->bottom(),
  1396                         _recorded_oops[i]);
  1397   }
  1398 }
  1400 void HeapRegionRemSet::reset_for_cleanup_tasks() {
  1401   SparsePRT::reset_for_cleanup_tasks();
  1402 }
  1404 void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  1405   _other_regions.do_cleanup_work(hrrs_cleanup_task);
  1406 }
  1408 void
  1409 HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  1410   SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
  1411 }
  1413 #ifndef PRODUCT
  1414 void HeapRegionRemSet::test() {
  1415   os::sleep(Thread::current(), (jlong)5000, false);
  1416   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1418   // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  1419   // hash bucket.
  1420   HeapRegion* hr0 = g1h->region_at(0);
  1421   HeapRegion* hr1 = g1h->region_at(1);
  1422   HeapRegion* hr2 = g1h->region_at(5);
  1423   HeapRegion* hr3 = g1h->region_at(6);
  1424   HeapRegion* hr4 = g1h->region_at(7);
  1425   HeapRegion* hr5 = g1h->region_at(8);
  1427   HeapWord* hr1_start = hr1->bottom();
  1428   HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  1429   HeapWord* hr1_last = hr1->end() - 1;
  1431   HeapWord* hr2_start = hr2->bottom();
  1432   HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  1433   HeapWord* hr2_last = hr2->end() - 1;
  1435   HeapWord* hr3_start = hr3->bottom();
  1436   HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  1437   HeapWord* hr3_last = hr3->end() - 1;
  1439   HeapRegionRemSet* hrrs = hr0->rem_set();
  1441   // Make three references from region 0x101...
  1442   hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  1443   hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  1444   hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
  1446   hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  1447   hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  1448   hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
  1450   hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  1451   hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  1452   hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
  1454   // Now cause a coarsening.
  1455   hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  1456   hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
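  // (For illustration of the count checked below: 11 references are added in
  //  total; the coarsening replaces one region's 3 fine-grained cards with a
  //  full region's worth, so the expected total is 11 - 3 + CardsPerRegion,
  //  i.e. 11 - 3 + 2048 with 1 MB regions and 512-byte cards.)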
  1458   // Now, does iteration yield these three?
  1459   HeapRegionRemSetIterator iter;
  1460   hrrs->init_iterator(&iter);
  1461   size_t sum = 0;
  1462   size_t card_index;
  1463   while (iter.has_next(card_index)) {
  1464     HeapWord* card_start =
  1465       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
  1466     gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
  1467     sum++;
  1468   }
  1469   guarantee(sum == 11 - 3 + 2048, "Failure");
  1470   guarantee(sum == hrrs->occupied(), "Failure");
  1471 }
  1472 #endif
