src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2493:97ba643ea3ed
child:       2963:c3f1170908be
permissions: -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
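
As background for the summary above, the sketch below illustrates in plain C++ the SATB-logging idea behind the fix: when concurrent marking is active and a non-null referent is read (via the intrinsified Reference.get(), or through the intercepted JNI, reflection, and Unsafe paths), the loaded value is also recorded in an SATB buffer so that marking treats it as live. This is an illustrative sketch only, not the HotSpot code in this changeset; the names marking_active, satb_buffer, and reference_get_barrier are placeholders standing in for the real SATB queue machinery.

    #include <cstddef>
    #include <vector>

    typedef void* oop;                       // stand-in for a heap reference

    static bool marking_active = false;      // assumed flag: concurrent marking in progress
    static std::vector<oop> satb_buffer;     // assumed stand-in for a thread's SATB buffer

    // Conceptual read barrier on java.lang.ref.Reference::referent.
    static oop reference_get_barrier(oop* referent_addr) {
      oop value = *referent_addr;            // the ordinary referent load
      if (value != NULL && marking_active) {
        satb_buffer.push_back(value);        // log it so SATB marking keeps the referent alive
      }
      return value;
    }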

     1 /*
     2  * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
    27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
    28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
    30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    31 #include "memory/allocation.hpp"
    32 #include "memory/space.inline.hpp"
    33 #include "utilities/bitMap.inline.hpp"
    34 #include "utilities/globalDefinitions.hpp"
    36 #define HRRS_VERBOSE 0
    38 #define PRT_COUNT_OCCUPIED 1
    40 // OtherRegionsTable
    42 class PerRegionTable: public CHeapObj {
    43   friend class OtherRegionsTable;
    44   friend class HeapRegionRemSetIterator;
    46   HeapRegion*     _hr;
    47   BitMap          _bm;
    48 #if PRT_COUNT_OCCUPIED
    49   jint            _occupied;
    50 #endif
    51   PerRegionTable* _next_free;
    53   PerRegionTable* next_free() { return _next_free; }
    54   void set_next_free(PerRegionTable* prt) { _next_free = prt; }
    57   static PerRegionTable* _free_list;
    59 #ifdef _MSC_VER
    60   // For some reason, even though the classes are marked as friends, they are unable
    61   // to access CardsPerRegion when it is private/protected. Only the Windows C++
    62   // compiler complains; Sun CC and Linux gcc have no problem with the access when private.
    64   public:
    66 #endif // _MSC_VER
    68 protected:
    69   // We need access in order to union things into the base table.
    70   BitMap* bm() { return &_bm; }
    72 #if PRT_COUNT_OCCUPIED
    73   void recount_occupied() {
    74     _occupied = (jint) bm()->count_one_bits();
    75   }
    76 #endif
    78   PerRegionTable(HeapRegion* hr) :
    79     _hr(hr),
    80 #if PRT_COUNT_OCCUPIED
    81     _occupied(0),
    82 #endif
    83     _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
    84   {}
    86   static void free(PerRegionTable* prt) {
    87     while (true) {
    88       PerRegionTable* fl = _free_list;
    89       prt->set_next_free(fl);
    90       PerRegionTable* res =
    91         (PerRegionTable*)
    92         Atomic::cmpxchg_ptr(prt, &_free_list, fl);
    93       if (res == fl) return;
    94     }
    95     ShouldNotReachHere();
    96   }
    98   static PerRegionTable* alloc(HeapRegion* hr) {
    99     PerRegionTable* fl = _free_list;
   100     while (fl != NULL) {
   101       PerRegionTable* nxt = fl->next_free();
   102       PerRegionTable* res =
   103         (PerRegionTable*)
   104         Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
   105       if (res == fl) {
   106         fl->init(hr);
   107         return fl;
   108       } else {
   109         fl = _free_list;
   110       }
   111     }
   112     assert(fl == NULL, "Loop condition.");
   113     return new PerRegionTable(hr);
   114   }
   116   void add_card_work(CardIdx_t from_card, bool par) {
   117     if (!_bm.at(from_card)) {
   118       if (par) {
   119         if (_bm.par_at_put(from_card, 1)) {
   120 #if PRT_COUNT_OCCUPIED
   121           Atomic::inc(&_occupied);
   122 #endif
   123         }
   124       } else {
   125         _bm.at_put(from_card, 1);
   126 #if PRT_COUNT_OCCUPIED
   127         _occupied++;
   128 #endif
   129       }
   130     }
   131   }
   133   void add_reference_work(OopOrNarrowOopStar from, bool par) {
   134     // Must make this robust in case "from" is not in "_hr", because of
   135     // concurrency.
   137 #if HRRS_VERBOSE
   138     gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
   139                            from, *from);
   140 #endif
   142     HeapRegion* loc_hr = hr();
   143     // If the test below fails, then this table was reused concurrently
   144     // with this operation.  This is OK, since the old table was coarsened,
   145     // and adding a bit to the new table is never incorrect.
   146     if (loc_hr->is_in_reserved(from)) {
   147       size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
   148       CardIdx_t from_card = (CardIdx_t)
   149           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
   151       assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
   152              "Must be in range.");
   153       add_card_work(from_card, par);
   154     }
   155   }
   157 public:
   159   HeapRegion* hr() const { return _hr; }
   161 #if PRT_COUNT_OCCUPIED
   162   jint occupied() const {
   163     // Overkill, but if we ever need it...
   164     // guarantee(_occupied == _bm.count_one_bits(), "Check");
   165     return _occupied;
   166   }
   167 #else
   168   jint occupied() const {
   169     return _bm.count_one_bits();
   170   }
   171 #endif
   173   void init(HeapRegion* hr) {
   174     _hr = hr;
   175 #if PRT_COUNT_OCCUPIED
   176     _occupied = 0;
   177 #endif
   178     _bm.clear();
   179   }
   181   void add_reference(OopOrNarrowOopStar from) {
   182     add_reference_work(from, /*parallel*/ true);
   183   }
   185   void seq_add_reference(OopOrNarrowOopStar from) {
   186     add_reference_work(from, /*parallel*/ false);
   187   }
   189   void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
   190     HeapWord* hr_bot = hr()->bottom();
   191     size_t hr_first_card_index = ctbs->index_for(hr_bot);
   192     bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
   193 #if PRT_COUNT_OCCUPIED
   194     recount_occupied();
   195 #endif
   196   }
   198   void add_card(CardIdx_t from_card_index) {
   199     add_card_work(from_card_index, /*parallel*/ true);
   200   }
   202   void seq_add_card(CardIdx_t from_card_index) {
   203     add_card_work(from_card_index, /*parallel*/ false);
   204   }
   206   // (Destructively) union the bitmap of the current table into the given
   207   // bitmap (which is assumed to be of the same size.)
   208   void union_bitmap_into(BitMap* bm) {
   209     bm->set_union(_bm);
   210   }
   212   // Mem size in bytes.
   213   size_t mem_size() const {
   214     return sizeof(this) + _bm.size_in_words() * HeapWordSize;
   215   }
   217   static size_t fl_mem_size() {
   218     PerRegionTable* cur = _free_list;
   219     size_t res = 0;
   220     while (cur != NULL) {
   221       res += sizeof(PerRegionTable);
   222       cur = cur->next_free();
   223     }
   224     return res;
   225   }
   227   // Requires "from" to be in "hr()".
   228   bool contains_reference(OopOrNarrowOopStar from) const {
   229     assert(hr()->is_in_reserved(from), "Precondition.");
   230     size_t card_ind = pointer_delta(from, hr()->bottom(),
   231                                     CardTableModRefBS::card_size);
   232     return _bm.at(card_ind);
   233   }
   234 };
   236 PerRegionTable* PerRegionTable::_free_list = NULL;
   239 #define COUNT_PAR_EXPANDS 0
   241 #if COUNT_PAR_EXPANDS
   242 static jint n_par_expands = 0;
   243 static jint n_par_contracts = 0;
   244 static jint par_expand_list_len = 0;
   245 static jint max_par_expand_list_len = 0;
   247 static void print_par_expand() {
   248   Atomic::inc(&n_par_expands);
   249   Atomic::inc(&par_expand_list_len);
   250   if (par_expand_list_len > max_par_expand_list_len) {
   251     max_par_expand_list_len = par_expand_list_len;
   252   }
   253   if ((n_par_expands % 10) == 0) {
   254     gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, "
   255                   "len = %d, max_len = %d\n.",
   256                   n_par_expands, n_par_contracts, par_expand_list_len,
   257                   max_par_expand_list_len);
   258   }
   259 }
   260 #endif
   262 class PosParPRT: public PerRegionTable {
   263   PerRegionTable** _par_tables;
   265   enum SomePrivateConstants {
   266     ReserveParTableExpansion = 1
   267   };
   269   void par_contract() {
   270     assert(_par_tables != NULL, "Precondition.");
   271     int n = HeapRegionRemSet::num_par_rem_sets()-1;
   272     for (int i = 0; i < n; i++) {
   273       _par_tables[i]->union_bitmap_into(bm());
   274       PerRegionTable::free(_par_tables[i]);
   275       _par_tables[i] = NULL;
   276     }
   277 #if PRT_COUNT_OCCUPIED
   278     // We must recount the "occupied."
   279     recount_occupied();
   280 #endif
   281     FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables);
   282     _par_tables = NULL;
   283 #if COUNT_PAR_EXPANDS
   284     Atomic::inc(&n_par_contracts);
   285     Atomic::dec(&par_expand_list_len);
   286 #endif
   287   }
   289   static PerRegionTable** _par_table_fl;
   291   PosParPRT* _next;
   293   static PosParPRT* _free_list;
   295   PerRegionTable** par_tables() const {
   296     assert(uintptr_t(NULL) == 0, "Assumption.");
   297     if (uintptr_t(_par_tables) <= ReserveParTableExpansion)
   298       return NULL;
   299     else
   300       return _par_tables;
   301   }
   303   PosParPRT* _next_par_expanded;
   304   PosParPRT* next_par_expanded() { return _next_par_expanded; }
   305   void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; }
   306   static PosParPRT* _par_expanded_list;
   308 public:
   310   PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {}
   312   jint occupied() const {
   313     jint res = PerRegionTable::occupied();
   314     if (par_tables() != NULL) {
   315       for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
   316         res += par_tables()[i]->occupied();
   317       }
   318     }
   319     return res;
   320   }
   322   void init(HeapRegion* hr) {
   323     PerRegionTable::init(hr);
   324     _next = NULL;
   325     if (par_tables() != NULL) {
   326       for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
   327         par_tables()[i]->init(hr);
   328       }
   329     }
   330   }
   332   static void free(PosParPRT* prt) {
   333     while (true) {
   334       PosParPRT* fl = _free_list;
   335       prt->set_next(fl);
   336       PosParPRT* res =
   337         (PosParPRT*)
   338         Atomic::cmpxchg_ptr(prt, &_free_list, fl);
   339       if (res == fl) return;
   340     }
   341     ShouldNotReachHere();
   342   }
   344   static PosParPRT* alloc(HeapRegion* hr) {
   345     PosParPRT* fl = _free_list;
   346     while (fl != NULL) {
   347       PosParPRT* nxt = fl->next();
   348       PosParPRT* res =
   349         (PosParPRT*)
   350         Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
   351       if (res == fl) {
   352         fl->init(hr);
   353         return fl;
   354       } else {
   355         fl = _free_list;
   356       }
   357     }
   358     assert(fl == NULL, "Loop condition.");
   359     return new PosParPRT(hr);
   360   }
   362   PosParPRT* next() const { return _next; }
   363   void set_next(PosParPRT* nxt) { _next = nxt; }
   364   PosParPRT** next_addr() { return &_next; }
   366   bool should_expand(int tid) {
   367     return par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region();
   368   }
   370   void par_expand() {
   371     int n = HeapRegionRemSet::num_par_rem_sets()-1;
   372     if (n <= 0) return;
   373     if (_par_tables == NULL) {
   374       PerRegionTable* res =
   375         (PerRegionTable*)
   376         Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
   377                             &_par_tables, NULL);
   378       if (res != NULL) return;
   379       // Otherwise, we reserved the right to do the expansion.
   381       PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
   382       for (int i = 0; i < n; i++) {
   383         PerRegionTable* ptable = PerRegionTable::alloc(hr());
   384         ptables[i] = ptable;
   385       }
   386       // Here we do not need an atomic.
   387       _par_tables = ptables;
   388 #if COUNT_PAR_EXPANDS
   389       print_par_expand();
   390 #endif
   391       // We must put this table on the expanded list.
   392       PosParPRT* exp_head = _par_expanded_list;
   393       while (true) {
   394         set_next_par_expanded(exp_head);
   395         PosParPRT* res =
   396           (PosParPRT*)
   397           Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
   398         if (res == exp_head) return;
   399         // Otherwise.
   400         exp_head = res;
   401       }
   402       ShouldNotReachHere();
   403     }
   404   }
   406   void add_reference(OopOrNarrowOopStar from, int tid) {
   407     // Expand if necessary.
   408     PerRegionTable** pt = par_tables();
   409     if (pt != NULL) {
   410       // We always have to assume that mods to table 0 are in parallel,
   411       // because of the claiming scheme in parallel expansion.  A thread
   412       // with tid != 0 that finds the table to be NULL, but doesn't succeed
   413       // in claiming the right of expanding it, will end up in the else
   414       // clause of the above if test.  That thread could be delayed, and a
   415       // thread 0 add reference could see the table expanded, and come
   416       // here.  Both threads would be adding in parallel.  But we get to
   417       // not use atomics for tids > 0.
   418       if (tid == 0) {
   419         PerRegionTable::add_reference(from);
   420       } else {
   421         pt[tid-1]->seq_add_reference(from);
   422       }
   423     } else {
   424       // Not expanded -- add to the base table.
   425       PerRegionTable::add_reference(from);
   426     }
   427   }
   429   void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
   430     assert(_par_tables == NULL, "Precondition");
   431     PerRegionTable::scrub(ctbs, card_bm);
   432   }
   434   size_t mem_size() const {
   435     size_t res =
   436       PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable);
   437     if (_par_tables != NULL) {
   438       for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
   439         res += _par_tables[i]->mem_size();
   440       }
   441     }
   442     return res;
   443   }
   445   static size_t fl_mem_size() {
   446     PosParPRT* cur = _free_list;
   447     size_t res = 0;
   448     while (cur != NULL) {
   449       res += sizeof(PosParPRT);
   450       cur = cur->next();
   451     }
   452     return res;
   453   }
   455   bool contains_reference(OopOrNarrowOopStar from) const {
   456     if (PerRegionTable::contains_reference(from)) return true;
   457     if (_par_tables != NULL) {
   458       for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
   459         if (_par_tables[i]->contains_reference(from)) return true;
   460       }
   461     }
   462     return false;
   463   }
   465   static void par_contract_all();
   466 };
   468 void PosParPRT::par_contract_all() {
   469   PosParPRT* hd = _par_expanded_list;
   470   while (hd != NULL) {
   471     PosParPRT* nxt = hd->next_par_expanded();
   472     PosParPRT* res =
   473       (PosParPRT*)
   474       Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd);
   475     if (res == hd) {
   476       // We claimed the right to contract this table.
   477       hd->set_next_par_expanded(NULL);
   478       hd->par_contract();
   479       hd = _par_expanded_list;
   480     } else {
   481       hd = res;
   482     }
   483   }
   484 }
   486 PosParPRT* PosParPRT::_free_list = NULL;
   487 PosParPRT* PosParPRT::_par_expanded_list = NULL;
   489 jint OtherRegionsTable::_cache_probes = 0;
   490 jint OtherRegionsTable::_cache_hits = 0;
   492 size_t OtherRegionsTable::_max_fine_entries = 0;
   493 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
   494 #if SAMPLE_FOR_EVICTION
   495 size_t OtherRegionsTable::_fine_eviction_stride = 0;
   496 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
   497 #endif
   499 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
   500   _g1h(G1CollectedHeap::heap()),
   501   _m(Mutex::leaf, "An OtherRegionsTable lock", true),
   502   _hr(hr),
   503   _coarse_map(G1CollectedHeap::heap()->max_regions(),
   504               false /* in-resource-area */),
   505   _fine_grain_regions(NULL),
   506   _n_fine_entries(0), _n_coarse_entries(0),
   507 #if SAMPLE_FOR_EVICTION
   508   _fine_eviction_start(0),
   509 #endif
   510   _sparse_table(hr)
   511 {
   512   typedef PosParPRT* PosParPRTPtr;
   513   if (_max_fine_entries == 0) {
   514     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
   515     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
   516     _max_fine_entries = (size_t)(1 << max_entries_log);
   517     _mod_max_fine_entries_mask = _max_fine_entries - 1;
   518 #if SAMPLE_FOR_EVICTION
   519     assert(_fine_eviction_sample_size == 0
   520            && _fine_eviction_stride == 0, "All init at same time.");
   521     _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
   522     _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
   523 #endif
   524   }
   525   _fine_grain_regions = new PosParPRTPtr[_max_fine_entries];
   526   if (_fine_grain_regions == NULL)
   527     vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
   528                           "Failed to allocate _fine_grain_entries.");
   529   for (size_t i = 0; i < _max_fine_entries; i++) {
   530     _fine_grain_regions[i] = NULL;
   531   }
   532 }
   534 int** OtherRegionsTable::_from_card_cache = NULL;
   535 size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
   536 size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
   538 void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
   539   _from_card_cache_max_regions = max_regions;
   541   int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
   542   _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs);
   543   for (int i = 0; i < n_par_rs; i++) {
   544     _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions);
   545     for (size_t j = 0; j < max_regions; j++) {
   546       _from_card_cache[i][j] = -1;  // An invalid value.
   547     }
   548   }
   549   _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int);
   550 }
   552 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
   553   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   554     assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
   555     for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
   556       _from_card_cache[i][j] = -1;  // An invalid value.
   557     }
   558   }
   559 }
   561 #ifndef PRODUCT
   562 void OtherRegionsTable::print_from_card_cache() {
   563   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   564     for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
   565       gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
   566                     i, j, _from_card_cache[i][j]);
   567     }
   568   }
   569 }
   570 #endif
   572 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
   573   size_t cur_hrs_ind = hr()->hrs_index();
   575 #if HRRS_VERBOSE
   576   gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
   577                                                   from,
   578                                                   UseCompressedOops
   579                                                   ? oopDesc::load_decode_heap_oop((narrowOop*)from)
   580                                                   : oopDesc::load_decode_heap_oop((oop*)from));
   581 #endif
   583   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
   585 #if HRRS_VERBOSE
   586   gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
   587                 hr()->bottom(), from_card,
   588                 _from_card_cache[tid][cur_hrs_ind]);
   589 #endif
   591 #define COUNT_CACHE 0
   592 #if COUNT_CACHE
   593   jint p = Atomic::add(1, &_cache_probes);
   594   if ((p % 10000) == 0) {
   595     jint hits = _cache_hits;
   596     gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.",
   597                   _cache_hits, p, 100.0* (float)hits/(float)p);
   598   }
   599 #endif
   600   if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
   601 #if HRRS_VERBOSE
   602     gclog_or_tty->print_cr("  from-card cache hit.");
   603 #endif
   604 #if COUNT_CACHE
   605     Atomic::inc(&_cache_hits);
   606 #endif
   607     assert(contains_reference(from), "We just added it!");
   608     return;
   609   } else {
   610     _from_card_cache[tid][cur_hrs_ind] = from_card;
   611   }
   613   // Note that this may be a continued H region.
   614   HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
   615   RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
   617   // If the region is already coarsened, return.
   618   if (_coarse_map.at(from_hrs_ind)) {
   619 #if HRRS_VERBOSE
   620     gclog_or_tty->print_cr("  coarse map hit.");
   621 #endif
   622     assert(contains_reference(from), "We just added it!");
   623     return;
   624   }
   626   // Otherwise find a per-region table to add it to.
   627   size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
   628   PosParPRT* prt = find_region_table(ind, from_hr);
   629   if (prt == NULL) {
   630     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   631     // Confirm that it's really not there...
   632     prt = find_region_table(ind, from_hr);
   633     if (prt == NULL) {
   635       uintptr_t from_hr_bot_card_index =
   636         uintptr_t(from_hr->bottom())
   637           >> CardTableModRefBS::card_shift;
   638       CardIdx_t card_index = from_card - from_hr_bot_card_index;
   639       assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
   640              "Must be in range.");
   641       if (G1HRRSUseSparseTable &&
   642           _sparse_table.add_card(from_hrs_ind, card_index)) {
   643         if (G1RecordHRRSOops) {
   644           HeapRegionRemSet::record(hr(), from);
   645 #if HRRS_VERBOSE
   646           gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
   647                               "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
   648                               align_size_down(uintptr_t(from),
   649                                               CardTableModRefBS::card_size),
   650                               hr()->bottom(), from);
   651 #endif
   652         }
   653 #if HRRS_VERBOSE
   654         gclog_or_tty->print_cr("   added card to sparse table.");
   655 #endif
   656         assert(contains_reference_locked(from), "We just added it!");
   657         return;
   658       } else {
   659 #if HRRS_VERBOSE
   660         gclog_or_tty->print_cr("   [tid %d] sparse table entry "
   661                       "overflow(f: %d, t: %d)",
   662                       tid, from_hrs_ind, cur_hrs_ind);
   663 #endif
   664       }
   666       if (_n_fine_entries == _max_fine_entries) {
   667         prt = delete_region_table();
   668       } else {
   669         prt = PosParPRT::alloc(from_hr);
   670       }
   671       prt->init(from_hr);
   673       PosParPRT* first_prt = _fine_grain_regions[ind];
   674       prt->set_next(first_prt);  // XXX Maybe move to init?
   675       _fine_grain_regions[ind] = prt;
   676       _n_fine_entries++;
   678       if (G1HRRSUseSparseTable) {
   679         // Transfer from sparse to fine-grain.
   680         SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
   681         assert(sprt_entry != NULL, "There should have been an entry");
   682         for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
   683           CardIdx_t c = sprt_entry->card(i);
   684           if (c != SparsePRTEntry::NullEntry) {
   685             prt->add_card(c);
   686           }
   687         }
   688         // Now we can delete the sparse entry.
   689         bool res = _sparse_table.delete_entry(from_hrs_ind);
   690         assert(res, "It should have been there.");
   691       }
   692     }
   693     assert(prt != NULL && prt->hr() == from_hr, "consequence");
   694   }
   695   // Note that we can't assert "prt->hr() == from_hr", because of the
   696   // possibility of concurrent reuse.  But see head comment of
   697   // OtherRegionsTable for why this is OK.
   698   assert(prt != NULL, "Inv");
   700   if (prt->should_expand(tid)) {
   701     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   702     HeapRegion* prt_hr = prt->hr();
   703     if (prt_hr == from_hr) {
   704       // Make sure the table still corresponds to the same region
   705       prt->par_expand();
   706       prt->add_reference(from, tid);
   707     }
   708     // else: The table has been concurrently coarsened, evicted, and
   709     // the table data structure re-used for another table. So, we
   710     // don't need to add the reference any more given that the table
   711     // has been coarsened and the whole region will be scanned anyway.
   712   } else {
   713     prt->add_reference(from, tid);
   714   }
   715   if (G1RecordHRRSOops) {
   716     HeapRegionRemSet::record(hr(), from);
   717 #if HRRS_VERBOSE
   718     gclog_or_tty->print("Added card " PTR_FORMAT " to region "
   719                         "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
   720                         align_size_down(uintptr_t(from),
   721                                         CardTableModRefBS::card_size),
   722                         hr()->bottom(), from);
   723 #endif
   724   }
   725   assert(contains_reference(from), "We just added it!");
   726 }
   728 PosParPRT*
   729 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
   730   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
   731   PosParPRT* prt = _fine_grain_regions[ind];
   732   while (prt != NULL && prt->hr() != hr) {
   733     prt = prt->next();
   734   }
   735   // Loop postcondition is the method postcondition.
   736   return prt;
   737 }
   740 #define DRT_CENSUS 0
   742 #if DRT_CENSUS
   743 static const int HistoSize = 6;
   744 static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
   745 static int coarsenings = 0;
   746 static int occ_sum = 0;
   747 #endif
   749 jint OtherRegionsTable::_n_coarsenings = 0;
   751 PosParPRT* OtherRegionsTable::delete_region_table() {
   752 #if DRT_CENSUS
   753   int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
   754   const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 };
   755 #endif
   757   assert(_m.owned_by_self(), "Precondition");
   758   assert(_n_fine_entries == _max_fine_entries, "Precondition");
   759   PosParPRT* max = NULL;
   760   jint max_occ = 0;
   761   PosParPRT** max_prev;
   762   size_t max_ind;
   764 #if SAMPLE_FOR_EVICTION
   765   size_t i = _fine_eviction_start;
   766   for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
   767     size_t ii = i;
   768     // Make sure we get a non-NULL sample.
   769     while (_fine_grain_regions[ii] == NULL) {
   770       ii++;
   771       if (ii == _max_fine_entries) ii = 0;
   772       guarantee(ii != i, "We must find one.");
   773     }
   774     PosParPRT** prev = &_fine_grain_regions[ii];
   775     PosParPRT* cur = *prev;
   776     while (cur != NULL) {
   777       jint cur_occ = cur->occupied();
   778       if (max == NULL || cur_occ > max_occ) {
   779         max = cur;
   780         max_prev = prev;
   781         max_ind = i;
   782         max_occ = cur_occ;
   783       }
   784       prev = cur->next_addr();
   785       cur = cur->next();
   786     }
   787     i = i + _fine_eviction_stride;
   788     if (i >= _n_fine_entries) i = i - _n_fine_entries;
   789   }
   790   _fine_eviction_start++;
   791   if (_fine_eviction_start >= _n_fine_entries)
   792     _fine_eviction_start -= _n_fine_entries;
   793 #else
   794   for (int i = 0; i < _max_fine_entries; i++) {
   795     PosParPRT** prev = &_fine_grain_regions[i];
   796     PosParPRT* cur = *prev;
   797     while (cur != NULL) {
   798       jint cur_occ = cur->occupied();
   799 #if DRT_CENSUS
   800       for (int k = 0; k < HistoSize; k++) {
   801         if (cur_occ <= histo_limits[k]) {
   802           histo[k]++; global_histo[k]++; break;
   803         }
   804       }
   805 #endif
   806       if (max == NULL || cur_occ > max_occ) {
   807         max = cur;
   808         max_prev = prev;
   809         max_ind = i;
   810         max_occ = cur_occ;
   811       }
   812       prev = cur->next_addr();
   813       cur = cur->next();
   814     }
   815   }
   816 #endif
   817   // XXX
   818   guarantee(max != NULL, "Since _n_fine_entries > 0");
   819 #if DRT_CENSUS
   820   gclog_or_tty->print_cr("In a coarsening: histo of occs:");
   821   for (int k = 0; k < HistoSize; k++) {
   822     gclog_or_tty->print_cr("  <= %4d: %5d.", histo_limits[k], histo[k]);
   823   }
   824   coarsenings++;
   825   occ_sum += max_occ;
   826   if ((coarsenings % 100) == 0) {
   827     gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings);
   828     for (int k = 0; k < HistoSize; k++) {
   829       gclog_or_tty->print_cr("  <= %4d: %5d.", histo_limits[k], global_histo[k]);
   830     }
   831     gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.",
   832                   (float)occ_sum/(float)coarsenings);
   833   }
   834 #endif
   836   // Set the corresponding coarse bit.
   837   int max_hrs_index = max->hr()->hrs_index();
   838   if (!_coarse_map.at(max_hrs_index)) {
   839     _coarse_map.at_put(max_hrs_index, true);
   840     _n_coarse_entries++;
   841 #if 0
   842     gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
   843                "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
   844                hr()->bottom(),
   845                max->hr()->bottom(),
   846                _n_coarse_entries);
   847 #endif
   848   }
   850   // Unsplice.
   851   *max_prev = max->next();
   852   Atomic::inc(&_n_coarsenings);
   853   _n_fine_entries--;
   854   return max;
   855 }
   858 // At present, this must be called stop-world single-threaded.
   859 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
   860                               BitMap* region_bm, BitMap* card_bm) {
   861   // First, eliminate garbage regions from the coarse map.
   862   if (G1RSScrubVerbose)
   863     gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());
   865   assert(_coarse_map.size() == region_bm->size(), "Precondition");
   866   if (G1RSScrubVerbose)
   867     gclog_or_tty->print("   Coarse map: before = %d...", _n_coarse_entries);
   868   _coarse_map.set_intersection(*region_bm);
   869   _n_coarse_entries = _coarse_map.count_one_bits();
   870   if (G1RSScrubVerbose)
   871     gclog_or_tty->print_cr("   after = %d.", _n_coarse_entries);
   873   // Now do the fine-grained maps.
   874   for (size_t i = 0; i < _max_fine_entries; i++) {
   875     PosParPRT* cur = _fine_grain_regions[i];
   876     PosParPRT** prev = &_fine_grain_regions[i];
   877     while (cur != NULL) {
   878       PosParPRT* nxt = cur->next();
   879       // If the entire region is dead, eliminate.
   880       if (G1RSScrubVerbose)
   881         gclog_or_tty->print_cr("     For other region %d:", cur->hr()->hrs_index());
   882       if (!region_bm->at(cur->hr()->hrs_index())) {
   883         *prev = nxt;
   884         cur->set_next(NULL);
   885         _n_fine_entries--;
   886         if (G1RSScrubVerbose)
   887           gclog_or_tty->print_cr("          deleted via region map.");
   888         PosParPRT::free(cur);
   889       } else {
   890         // Do fine-grain elimination.
   891         if (G1RSScrubVerbose)
   892           gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
   893         cur->scrub(ctbs, card_bm);
   894         if (G1RSScrubVerbose)
   895           gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
   896         // Did that empty the table completely?
   897         if (cur->occupied() == 0) {
   898           *prev = nxt;
   899           cur->set_next(NULL);
   900           _n_fine_entries--;
   901           PosParPRT::free(cur);
   902         } else {
   903           prev = cur->next_addr();
   904         }
   905       }
   906       cur = nxt;
   907     }
   908   }
   909   // Since we may have deleted a from_card_cache entry from the RS, clear
   910   // the FCC.
   911   clear_fcc();
   912 }
   915 size_t OtherRegionsTable::occupied() const {
   916   // Cast away const in this case.
   917   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   918   size_t sum = occ_fine();
   919   sum += occ_sparse();
   920   sum += occ_coarse();
   921   return sum;
   922 }
   924 size_t OtherRegionsTable::occ_fine() const {
   925   size_t sum = 0;
   926   for (size_t i = 0; i < _max_fine_entries; i++) {
   927     PosParPRT* cur = _fine_grain_regions[i];
   928     while (cur != NULL) {
   929       sum += cur->occupied();
   930       cur = cur->next();
   931     }
   932   }
   933   return sum;
   934 }
   936 size_t OtherRegionsTable::occ_coarse() const {
   937   return (_n_coarse_entries * HeapRegion::CardsPerRegion);
   938 }
   940 size_t OtherRegionsTable::occ_sparse() const {
   941   return _sparse_table.occupied();
   942 }
   944 size_t OtherRegionsTable::mem_size() const {
   945   // Cast away const in this case.
   946   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
   947   size_t sum = 0;
   948   for (size_t i = 0; i < _max_fine_entries; i++) {
   949     PosParPRT* cur = _fine_grain_regions[i];
   950     while (cur != NULL) {
   951       sum += cur->mem_size();
   952       cur = cur->next();
   953     }
   954   }
   955   sum += (sizeof(PosParPRT*) * _max_fine_entries);
   956   sum += (_coarse_map.size_in_words() * HeapWordSize);
   957   sum += (_sparse_table.mem_size());
   958   sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
   959   return sum;
   960 }
   962 size_t OtherRegionsTable::static_mem_size() {
   963   return _from_card_cache_mem_size;
   964 }
   966 size_t OtherRegionsTable::fl_mem_size() {
   967   return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size();
   968 }
   970 void OtherRegionsTable::clear_fcc() {
   971   for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
   972     _from_card_cache[i][hr()->hrs_index()] = -1;
   973   }
   974 }
   976 void OtherRegionsTable::clear() {
   977   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   978   for (size_t i = 0; i < _max_fine_entries; i++) {
   979     PosParPRT* cur = _fine_grain_regions[i];
   980     while (cur != NULL) {
   981       PosParPRT* nxt = cur->next();
   982       PosParPRT::free(cur);
   983       cur = nxt;
   984     }
   985     _fine_grain_regions[i] = NULL;
   986   }
   987   _sparse_table.clear();
   988   _coarse_map.clear();
   989   _n_fine_entries = 0;
   990   _n_coarse_entries = 0;
   992   clear_fcc();
   993 }
   995 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   996   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
   997   size_t hrs_ind = (size_t)from_hr->hrs_index();
   998   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   999   if (del_single_region_table(ind, from_hr)) {
  1000     assert(!_coarse_map.at(hrs_ind), "Inv");
  1001   } else {
  1002     _coarse_map.par_at_put(hrs_ind, 0);
  1003   }
  1004   // Check to see if any of the fcc entries come from here.
  1005   int hr_ind = hr()->hrs_index();
  1006   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
  1007     int fcc_ent = _from_card_cache[tid][hr_ind];
  1008     if (fcc_ent != -1) {
  1009       HeapWord* card_addr = (HeapWord*)
  1010         (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
  1011       if (hr()->is_in_reserved(card_addr)) {
  1012         // Clear the from card cache.
  1013         _from_card_cache[tid][hr_ind] = -1;
  1014       }
  1015     }
  1016   }
  1017 }
  1019 bool OtherRegionsTable::del_single_region_table(size_t ind,
  1020                                                 HeapRegion* hr) {
  1021   assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  1022   PosParPRT** prev_addr = &_fine_grain_regions[ind];
  1023   PosParPRT* prt = *prev_addr;
  1024   while (prt != NULL && prt->hr() != hr) {
  1025     prev_addr = prt->next_addr();
  1026     prt = prt->next();
  1027   }
  1028   if (prt != NULL) {
  1029     assert(prt->hr() == hr, "Loop postcondition.");
  1030     *prev_addr = prt->next();
  1031     PosParPRT::free(prt);
  1032     _n_fine_entries--;
  1033     return true;
  1034   } else {
  1035     return false;
  1036   }
  1037 }
  1039 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  1040   // Cast away const in this case.
  1041   MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
  1042   return contains_reference_locked(from);
  1043 }
  1045 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  1046   HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  1047   if (hr == NULL) return false;
  1048   RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
  1049   // Is this region in the coarse map?
  1050   if (_coarse_map.at(hr_ind)) return true;
  1052   PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
  1053                                      hr);
  1054   if (prt != NULL) {
  1055     return prt->contains_reference(from);
  1057   } else {
  1058     uintptr_t from_card =
  1059       (uintptr_t(from) >> CardTableModRefBS::card_shift);
  1060     uintptr_t hr_bot_card_index =
  1061       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
  1062     assert(from_card >= hr_bot_card_index, "Inv");
  1063     CardIdx_t card_index = from_card - hr_bot_card_index;
  1064     assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
  1065            "Must be in range.");
  1066     return _sparse_table.contains_card(hr_ind, card_index);
  1067   }
  1068 }
  1072 void
  1073 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  1074   _sparse_table.do_cleanup_work(hrrs_cleanup_task);
  1075 }
  1077 // Determines how many threads can add records to an rset in parallel.
  1078 // These can be either mutator threads together with the concurrent
  1079 // refinement threads, or GC threads.
  1080 int HeapRegionRemSet::num_par_rem_sets() {
  1081   return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
  1082 }
  1084 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
  1085                                    HeapRegion* hr)
  1086   : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
  1089 void HeapRegionRemSet::setup_remset_size() {
  1090   // Setup sparse and fine-grain tables sizes.
  1091   // table_size = base * (log(region_size / 1M) + 1)
  1092   int region_size_log_mb = MAX2((int)HeapRegion::LogOfHRGrainBytes - (int)LOG_M, 0);
  1093   if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
  1094     G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  1095   }
  1096   if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
  1097     G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  1098   }
  1099   guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
  1100 }
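       // Worked example (annotation, not part of the original file): with 4 MB regions,
       // region_size_log_mb = log2(4M) - log2(1M) = 2, so the defaults above become
       //   G1RSetRegionEntries       = G1RSetRegionEntriesBase       * (2 + 1)
       //   G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (2 + 1)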
  1102 void HeapRegionRemSet::init_for_par_iteration() {
  1103   _iter_state = Unclaimed;
  1104 }
  1106 bool HeapRegionRemSet::claim_iter() {
  1107   if (_iter_state != Unclaimed) return false;
  1108   jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  1109   return (res == Unclaimed);
  1110 }
  1112 void HeapRegionRemSet::set_iter_complete() {
  1113   _iter_state = Complete;
  1114 }
  1116 bool HeapRegionRemSet::iter_is_complete() {
  1117   return _iter_state == Complete;
  1118 }
  1121 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
  1122   iter->initialize(this);
  1123 }
  1125 #ifndef PRODUCT
  1126 void HeapRegionRemSet::print() const {
  1127   HeapRegionRemSetIterator iter;
  1128   init_iterator(&iter);
  1129   size_t card_index;
  1130   while (iter.has_next(card_index)) {
  1131     HeapWord* card_start =
  1132       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
  1133     gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
  1134   }
  1135   // XXX
  1136   if (iter.n_yielded() != occupied()) {
  1137     gclog_or_tty->print_cr("Yielded disagrees with occupied:");
  1138     gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
  1139                   iter.n_yielded(),
  1140                   iter.n_yielded_coarse(), iter.n_yielded_fine());
  1141     gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
  1142                   occupied(), occ_coarse(), occ_fine());
  1143   }
  1144   guarantee(iter.n_yielded() == occupied(),
  1145             "We should have yielded all the represented cards.");
  1146 }
  1147 #endif
  1149 void HeapRegionRemSet::cleanup() {
  1150   SparsePRT::cleanup_all();
  1151 }
  1153 void HeapRegionRemSet::par_cleanup() {
  1154   PosParPRT::par_contract_all();
  1155 }
  1157 void HeapRegionRemSet::clear() {
  1158   _other_regions.clear();
  1159   assert(occupied() == 0, "Should be clear.");
  1160 }
  1162 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
  1163                              BitMap* region_bm, BitMap* card_bm) {
  1164   _other_regions.scrub(ctbs, region_bm, card_bm);
  1165 }
  1167 //-------------------- Iteration --------------------
  1169 HeapRegionRemSetIterator::
  1170 HeapRegionRemSetIterator() :
  1171   _hrrs(NULL),
  1172   _g1h(G1CollectedHeap::heap()),
  1173   _bosa(NULL),
  1174   _sparse_iter() { }
  1176 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
  1177   _hrrs = hrrs;
  1178   _coarse_map = &_hrrs->_other_regions._coarse_map;
  1179   _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
  1180   _bosa = _hrrs->bosa();
  1182   _is = Sparse;
  1183   // Set these values so that we increment to the first region.
  1184   _coarse_cur_region_index = -1;
  1185   _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
  1187   _cur_region_cur_card = 0;
  1189   _fine_array_index = -1;
  1190   _fine_cur_prt = NULL;
  1192   _n_yielded_coarse = 0;
  1193   _n_yielded_fine = 0;
  1194   _n_yielded_sparse = 0;
  1196   _sparse_iter.init(&hrrs->_other_regions._sparse_table);
  1197 }
  1199 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  1200   if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  1201   // Go to the next card.
  1202   _coarse_cur_region_cur_card++;
  1203   // Was that the last card in the current region?
  1204   if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
  1205     // Yes: find the next region.  This may leave _coarse_cur_region_index
  1206     // set to the last index, in which case there are no more coarse
  1207     // regions.
  1208     _coarse_cur_region_index =
  1209       (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
  1210     if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
  1211       _coarse_cur_region_cur_card = 0;
  1212       HeapWord* r_bot =
  1213         _g1h->region_at(_coarse_cur_region_index)->bottom();
  1214       _cur_region_card_offset = _bosa->index_for(r_bot);
  1215     } else {
  1216       return false;
  1217     }
  1218   }
  1219   // If we didn't return false above, then we can yield a card.
  1220   card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  1221   return true;
  1222 }
  1224 void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
  1225   // Otherwise, find the next bucket list in the array.
  1226   _fine_array_index++;
  1227   while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
  1228     _fine_cur_prt = _fine_grain_regions[_fine_array_index];
  1229     if (_fine_cur_prt != NULL) return;
  1230     else _fine_array_index++;
  1231   }
  1232   assert(_fine_cur_prt == NULL, "Loop post");
  1233 }
  1235 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  1236   if (fine_has_next()) {
  1237     _cur_region_cur_card =
  1238       _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
  1239   }
  1240   while (!fine_has_next()) {
  1241     if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
  1242       _cur_region_cur_card = 0;
  1243       _fine_cur_prt = _fine_cur_prt->next();
  1244     }
  1245     if (_fine_cur_prt == NULL) {
  1246       fine_find_next_non_null_prt();
  1247       if (_fine_cur_prt == NULL) return false;
  1248     }
  1249     assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
  1250            "inv.");
  1251     HeapWord* r_bot =
  1252       _fine_cur_prt->hr()->bottom();
  1253     _cur_region_card_offset = _bosa->index_for(r_bot);
  1254     _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
  1255   }
  1256   assert(fine_has_next(), "Or else we exited the loop via the return.");
  1257   card_index = _cur_region_card_offset + _cur_region_cur_card;
  1258   return true;
  1259 }
  1261 bool HeapRegionRemSetIterator::fine_has_next() {
  1262   return
  1263     _fine_cur_prt != NULL &&
  1264     _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
  1265 }
  1267 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  1268   switch (_is) {
  1269   case Sparse:
  1270     if (_sparse_iter.has_next(card_index)) {
  1271       _n_yielded_sparse++;
  1272       return true;
  1273     }
  1274     // Otherwise, deliberate fall-through
  1275     _is = Fine;
  1276   case Fine:
  1277     if (fine_has_next(card_index)) {
  1278       _n_yielded_fine++;
  1279       return true;
  1280     }
  1281     // Otherwise, deliberate fall-through
  1282     _is = Coarse;
  1283   case Coarse:
  1284     if (coarse_has_next(card_index)) {
  1285       _n_yielded_coarse++;
  1286       return true;
  1287     }
  1288     // Otherwise...
  1289     break;
  1290   }
  1291   assert(ParallelGCThreads > 1 ||
  1292          n_yielded() == _hrrs->occupied(),
  1293          "Should have yielded all the cards in the rem set "
  1294          "(in the non-par case).");
  1295   return false;
  1296 }
  1300 OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
  1301 HeapWord**          HeapRegionRemSet::_recorded_cards = NULL;
  1302 HeapRegion**        HeapRegionRemSet::_recorded_regions = NULL;
  1303 int                 HeapRegionRemSet::_n_recorded = 0;
  1305 HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
  1306 int*         HeapRegionRemSet::_recorded_event_index = NULL;
  1307 int          HeapRegionRemSet::_n_recorded_events = 0;
  1309 void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  1310   if (_recorded_oops == NULL) {
  1311     assert(_n_recorded == 0
  1312            && _recorded_cards == NULL
  1313            && _recorded_regions == NULL,
  1314            "Inv");
  1315     _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
  1316     _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded);
  1317     _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded);
  1318   }
  1319   if (_n_recorded == MaxRecorded) {
  1320     gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  1321   } else {
  1322     _recorded_cards[_n_recorded] =
  1323       (HeapWord*)align_size_down(uintptr_t(f),
  1324                                  CardTableModRefBS::card_size);
  1325     _recorded_oops[_n_recorded] = f;
  1326     _recorded_regions[_n_recorded] = hr;
  1327     _n_recorded++;
  1328   }
  1329 }
  1331 void HeapRegionRemSet::record_event(Event evnt) {
  1332   if (!G1RecordHRRSEvents) return;
  1334   if (_recorded_events == NULL) {
  1335     assert(_n_recorded_events == 0
  1336            && _recorded_event_index == NULL,
  1337            "Inv");
  1338     _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents);
  1339     _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents);
  1340   }
  1341   if (_n_recorded_events == MaxRecordedEvents) {
  1342     gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  1343   } else {
  1344     _recorded_events[_n_recorded_events] = evnt;
  1345     _recorded_event_index[_n_recorded_events] = _n_recorded;
  1346     _n_recorded_events++;
  1347   }
  1348 }
  1350 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
  1351   switch (evnt) {
  1352   case Event_EvacStart:
  1353     str->print("Evac Start");
  1354     break;
  1355   case Event_EvacEnd:
  1356     str->print("Evac End");
  1357     break;
  1358   case Event_RSUpdateEnd:
  1359     str->print("RS Update End");
  1360     break;
  1361   }
  1362 }
  1364 void HeapRegionRemSet::print_recorded() {
  1365   int cur_evnt = 0;
  1366   Event cur_evnt_kind;
  1367   int cur_evnt_ind = 0;
  1368   if (_n_recorded_events > 0) {
  1369     cur_evnt_kind = _recorded_events[cur_evnt];
  1370     cur_evnt_ind = _recorded_event_index[cur_evnt];
  1371   }
  1373   for (int i = 0; i < _n_recorded; i++) {
  1374     while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
  1375       gclog_or_tty->print("Event: ");
  1376       print_event(gclog_or_tty, cur_evnt_kind);
  1377       gclog_or_tty->print_cr("");
  1378       cur_evnt++;
  1379       if (cur_evnt < MaxRecordedEvents) {
  1380         cur_evnt_kind = _recorded_events[cur_evnt];
  1381         cur_evnt_ind = _recorded_event_index[cur_evnt];
  1382       }
  1383     }
  1384     gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
  1385                         " for ref " PTR_FORMAT ".\n",
  1386                         _recorded_cards[i], _recorded_regions[i]->bottom(),
  1387                         _recorded_oops[i]);
  1388   }
  1389 }
  1391 void HeapRegionRemSet::reset_for_cleanup_tasks() {
  1392   SparsePRT::reset_for_cleanup_tasks();
  1393 }
  1395 void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  1396   _other_regions.do_cleanup_work(hrrs_cleanup_task);
  1397 }
  1399 void
  1400 HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  1401   SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
  1402 }
  1404 #ifndef PRODUCT
  1405 void HeapRegionRemSet::test() {
  1406   os::sleep(Thread::current(), (jlong)5000, false);
  1407   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  1409   // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  1410   // hash bucket.
  1411   HeapRegion* hr0 = g1h->region_at(0);
  1412   HeapRegion* hr1 = g1h->region_at(1);
  1413   HeapRegion* hr2 = g1h->region_at(5);
  1414   HeapRegion* hr3 = g1h->region_at(6);
  1415   HeapRegion* hr4 = g1h->region_at(7);
  1416   HeapRegion* hr5 = g1h->region_at(8);
  1418   HeapWord* hr1_start = hr1->bottom();
  1419   HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  1420   HeapWord* hr1_last = hr1->end() - 1;
  1422   HeapWord* hr2_start = hr2->bottom();
  1423   HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  1424   HeapWord* hr2_last = hr2->end() - 1;
  1426   HeapWord* hr3_start = hr3->bottom();
  1427   HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  1428   HeapWord* hr3_last = hr3->end() - 1;
  1430   HeapRegionRemSet* hrrs = hr0->rem_set();
  1432   // Make three references from region 0x101...
  1433   hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  1434   hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  1435   hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
  1437   hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  1438   hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  1439   hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
  1441   hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  1442   hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  1443   hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
  1445   // Now cause a coarsening.
  1446   hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  1447   hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
  1449   // Now, does iteration yield these three?
  1450   HeapRegionRemSetIterator iter;
  1451   hrrs->init_iterator(&iter);
  1452   size_t sum = 0;
  1453   size_t card_index;
  1454   while (iter.has_next(card_index)) {
  1455     HeapWord* card_start =
  1456       G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
  1457     gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
  1458     sum++;
  1459   }
  1460   guarantee(sum == 11 - 3 + 2048, "Failure");
  1461   guarantee(sum == hrrs->occupied(), "Failure");
  1462 }
  1463 #endif
