src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

author       tonyp
date         Tue, 10 Jan 2012 18:58:13 -0500
changeset    3416:2ace1c4ee8da
parent       3216:5e5d4821bf07
child        3713:720b6a76dd9d
permissions  -rw-r--r--

6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
Summary: This change simplifies the interaction between GC and concurrent marking. By disabling survivor spaces during the initial-mark pause we don't need to propagate marks of objects we copy during each GC (since we never need to copy an explicitly marked object).
Reviewed-by: johnc, brutisso

ysr@777 1 /*
tonyp@2493 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 31 #include "memory/allocation.hpp"
stefank@2314 32 #include "memory/space.inline.hpp"
stefank@2314 33 #include "utilities/bitMap.inline.hpp"
stefank@2314 34 #include "utilities/globalDefinitions.hpp"
ysr@777 35
ysr@777 36 #define HRRS_VERBOSE 0
ysr@777 37
ysr@777 38 #define PRT_COUNT_OCCUPIED 1
ysr@777 39
ysr@777 40 // OtherRegionsTable
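//
// A heap region's remembered set records, per "from" region, which cards may
// contain pointers into this region. Roughly, the classes below keep that
// information at three granularities: a sparse table of card indices for
// "from" regions that contribute only a few cards, a fine-grain
// PerRegionTable (a card bitmap) once a region contributes more, and a
// coarse bitmap with one bit per "from" region once even the fine-grain
// table has been evicted (see delete_region_table()).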
ysr@777 41
ysr@777 42 class PerRegionTable: public CHeapObj {
ysr@777 43 friend class OtherRegionsTable;
ysr@777 44 friend class HeapRegionRemSetIterator;
ysr@777 45
ysr@777 46 HeapRegion* _hr;
ysr@777 47 BitMap _bm;
ysr@777 48 #if PRT_COUNT_OCCUPIED
ysr@777 49 jint _occupied;
ysr@777 50 #endif
ysr@777 51 PerRegionTable* _next_free;
ysr@777 52
ysr@777 53 PerRegionTable* next_free() { return _next_free; }
ysr@777 54 void set_next_free(PerRegionTable* prt) { _next_free = prt; }
ysr@777 55
ysr@777 56
ysr@777 57 static PerRegionTable* _free_list;
ysr@777 58
ysr@777 59 #ifdef _MSC_VER
ysr@777 60 // For some reason, even though the classes are marked as friend they are unable
ysr@777 61 // to access CardsPerRegion when private/protected. Only the Windows C++ compiler
ysr@777 62 // complains about this; Sun CC and Linux gcc don't have a problem with access when private.
ysr@777 63
ysr@777 64 public:
ysr@777 65
ysr@777 66 #endif // _MSC_VER
ysr@777 67
ysr@777 68 protected:
ysr@777 69 // We need access in order to union things into the base table.
ysr@777 70 BitMap* bm() { return &_bm; }
ysr@777 71
apetrusenko@980 72 #if PRT_COUNT_OCCUPIED
ysr@777 73 void recount_occupied() {
ysr@777 74 _occupied = (jint) bm()->count_one_bits();
ysr@777 75 }
apetrusenko@980 76 #endif
ysr@777 77
ysr@777 78 PerRegionTable(HeapRegion* hr) :
ysr@777 79 _hr(hr),
ysr@777 80 #if PRT_COUNT_OCCUPIED
ysr@777 81 _occupied(0),
ysr@777 82 #endif
tonyp@1377 83 _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
ysr@777 84 {}
ysr@777 85
ysr@777 86 static void free(PerRegionTable* prt) {
ysr@777 87 while (true) {
ysr@777 88 PerRegionTable* fl = _free_list;
ysr@777 89 prt->set_next_free(fl);
ysr@777 90 PerRegionTable* res =
ysr@777 91 (PerRegionTable*)
ysr@777 92 Atomic::cmpxchg_ptr(prt, &_free_list, fl);
ysr@777 93 if (res == fl) return;
ysr@777 94 }
ysr@777 95 ShouldNotReachHere();
ysr@777 96 }
ysr@777 97
ysr@777 98 static PerRegionTable* alloc(HeapRegion* hr) {
ysr@777 99 PerRegionTable* fl = _free_list;
ysr@777 100 while (fl != NULL) {
ysr@777 101 PerRegionTable* nxt = fl->next_free();
ysr@777 102 PerRegionTable* res =
ysr@777 103 (PerRegionTable*)
ysr@777 104 Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
ysr@777 105 if (res == fl) {
ysr@777 106 fl->init(hr);
ysr@777 107 return fl;
ysr@777 108 } else {
ysr@777 109 fl = _free_list;
ysr@777 110 }
ysr@777 111 }
ysr@777 112 assert(fl == NULL, "Loop condition.");
ysr@777 113 return new PerRegionTable(hr);
ysr@777 114 }
ysr@777 115
johnc@1242 116 void add_card_work(CardIdx_t from_card, bool par) {
ysr@777 117 if (!_bm.at(from_card)) {
ysr@777 118 if (par) {
ysr@777 119 if (_bm.par_at_put(from_card, 1)) {
ysr@777 120 #if PRT_COUNT_OCCUPIED
ysr@777 121 Atomic::inc(&_occupied);
ysr@777 122 #endif
ysr@777 123 }
ysr@777 124 } else {
ysr@777 125 _bm.at_put(from_card, 1);
ysr@777 126 #if PRT_COUNT_OCCUPIED
ysr@777 127 _occupied++;
ysr@777 128 #endif
ysr@777 129 }
ysr@777 130 }
ysr@777 131 }
ysr@777 132
ysr@1280 133 void add_reference_work(OopOrNarrowOopStar from, bool par) {
ysr@777 134 // Must make this robust in case "from" is not in "_hr", because of
ysr@777 135 // concurrency.
ysr@777 136
ysr@777 137 #if HRRS_VERBOSE
ysr@777 138 gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
ysr@777 139 from, *from);
ysr@777 140 #endif
ysr@777 141
ysr@777 142 HeapRegion* loc_hr = hr();
ysr@777 143 // If the test below fails, then this table was reused concurrently
ysr@777 144 // with this operation. This is OK, since the old table was coarsened,
ysr@777 145 // and adding a bit to the new table is never incorrect.
brutisso@3216 146 // If the table used to belong to a continues humongous region and is
brutisso@3216 147 // now reused for the corresponding starts humongous region, we need to
brutisso@3216 148 // make sure that we detect this. Thus, we call is_in_reserved_raw()
brutisso@3216 149 // instead of just is_in_reserved() here.
brutisso@3216 150 if (loc_hr->is_in_reserved_raw(from)) {
ysr@777 151 size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
johnc@1242 152 CardIdx_t from_card = (CardIdx_t)
johnc@1242 153 hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
ysr@777 154
johnc@3182 155 assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
tonyp@1377 156 "Must be in range.");
johnc@1242 157 add_card_work(from_card, par);
ysr@777 158 }
ysr@777 159 }
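
  // Illustrative arithmetic for the card index computed above, assuming the
  // usual 512-byte cards (card_shift == 9) and 8-byte heap words
  // (LogHeapWordSize == 3): each card covers 512 / 8 = 64 heap words, so a
  // "from" pointer 200 heap words past the region bottom yields
  // from_card = 200 >> (9 - 3) = 3.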
ysr@777 160
ysr@777 161 public:
ysr@777 162
ysr@777 163 HeapRegion* hr() const { return _hr; }
ysr@777 164
ysr@777 165 #if PRT_COUNT_OCCUPIED
ysr@777 166 jint occupied() const {
ysr@777 167 // Overkill, but if we ever need it...
ysr@777 168 // guarantee(_occupied == _bm.count_one_bits(), "Check");
ysr@777 169 return _occupied;
ysr@777 170 }
ysr@777 171 #else
ysr@777 172 jint occupied() const {
ysr@777 173 return _bm.count_one_bits();
ysr@777 174 }
ysr@777 175 #endif
ysr@777 176
ysr@777 177 void init(HeapRegion* hr) {
ysr@777 178 _hr = hr;
ysr@777 179 #if PRT_COUNT_OCCUPIED
ysr@777 180 _occupied = 0;
ysr@777 181 #endif
ysr@777 182 _bm.clear();
ysr@777 183 }
ysr@777 184
ysr@1280 185 void add_reference(OopOrNarrowOopStar from) {
ysr@777 186 add_reference_work(from, /*parallel*/ true);
ysr@777 187 }
ysr@777 188
ysr@1280 189 void seq_add_reference(OopOrNarrowOopStar from) {
ysr@777 190 add_reference_work(from, /*parallel*/ false);
ysr@777 191 }
ysr@777 192
ysr@777 193 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
ysr@777 194 HeapWord* hr_bot = hr()->bottom();
swamyv@924 195 size_t hr_first_card_index = ctbs->index_for(hr_bot);
ysr@777 196 bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
ysr@777 197 #if PRT_COUNT_OCCUPIED
ysr@777 198 recount_occupied();
ysr@777 199 #endif
ysr@777 200 }
ysr@777 201
johnc@1242 202 void add_card(CardIdx_t from_card_index) {
ysr@777 203 add_card_work(from_card_index, /*parallel*/ true);
ysr@777 204 }
ysr@777 205
johnc@1242 206 void seq_add_card(CardIdx_t from_card_index) {
ysr@777 207 add_card_work(from_card_index, /*parallel*/ false);
ysr@777 208 }
ysr@777 209
ysr@777 210 // (Destructively) union the bitmap of the current table into the given
ysr@777 211 // bitmap (which is assumed to be of the same size).
ysr@777 212 void union_bitmap_into(BitMap* bm) {
ysr@777 213 bm->set_union(_bm);
ysr@777 214 }
ysr@777 215
ysr@777 216 // Mem size in bytes.
ysr@777 217 size_t mem_size() const {
ysr@777 218 return sizeof(this) + _bm.size_in_words() * HeapWordSize;
ysr@777 219 }
ysr@777 220
ysr@777 221 static size_t fl_mem_size() {
ysr@777 222 PerRegionTable* cur = _free_list;
ysr@777 223 size_t res = 0;
ysr@777 224 while (cur != NULL) {
ysr@777 225 res += sizeof(PerRegionTable);
ysr@777 226 cur = cur->next_free();
ysr@777 227 }
ysr@777 228 return res;
ysr@777 229 }
ysr@777 230
ysr@777 231 // Requires "from" to be in "hr()".
ysr@1280 232 bool contains_reference(OopOrNarrowOopStar from) const {
ysr@777 233 assert(hr()->is_in_reserved(from), "Precondition.");
ysr@777 234 size_t card_ind = pointer_delta(from, hr()->bottom(),
ysr@777 235 CardTableModRefBS::card_size);
ysr@777 236 return _bm.at(card_ind);
ysr@777 237 }
ysr@777 238 };
ysr@777 239
ysr@777 240 PerRegionTable* PerRegionTable::_free_list = NULL;
ysr@777 241
ysr@777 242
ysr@777 243 #define COUNT_PAR_EXPANDS 0
ysr@777 244
ysr@777 245 #if COUNT_PAR_EXPANDS
ysr@777 246 static jint n_par_expands = 0;
ysr@777 247 static jint n_par_contracts = 0;
ysr@777 248 static jint par_expand_list_len = 0;
ysr@777 249 static jint max_par_expand_list_len = 0;
ysr@777 250
ysr@777 251 static void print_par_expand() {
ysr@777 252 Atomic::inc(&n_par_expands);
ysr@777 253 Atomic::inc(&par_expand_list_len);
ysr@777 254 if (par_expand_list_len > max_par_expand_list_len) {
ysr@777 255 max_par_expand_list_len = par_expand_list_len;
ysr@777 256 }
ysr@777 257 if ((n_par_expands % 10) == 0) {
ysr@777 258 gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, "
ysr@777 259 "len = %d, max_len = %d\n.",
ysr@777 260 n_par_expands, n_par_contracts, par_expand_list_len,
ysr@777 261 max_par_expand_list_len);
ysr@777 262 }
ysr@777 263 }
ysr@777 264 #endif
ysr@777 265
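// PosParPRT extends PerRegionTable with an optional array of per-thread
// sub-tables, which lets workers with tid > 0 add cards without atomics.
// Note that should_expand() below currently always returns false, so the
// parallel-expansion machinery is effectively dormant (see CR 6921087).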
ysr@777 266 class PosParPRT: public PerRegionTable {
ysr@777 267 PerRegionTable** _par_tables;
ysr@777 268
ysr@777 269 enum SomePrivateConstants {
ysr@777 270 ReserveParTableExpansion = 1
ysr@777 271 };
ysr@777 272
ysr@777 273 void par_contract() {
ysr@777 274 assert(_par_tables != NULL, "Precondition.");
ysr@777 275 int n = HeapRegionRemSet::num_par_rem_sets()-1;
ysr@777 276 for (int i = 0; i < n; i++) {
ysr@777 277 _par_tables[i]->union_bitmap_into(bm());
ysr@777 278 PerRegionTable::free(_par_tables[i]);
ysr@777 279 _par_tables[i] = NULL;
ysr@777 280 }
ysr@777 281 #if PRT_COUNT_OCCUPIED
ysr@777 282 // We must recount the "occupied."
ysr@777 283 recount_occupied();
ysr@777 284 #endif
ysr@777 285 FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables);
ysr@777 286 _par_tables = NULL;
ysr@777 287 #if COUNT_PAR_EXPANDS
ysr@777 288 Atomic::inc(&n_par_contracts);
ysr@777 289 Atomic::dec(&par_expand_list_len);
ysr@777 290 #endif
ysr@777 291 }
ysr@777 292
ysr@777 293 static PerRegionTable** _par_table_fl;
ysr@777 294
ysr@777 295 PosParPRT* _next;
ysr@777 296
ysr@777 297 static PosParPRT* _free_list;
ysr@777 298
ysr@777 299 PerRegionTable** par_tables() const {
ysr@777 300 assert(uintptr_t(NULL) == 0, "Assumption.");
ysr@777 301 if (uintptr_t(_par_tables) <= ReserveParTableExpansion)
ysr@777 302 return NULL;
ysr@777 303 else
ysr@777 304 return _par_tables;
ysr@777 305 }
ysr@777 306
ysr@777 307 PosParPRT* _next_par_expanded;
ysr@777 308 PosParPRT* next_par_expanded() { return _next_par_expanded; }
ysr@777 309 void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; }
ysr@777 310 static PosParPRT* _par_expanded_list;
ysr@777 311
ysr@777 312 public:
ysr@777 313
ysr@777 314 PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {}
ysr@777 315
ysr@777 316 jint occupied() const {
ysr@777 317 jint res = PerRegionTable::occupied();
ysr@777 318 if (par_tables() != NULL) {
ysr@777 319 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
ysr@777 320 res += par_tables()[i]->occupied();
ysr@777 321 }
ysr@777 322 }
ysr@777 323 return res;
ysr@777 324 }
ysr@777 325
ysr@777 326 void init(HeapRegion* hr) {
ysr@777 327 PerRegionTable::init(hr);
ysr@777 328 _next = NULL;
ysr@777 329 if (par_tables() != NULL) {
ysr@777 330 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
ysr@777 331 par_tables()[i]->init(hr);
ysr@777 332 }
ysr@777 333 }
ysr@777 334 }
ysr@777 335
ysr@777 336 static void free(PosParPRT* prt) {
ysr@777 337 while (true) {
ysr@777 338 PosParPRT* fl = _free_list;
ysr@777 339 prt->set_next(fl);
ysr@777 340 PosParPRT* res =
ysr@777 341 (PosParPRT*)
ysr@777 342 Atomic::cmpxchg_ptr(prt, &_free_list, fl);
ysr@777 343 if (res == fl) return;
ysr@777 344 }
ysr@777 345 ShouldNotReachHere();
ysr@777 346 }
ysr@777 347
ysr@777 348 static PosParPRT* alloc(HeapRegion* hr) {
ysr@777 349 PosParPRT* fl = _free_list;
ysr@777 350 while (fl != NULL) {
ysr@777 351 PosParPRT* nxt = fl->next();
ysr@777 352 PosParPRT* res =
ysr@777 353 (PosParPRT*)
ysr@777 354 Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
ysr@777 355 if (res == fl) {
ysr@777 356 fl->init(hr);
ysr@777 357 return fl;
ysr@777 358 } else {
ysr@777 359 fl = _free_list;
ysr@777 360 }
ysr@777 361 }
ysr@777 362 assert(fl == NULL, "Loop condition.");
ysr@777 363 return new PosParPRT(hr);
ysr@777 364 }
ysr@777 365
ysr@777 366 PosParPRT* next() const { return _next; }
ysr@777 367 void set_next(PosParPRT* nxt) { _next = nxt; }
ysr@777 368 PosParPRT** next_addr() { return &_next; }
ysr@777 369
tonyp@1694 370 bool should_expand(int tid) {
tonyp@3028 371 // Given that we now defer RSet updates for after a GC we don't
tonyp@3028 372 // really need to expand the tables any more. This code should be
tonyp@3028 373 // cleaned up in the future (see CR 6921087).
tonyp@3028 374 return false;
tonyp@1694 375 }
tonyp@1694 376
tonyp@1694 377 void par_expand() {
tonyp@1694 378 int n = HeapRegionRemSet::num_par_rem_sets()-1;
tonyp@1694 379 if (n <= 0) return;
tonyp@1694 380 if (_par_tables == NULL) {
tonyp@1694 381 PerRegionTable* res =
tonyp@1694 382 (PerRegionTable*)
tonyp@1694 383 Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
tonyp@1694 384 &_par_tables, NULL);
tonyp@1694 385 if (res != NULL) return;
tonyp@1694 386 // Otherwise, we reserved the right to do the expansion.
tonyp@1694 387
tonyp@1694 388 PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
tonyp@1694 389 for (int i = 0; i < n; i++) {
tonyp@1694 390 PerRegionTable* ptable = PerRegionTable::alloc(hr());
tonyp@1694 391 ptables[i] = ptable;
tonyp@1694 392 }
tonyp@1694 393 // Here we do not need an atomic.
tonyp@1694 394 _par_tables = ptables;
tonyp@1694 395 #if COUNT_PAR_EXPANDS
tonyp@1694 396 print_par_expand();
tonyp@1694 397 #endif
tonyp@1694 398 // We must put this table on the expanded list.
tonyp@1694 399 PosParPRT* exp_head = _par_expanded_list;
tonyp@1694 400 while (true) {
tonyp@1694 401 set_next_par_expanded(exp_head);
tonyp@1694 402 PosParPRT* res =
tonyp@1694 403 (PosParPRT*)
tonyp@1694 404 Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
tonyp@1694 405 if (res == exp_head) return;
tonyp@1694 406 // Otherwise.
tonyp@1694 407 exp_head = res;
tonyp@1694 408 }
tonyp@1694 409 ShouldNotReachHere();
tonyp@1694 410 }
tonyp@1694 411 }
tonyp@1694 412
ysr@1280 413 void add_reference(OopOrNarrowOopStar from, int tid) {
ysr@777 414 // Expand if necessary.
ysr@777 415 PerRegionTable** pt = par_tables();
ysr@777 416 if (pt != NULL) {
ysr@777 417 // We always have to assume that mods to table 0 are in parallel,
ysr@777 418 // because of the claiming scheme in parallel expansion. A thread
ysr@777 419 // with tid != 0 that finds the table to be NULL, but doesn't succeed
ysr@777 420 // in claiming the right of expanding it, will end up in the else
ysr@777 421 // clause of the above if test. That thread could be delayed, and an
ysr@777 422 // add_reference call from thread 0 could see the table expanded and
ysr@777 423 // come here. Both threads would then be adding in parallel. But we
ysr@777 424 // still get to avoid atomics for tids > 0.
ysr@777 425 if (tid == 0) {
ysr@777 426 PerRegionTable::add_reference(from);
ysr@777 427 } else {
ysr@777 428 pt[tid-1]->seq_add_reference(from);
ysr@777 429 }
ysr@777 430 } else {
ysr@777 431 // Not expanded -- add to the base table.
ysr@777 432 PerRegionTable::add_reference(from);
ysr@777 433 }
ysr@777 434 }
ysr@777 435
ysr@777 436 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
ysr@777 437 assert(_par_tables == NULL, "Precondition");
ysr@777 438 PerRegionTable::scrub(ctbs, card_bm);
ysr@777 439 }
ysr@777 440
ysr@777 441 size_t mem_size() const {
ysr@777 442 size_t res =
ysr@777 443 PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable);
ysr@777 444 if (_par_tables != NULL) {
ysr@777 445 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
ysr@777 446 res += _par_tables[i]->mem_size();
ysr@777 447 }
ysr@777 448 }
ysr@777 449 return res;
ysr@777 450 }
ysr@777 451
ysr@777 452 static size_t fl_mem_size() {
ysr@777 453 PosParPRT* cur = _free_list;
ysr@777 454 size_t res = 0;
ysr@777 455 while (cur != NULL) {
ysr@777 456 res += sizeof(PosParPRT);
ysr@777 457 cur = cur->next();
ysr@777 458 }
ysr@777 459 return res;
ysr@777 460 }
ysr@777 461
ysr@1280 462 bool contains_reference(OopOrNarrowOopStar from) const {
ysr@777 463 if (PerRegionTable::contains_reference(from)) return true;
ysr@777 464 if (_par_tables != NULL) {
ysr@777 465 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
ysr@777 466 if (_par_tables[i]->contains_reference(from)) return true;
ysr@777 467 }
ysr@777 468 }
ysr@777 469 return false;
ysr@777 470 }
ysr@777 471
ysr@777 472 static void par_contract_all();
ysr@777 473 };
ysr@777 474
ysr@777 475 void PosParPRT::par_contract_all() {
ysr@777 476 PosParPRT* hd = _par_expanded_list;
ysr@777 477 while (hd != NULL) {
ysr@777 478 PosParPRT* nxt = hd->next_par_expanded();
ysr@777 479 PosParPRT* res =
ysr@777 480 (PosParPRT*)
ysr@777 481 Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd);
ysr@777 482 if (res == hd) {
ysr@777 483 // We claimed the right to contract this table.
ysr@777 484 hd->set_next_par_expanded(NULL);
ysr@777 485 hd->par_contract();
ysr@777 486 hd = _par_expanded_list;
ysr@777 487 } else {
ysr@777 488 hd = res;
ysr@777 489 }
ysr@777 490 }
ysr@777 491 }
ysr@777 492
ysr@777 493 PosParPRT* PosParPRT::_free_list = NULL;
ysr@777 494 PosParPRT* PosParPRT::_par_expanded_list = NULL;
ysr@777 495
ysr@777 496 jint OtherRegionsTable::_cache_probes = 0;
ysr@777 497 jint OtherRegionsTable::_cache_hits = 0;
ysr@777 498
ysr@777 499 size_t OtherRegionsTable::_max_fine_entries = 0;
ysr@777 500 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
ysr@777 501 #if SAMPLE_FOR_EVICTION
ysr@777 502 size_t OtherRegionsTable::_fine_eviction_stride = 0;
ysr@777 503 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
ysr@777 504 #endif
ysr@777 505
ysr@777 506 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
ysr@777 507 _g1h(G1CollectedHeap::heap()),
ysr@777 508 _m(Mutex::leaf, "An OtherRegionsTable lock", true),
ysr@777 509 _hr(hr),
ysr@777 510 _coarse_map(G1CollectedHeap::heap()->max_regions(),
ysr@777 511 false /* in-resource-area */),
ysr@777 512 _fine_grain_regions(NULL),
ysr@777 513 _n_fine_entries(0), _n_coarse_entries(0),
ysr@777 514 #if SAMPLE_FOR_EVICTION
ysr@777 515 _fine_eviction_start(0),
ysr@777 516 #endif
ysr@777 517 _sparse_table(hr)
ysr@777 518 {
ysr@777 519 typedef PosParPRT* PosParPRTPtr;
ysr@777 520 if (_max_fine_entries == 0) {
ysr@777 521 assert(_mod_max_fine_entries_mask == 0, "Both or none.");
iveresov@1696 522 size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
iveresov@1696 523 _max_fine_entries = (size_t)(1 << max_entries_log);
ysr@777 524 _mod_max_fine_entries_mask = _max_fine_entries - 1;
ysr@777 525 #if SAMPLE_FOR_EVICTION
ysr@777 526 assert(_fine_eviction_sample_size == 0
ysr@777 527 && _fine_eviction_stride == 0, "All init at same time.");
iveresov@1696 528 _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
ysr@777 529 _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
ysr@777 530 #endif
ysr@777 531 }
ysr@777 532 _fine_grain_regions = new PosParPRTPtr[_max_fine_entries];
ysr@777 533 if (_fine_grain_regions == NULL)
ysr@777 534 vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
ysr@777 535 "Failed to allocate _fine_grain_entries.");
ysr@777 536 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 537 _fine_grain_regions[i] = NULL;
ysr@777 538 }
ysr@777 539 }
ysr@777 540
ysr@777 541 int** OtherRegionsTable::_from_card_cache = NULL;
ysr@777 542 size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
ysr@777 543 size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
ysr@777 544
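// The "from card cache" (FCC) is a per-worker filter: for each worker id and
// each region, _from_card_cache[tid][region] remembers the last "from" card
// that worker tid added to that region's remembered set (-1 when empty), so
// that add_reference() can cheaply skip back-to-back additions from the same
// card.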
ysr@777 545 void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
ysr@777 546 _from_card_cache_max_regions = max_regions;
ysr@777 547
ysr@777 548 int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
ysr@777 549 _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs);
ysr@777 550 for (int i = 0; i < n_par_rs; i++) {
ysr@777 551 _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions);
ysr@777 552 for (size_t j = 0; j < max_regions; j++) {
ysr@777 553 _from_card_cache[i][j] = -1; // An invalid value.
ysr@777 554 }
ysr@777 555 }
ysr@777 556 _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int);
ysr@777 557 }
ysr@777 558
ysr@777 559 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
ysr@777 560 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
ysr@777 561 assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
ysr@777 562 for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
ysr@777 563 _from_card_cache[i][j] = -1; // An invalid value.
ysr@777 564 }
ysr@777 565 }
ysr@777 566 }
ysr@777 567
ysr@777 568 #ifndef PRODUCT
ysr@777 569 void OtherRegionsTable::print_from_card_cache() {
ysr@777 570 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
ysr@777 571 for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
ysr@777 572 gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
ysr@777 573 i, j, _from_card_cache[i][j]);
ysr@777 574 }
ysr@777 575 }
ysr@777 576 }
ysr@777 577 #endif
ysr@777 578
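// Record that the card containing "from" may hold a pointer into this
// region. The rough flow: consult the per-worker from-card cache first; if
// the "from" region is already marked in the coarse map there is nothing to
// do; otherwise find (or create) the fine-grain table for that region,
// preferring the sparse table while the region contributes only a few
// cards, and evicting/coarsening an existing fine-grain table via
// delete_region_table() when the fine-grain array is full.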
ysr@1280 579 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
ysr@777 580 size_t cur_hrs_ind = hr()->hrs_index();
ysr@777 581
ysr@777 582 #if HRRS_VERBOSE
ysr@777 583 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
ysr@1280 584 from,
ysr@1280 585 UseCompressedOops
ysr@1280 586 ? oopDesc::load_decode_heap_oop((narrowOop*)from)
ysr@1280 587 : oopDesc::load_decode_heap_oop((oop*)from));
ysr@777 588 #endif
ysr@777 589
ysr@777 590 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
ysr@777 591
ysr@777 592 #if HRRS_VERBOSE
ysr@777 593 gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
ysr@777 594 hr()->bottom(), from_card,
ysr@777 595 _from_card_cache[tid][cur_hrs_ind]);
ysr@777 596 #endif
ysr@777 597
ysr@777 598 #define COUNT_CACHE 0
ysr@777 599 #if COUNT_CACHE
ysr@777 600 jint p = Atomic::add(1, &_cache_probes);
ysr@777 601 if ((p % 10000) == 0) {
ysr@777 602 jint hits = _cache_hits;
ysr@777 603 gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.",
ysr@777 604 _cache_hits, p, 100.0* (float)hits/(float)p);
ysr@777 605 }
ysr@777 606 #endif
ysr@777 607 if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
ysr@777 608 #if HRRS_VERBOSE
ysr@777 609 gclog_or_tty->print_cr(" from-card cache hit.");
ysr@777 610 #endif
ysr@777 611 #if COUNT_CACHE
ysr@777 612 Atomic::inc(&_cache_hits);
ysr@777 613 #endif
ysr@777 614 assert(contains_reference(from), "We just added it!");
ysr@777 615 return;
ysr@777 616 } else {
ysr@777 617 _from_card_cache[tid][cur_hrs_ind] = from_card;
ysr@777 618 }
ysr@777 619
ysr@777 620 // Note that this may be a continued H region.
ysr@777 621 HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
johnc@1242 622 RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
ysr@777 623
ysr@777 624 // If the region is already coarsened, return.
ysr@777 625 if (_coarse_map.at(from_hrs_ind)) {
ysr@777 626 #if HRRS_VERBOSE
ysr@777 627 gclog_or_tty->print_cr(" coarse map hit.");
ysr@777 628 #endif
ysr@777 629 assert(contains_reference(from), "We just added it!");
ysr@777 630 return;
ysr@777 631 }
ysr@777 632
ysr@777 633 // Otherwise find a per-region table to add it to.
ysr@777 634 size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
ysr@777 635 PosParPRT* prt = find_region_table(ind, from_hr);
ysr@777 636 if (prt == NULL) {
ysr@777 637 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
ysr@777 638 // Confirm that it's really not there...
ysr@777 639 prt = find_region_table(ind, from_hr);
ysr@777 640 if (prt == NULL) {
ysr@777 641
ysr@777 642 uintptr_t from_hr_bot_card_index =
ysr@777 643 uintptr_t(from_hr->bottom())
ysr@777 644 >> CardTableModRefBS::card_shift;
johnc@1242 645 CardIdx_t card_index = from_card - from_hr_bot_card_index;
johnc@3182 646 assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
ysr@777 647 "Must be in range.");
ysr@777 648 if (G1HRRSUseSparseTable &&
johnc@1242 649 _sparse_table.add_card(from_hrs_ind, card_index)) {
ysr@777 650 if (G1RecordHRRSOops) {
ysr@777 651 HeapRegionRemSet::record(hr(), from);
ysr@777 652 #if HRRS_VERBOSE
ysr@777 653 gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
ysr@777 654 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
ysr@777 655 align_size_down(uintptr_t(from),
ysr@777 656 CardTableModRefBS::card_size),
ysr@777 657 hr()->bottom(), from);
ysr@777 658 #endif
ysr@777 659 }
ysr@777 660 #if HRRS_VERBOSE
ysr@777 661 gclog_or_tty->print_cr(" added card to sparse table.");
ysr@777 662 #endif
ysr@777 663 assert(contains_reference_locked(from), "We just added it!");
ysr@777 664 return;
ysr@777 665 } else {
ysr@777 666 #if HRRS_VERBOSE
ysr@777 667 gclog_or_tty->print_cr(" [tid %d] sparse table entry "
ysr@777 668 "overflow(f: %d, t: %d)",
ysr@777 669 tid, from_hrs_ind, cur_hrs_ind);
ysr@777 670 #endif
ysr@777 671 }
ysr@777 672
ysr@777 673 if (_n_fine_entries == _max_fine_entries) {
ysr@777 674 prt = delete_region_table();
ysr@777 675 } else {
ysr@777 676 prt = PosParPRT::alloc(from_hr);
ysr@777 677 }
ysr@777 678 prt->init(from_hr);
ysr@777 679
ysr@777 680 PosParPRT* first_prt = _fine_grain_regions[ind];
ysr@777 681 prt->set_next(first_prt); // XXX Maybe move to init?
ysr@777 682 _fine_grain_regions[ind] = prt;
ysr@777 683 _n_fine_entries++;
ysr@777 684
ysr@777 685 if (G1HRRSUseSparseTable) {
iveresov@1696 686 // Transfer from sparse to fine-grain.
iveresov@1696 687 SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
iveresov@1696 688 assert(sprt_entry != NULL, "There should have been an entry");
iveresov@1696 689 for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
iveresov@1696 690 CardIdx_t c = sprt_entry->card(i);
ysr@777 691 if (c != SparsePRTEntry::NullEntry) {
ysr@777 692 prt->add_card(c);
ysr@777 693 }
ysr@777 694 }
ysr@777 695 // Now we can delete the sparse entry.
johnc@1242 696 bool res = _sparse_table.delete_entry(from_hrs_ind);
ysr@777 697 assert(res, "It should have been there.");
ysr@777 698 }
ysr@777 699 }
ysr@777 700 assert(prt != NULL && prt->hr() == from_hr, "consequence");
ysr@777 701 }
ysr@777 702 // Note that we can't assert "prt->hr() == from_hr", because of the
ysr@777 703 // possibility of concurrent reuse. But see head comment of
ysr@777 704 // OtherRegionsTable for why this is OK.
ysr@777 705 assert(prt != NULL, "Inv");
ysr@777 706
tonyp@1694 707 if (prt->should_expand(tid)) {
tonyp@1694 708 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
tonyp@1694 709 HeapRegion* prt_hr = prt->hr();
tonyp@1694 710 if (prt_hr == from_hr) {
tonyp@1694 711 // Make sure the table still corresponds to the same region
tonyp@1694 712 prt->par_expand();
tonyp@1694 713 prt->add_reference(from, tid);
tonyp@1694 714 }
tonyp@1694 715 // else: The table has been concurrently coarsened, evicted, and
tonyp@1694 716 // the table data structure re-used for another table. So, we
tonyp@1694 717 // don't need to add the reference any more given that the table
tonyp@1694 718 // has been coarsened and the whole region will be scanned anyway.
tonyp@1694 719 } else {
tonyp@1694 720 prt->add_reference(from, tid);
tonyp@1694 721 }
ysr@777 722 if (G1RecordHRRSOops) {
ysr@777 723 HeapRegionRemSet::record(hr(), from);
ysr@777 724 #if HRRS_VERBOSE
ysr@777 725 gclog_or_tty->print("Added card " PTR_FORMAT " to region "
ysr@777 726 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
ysr@777 727 align_size_down(uintptr_t(from),
ysr@777 728 CardTableModRefBS::card_size),
ysr@777 729 hr()->bottom(), from);
ysr@777 730 #endif
ysr@777 731 }
ysr@777 732 assert(contains_reference(from), "We just added it!");
ysr@777 733 }
ysr@777 734
ysr@777 735 PosParPRT*
ysr@777 736 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
ysr@777 737 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
ysr@777 738 PosParPRT* prt = _fine_grain_regions[ind];
ysr@777 739 while (prt != NULL && prt->hr() != hr) {
ysr@777 740 prt = prt->next();
ysr@777 741 }
ysr@777 742 // Loop postcondition is the method postcondition.
ysr@777 743 return prt;
ysr@777 744 }
ysr@777 745
ysr@777 746
ysr@777 747 #define DRT_CENSUS 0
ysr@777 748
ysr@777 749 #if DRT_CENSUS
ysr@777 750 static const int HistoSize = 6;
ysr@777 751 static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
ysr@777 752 static int coarsenings = 0;
ysr@777 753 static int occ_sum = 0;
ysr@777 754 #endif
ysr@777 755
ysr@777 756 jint OtherRegionsTable::_n_coarsenings = 0;
ysr@777 757
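// Evict one fine-grain table to make room for a new one: sample a few
// buckets of _fine_grain_regions, pick the table with the highest card
// occupancy, mark its "from" region in the coarse bitmap (after which that
// whole region will be scanned anyway), unlink the table and return it to
// the caller for reuse.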
ysr@777 758 PosParPRT* OtherRegionsTable::delete_region_table() {
ysr@777 759 #if DRT_CENSUS
ysr@777 760 int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
ysr@777 761 const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 };
ysr@777 762 #endif
ysr@777 763
ysr@777 764 assert(_m.owned_by_self(), "Precondition");
ysr@777 765 assert(_n_fine_entries == _max_fine_entries, "Precondition");
ysr@777 766 PosParPRT* max = NULL;
ysr@777 767 jint max_occ = 0;
ysr@777 768 PosParPRT** max_prev;
ysr@777 769 size_t max_ind;
ysr@777 770
ysr@777 771 #if SAMPLE_FOR_EVICTION
ysr@777 772 size_t i = _fine_eviction_start;
ysr@777 773 for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
ysr@777 774 size_t ii = i;
ysr@777 775 // Make sure we get a non-NULL sample.
ysr@777 776 while (_fine_grain_regions[ii] == NULL) {
ysr@777 777 ii++;
ysr@777 778 if (ii == _max_fine_entries) ii = 0;
ysr@777 779 guarantee(ii != i, "We must find one.");
ysr@777 780 }
ysr@777 781 PosParPRT** prev = &_fine_grain_regions[ii];
ysr@777 782 PosParPRT* cur = *prev;
ysr@777 783 while (cur != NULL) {
ysr@777 784 jint cur_occ = cur->occupied();
ysr@777 785 if (max == NULL || cur_occ > max_occ) {
ysr@777 786 max = cur;
ysr@777 787 max_prev = prev;
ysr@777 788 max_ind = i;
ysr@777 789 max_occ = cur_occ;
ysr@777 790 }
ysr@777 791 prev = cur->next_addr();
ysr@777 792 cur = cur->next();
ysr@777 793 }
ysr@777 794 i = i + _fine_eviction_stride;
ysr@777 795 if (i >= _n_fine_entries) i = i - _n_fine_entries;
ysr@777 796 }
ysr@777 797 _fine_eviction_start++;
ysr@777 798 if (_fine_eviction_start >= _n_fine_entries)
ysr@777 799 _fine_eviction_start -= _n_fine_entries;
ysr@777 800 #else
ysr@777 801 for (int i = 0; i < _max_fine_entries; i++) {
ysr@777 802 PosParPRT** prev = &_fine_grain_regions[i];
ysr@777 803 PosParPRT* cur = *prev;
ysr@777 804 while (cur != NULL) {
ysr@777 805 jint cur_occ = cur->occupied();
ysr@777 806 #if DRT_CENSUS
ysr@777 807 for (int k = 0; k < HistoSize; k++) {
ysr@777 808 if (cur_occ <= histo_limits[k]) {
ysr@777 809 histo[k]++; global_histo[k]++; break;
ysr@777 810 }
ysr@777 811 }
ysr@777 812 #endif
ysr@777 813 if (max == NULL || cur_occ > max_occ) {
ysr@777 814 max = cur;
ysr@777 815 max_prev = prev;
ysr@777 816 max_ind = i;
ysr@777 817 max_occ = cur_occ;
ysr@777 818 }
ysr@777 819 prev = cur->next_addr();
ysr@777 820 cur = cur->next();
ysr@777 821 }
ysr@777 822 }
ysr@777 823 #endif
ysr@777 824 // XXX
ysr@777 825 guarantee(max != NULL, "Since _n_fine_entries > 0");
ysr@777 826 #if DRT_CENSUS
ysr@777 827 gclog_or_tty->print_cr("In a coarsening: histo of occs:");
ysr@777 828 for (int k = 0; k < HistoSize; k++) {
ysr@777 829 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], histo[k]);
ysr@777 830 }
ysr@777 831 coarsenings++;
ysr@777 832 occ_sum += max_occ;
ysr@777 833 if ((coarsenings % 100) == 0) {
ysr@777 834 gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings);
ysr@777 835 for (int k = 0; k < HistoSize; k++) {
ysr@777 836 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], global_histo[k]);
ysr@777 837 }
ysr@777 838 gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.",
ysr@777 839 (float)occ_sum/(float)coarsenings);
ysr@777 840 }
ysr@777 841 #endif
ysr@777 842
ysr@777 843 // Set the corresponding coarse bit.
tonyp@2963 844 size_t max_hrs_index = max->hr()->hrs_index();
ysr@777 845 if (!_coarse_map.at(max_hrs_index)) {
ysr@777 846 _coarse_map.at_put(max_hrs_index, true);
ysr@777 847 _n_coarse_entries++;
ysr@777 848 #if 0
ysr@777 849 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
ysr@777 850 "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
ysr@777 851 hr()->bottom(),
ysr@777 852 max->hr()->bottom(),
ysr@777 853 _n_coarse_entries);
ysr@777 854 #endif
ysr@777 855 }
ysr@777 856
ysr@777 857 // Unsplice.
ysr@777 858 *max_prev = max->next();
ysr@777 859 Atomic::inc(&_n_coarsenings);
ysr@777 860 _n_fine_entries--;
ysr@777 861 return max;
ysr@777 862 }
ysr@777 863
ysr@777 864
ysr@777 865 // At present, this must be called stop-world single-threaded.
ysr@777 866 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
ysr@777 867 BitMap* region_bm, BitMap* card_bm) {
ysr@777 868 // First eliminate garbage regions from the coarse map.
ysr@777 869 if (G1RSScrubVerbose)
tonyp@2963 870 gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
tonyp@2963 871 hr()->hrs_index());
ysr@777 872
ysr@777 873 assert(_coarse_map.size() == region_bm->size(), "Precondition");
ysr@777 874 if (G1RSScrubVerbose)
ysr@777 875 gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries);
ysr@777 876 _coarse_map.set_intersection(*region_bm);
ysr@777 877 _n_coarse_entries = _coarse_map.count_one_bits();
ysr@777 878 if (G1RSScrubVerbose)
ysr@777 879 gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries);
ysr@777 880
ysr@777 881 // Now do the fine-grained maps.
ysr@777 882 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 883 PosParPRT* cur = _fine_grain_regions[i];
ysr@777 884 PosParPRT** prev = &_fine_grain_regions[i];
ysr@777 885 while (cur != NULL) {
ysr@777 886 PosParPRT* nxt = cur->next();
ysr@777 887 // If the entire region is dead, eliminate.
ysr@777 888 if (G1RSScrubVerbose)
tonyp@2963 889 gclog_or_tty->print_cr(" For other region "SIZE_FORMAT":",
tonyp@2963 890 cur->hr()->hrs_index());
ysr@777 891 if (!region_bm->at(cur->hr()->hrs_index())) {
ysr@777 892 *prev = nxt;
ysr@777 893 cur->set_next(NULL);
ysr@777 894 _n_fine_entries--;
ysr@777 895 if (G1RSScrubVerbose)
ysr@777 896 gclog_or_tty->print_cr(" deleted via region map.");
ysr@777 897 PosParPRT::free(cur);
ysr@777 898 } else {
ysr@777 899 // Do fine-grain elimination.
ysr@777 900 if (G1RSScrubVerbose)
ysr@777 901 gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
ysr@777 902 cur->scrub(ctbs, card_bm);
ysr@777 903 if (G1RSScrubVerbose)
ysr@777 904 gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
ysr@777 905 // Did that empty the table completely?
ysr@777 906 if (cur->occupied() == 0) {
ysr@777 907 *prev = nxt;
ysr@777 908 cur->set_next(NULL);
ysr@777 909 _n_fine_entries--;
ysr@777 910 PosParPRT::free(cur);
ysr@777 911 } else {
ysr@777 912 prev = cur->next_addr();
ysr@777 913 }
ysr@777 914 }
ysr@777 915 cur = nxt;
ysr@777 916 }
ysr@777 917 }
ysr@777 918 // Since we may have deleted a from_card_cache entry from the RS, clear
ysr@777 919 // the FCC.
ysr@777 920 clear_fcc();
ysr@777 921 }
ysr@777 922
ysr@777 923
ysr@777 924 size_t OtherRegionsTable::occupied() const {
ysr@777 925 // Cast away const in this case.
ysr@777 926 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
ysr@777 927 size_t sum = occ_fine();
ysr@777 928 sum += occ_sparse();
ysr@777 929 sum += occ_coarse();
ysr@777 930 return sum;
ysr@777 931 }
ysr@777 932
ysr@777 933 size_t OtherRegionsTable::occ_fine() const {
ysr@777 934 size_t sum = 0;
ysr@777 935 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 936 PosParPRT* cur = _fine_grain_regions[i];
ysr@777 937 while (cur != NULL) {
ysr@777 938 sum += cur->occupied();
ysr@777 939 cur = cur->next();
ysr@777 940 }
ysr@777 941 }
ysr@777 942 return sum;
ysr@777 943 }
ysr@777 944
ysr@777 945 size_t OtherRegionsTable::occ_coarse() const {
tonyp@1377 946 return (_n_coarse_entries * HeapRegion::CardsPerRegion);
ysr@777 947 }
ysr@777 948
ysr@777 949 size_t OtherRegionsTable::occ_sparse() const {
ysr@777 950 return _sparse_table.occupied();
ysr@777 951 }
ysr@777 952
ysr@777 953 size_t OtherRegionsTable::mem_size() const {
ysr@777 954 // Cast away const in this case.
ysr@777 955 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
ysr@777 956 size_t sum = 0;
ysr@777 957 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 958 PosParPRT* cur = _fine_grain_regions[i];
ysr@777 959 while (cur != NULL) {
ysr@777 960 sum += cur->mem_size();
ysr@777 961 cur = cur->next();
ysr@777 962 }
ysr@777 963 }
ysr@777 964 sum += (sizeof(PosParPRT*) * _max_fine_entries);
ysr@777 965 sum += (_coarse_map.size_in_words() * HeapWordSize);
ysr@777 966 sum += (_sparse_table.mem_size());
ysr@777 967 sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
ysr@777 968 return sum;
ysr@777 969 }
ysr@777 970
ysr@777 971 size_t OtherRegionsTable::static_mem_size() {
ysr@777 972 return _from_card_cache_mem_size;
ysr@777 973 }
ysr@777 974
ysr@777 975 size_t OtherRegionsTable::fl_mem_size() {
ysr@777 976 return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size();
ysr@777 977 }
ysr@777 978
ysr@777 979 void OtherRegionsTable::clear_fcc() {
ysr@777 980 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
ysr@777 981 _from_card_cache[i][hr()->hrs_index()] = -1;
ysr@777 982 }
ysr@777 983 }
ysr@777 984
ysr@777 985 void OtherRegionsTable::clear() {
ysr@777 986 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
ysr@777 987 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 988 PosParPRT* cur = _fine_grain_regions[i];
ysr@777 989 while (cur != NULL) {
ysr@777 990 PosParPRT* nxt = cur->next();
ysr@777 991 PosParPRT::free(cur);
ysr@777 992 cur = nxt;
ysr@777 993 }
ysr@777 994 _fine_grain_regions[i] = NULL;
ysr@777 995 }
ysr@777 996 _sparse_table.clear();
ysr@777 997 _coarse_map.clear();
ysr@777 998 _n_fine_entries = 0;
ysr@777 999 _n_coarse_entries = 0;
ysr@777 1000
ysr@777 1001 clear_fcc();
ysr@777 1002 }
ysr@777 1003
ysr@777 1004 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
ysr@777 1005 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
tonyp@2963 1006 size_t hrs_ind = from_hr->hrs_index();
ysr@777 1007 size_t ind = hrs_ind & _mod_max_fine_entries_mask;
ysr@777 1008 if (del_single_region_table(ind, from_hr)) {
ysr@777 1009 assert(!_coarse_map.at(hrs_ind), "Inv");
ysr@777 1010 } else {
ysr@777 1011 _coarse_map.par_at_put(hrs_ind, 0);
ysr@777 1012 }
ysr@777 1013 // Check to see if any of the fcc entries come from here.
tonyp@2963 1014 size_t hr_ind = hr()->hrs_index();
ysr@777 1015 for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
ysr@777 1016 int fcc_ent = _from_card_cache[tid][hr_ind];
ysr@777 1017 if (fcc_ent != -1) {
ysr@777 1018 HeapWord* card_addr = (HeapWord*)
ysr@777 1019 (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
ysr@777 1020 if (hr()->is_in_reserved(card_addr)) {
ysr@777 1021 // Clear the from card cache.
ysr@777 1022 _from_card_cache[tid][hr_ind] = -1;
ysr@777 1023 }
ysr@777 1024 }
ysr@777 1025 }
ysr@777 1026 }
ysr@777 1027
ysr@777 1028 bool OtherRegionsTable::del_single_region_table(size_t ind,
ysr@777 1029 HeapRegion* hr) {
ysr@777 1030 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
ysr@777 1031 PosParPRT** prev_addr = &_fine_grain_regions[ind];
ysr@777 1032 PosParPRT* prt = *prev_addr;
ysr@777 1033 while (prt != NULL && prt->hr() != hr) {
ysr@777 1034 prev_addr = prt->next_addr();
ysr@777 1035 prt = prt->next();
ysr@777 1036 }
ysr@777 1037 if (prt != NULL) {
ysr@777 1038 assert(prt->hr() == hr, "Loop postcondition.");
ysr@777 1039 *prev_addr = prt->next();
ysr@777 1040 PosParPRT::free(prt);
ysr@777 1041 _n_fine_entries--;
ysr@777 1042 return true;
ysr@777 1043 } else {
ysr@777 1044 return false;
ysr@777 1045 }
ysr@777 1046 }
ysr@777 1047
ysr@1280 1048 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
ysr@777 1049 // Cast away const in this case.
ysr@777 1050 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
ysr@777 1051 return contains_reference_locked(from);
ysr@777 1052 }
ysr@777 1053
ysr@1280 1054 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
ysr@777 1055 HeapRegion* hr = _g1h->heap_region_containing_raw(from);
ysr@777 1056 if (hr == NULL) return false;
johnc@1242 1057 RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
ysr@777 1058 // Is this region in the coarse map?
ysr@777 1059 if (_coarse_map.at(hr_ind)) return true;
ysr@777 1060
ysr@777 1061 PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
ysr@777 1062 hr);
ysr@777 1063 if (prt != NULL) {
ysr@777 1064 return prt->contains_reference(from);
ysr@777 1065
ysr@777 1066 } else {
ysr@777 1067 uintptr_t from_card =
ysr@777 1068 (uintptr_t(from) >> CardTableModRefBS::card_shift);
ysr@777 1069 uintptr_t hr_bot_card_index =
ysr@777 1070 uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
ysr@777 1071 assert(from_card >= hr_bot_card_index, "Inv");
johnc@1242 1072 CardIdx_t card_index = from_card - hr_bot_card_index;
johnc@3182 1073 assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
tonyp@1377 1074 "Must be in range.");
johnc@1242 1075 return _sparse_table.contains_card(hr_ind, card_index);
ysr@777 1076 }
ysr@777 1077
ysr@777 1078
ysr@777 1079 }
ysr@777 1080
tonyp@2493 1081 void
tonyp@2493 1082 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
tonyp@2493 1083 _sparse_table.do_cleanup_work(hrrs_cleanup_task);
tonyp@2493 1084 }
tonyp@2493 1085
iveresov@1230 1086 // Determines how many threads can add records to an rset in parallel.
iveresov@1230 1087 // This can be done by either mutator threads together with the
iveresov@1230 1088 // concurrent refinement threads or GC threads.
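// Illustrative example (hypothetical numbers): with 4 mutator "par ids" from
// the dirty card queue set, 2 concurrent refinement threads, and
// ParallelGCThreads == 8, this returns MAX2(4 + 2, 8) == 8.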
ysr@777 1089 int HeapRegionRemSet::num_par_rem_sets() {
iveresov@1230 1090 return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
ysr@777 1091 }
ysr@777 1092
ysr@777 1093 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
ysr@777 1094 HeapRegion* hr)
tonyp@2974 1095 : _bosa(bosa), _other_regions(hr) {
tonyp@2974 1096 reset_for_par_iteration();
tonyp@2974 1097 }
ysr@777 1098
iveresov@1696 1099 void HeapRegionRemSet::setup_remset_size() {
iveresov@1696 1100 // Set up the sparse and fine-grain table sizes.
iveresov@1696 1101 // table_size = base * (log(region_size / 1M) + 1)
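  // For example, with 1M regions region_size_log_mb is 0 and each table gets
  // 1x its base number of entries; with 8M regions it is 3, giving 4x the
  // base.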
iveresov@1696 1102 int region_size_log_mb = MAX2((int)HeapRegion::LogOfHRGrainBytes - (int)LOG_M, 0);
iveresov@1696 1103 if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
iveresov@1696 1104 G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
iveresov@1696 1105 }
iveresov@1696 1106 if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
iveresov@1696 1107 G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
iveresov@1696 1108 }
iveresov@1696 1109 guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
iveresov@1696 1110 }
iveresov@1696 1111
ysr@777 1112 bool HeapRegionRemSet::claim_iter() {
ysr@777 1113 if (_iter_state != Unclaimed) return false;
ysr@777 1114 jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
ysr@777 1115 return (res == Unclaimed);
ysr@777 1116 }
ysr@777 1117
ysr@777 1118 void HeapRegionRemSet::set_iter_complete() {
ysr@777 1119 _iter_state = Complete;
ysr@777 1120 }
ysr@777 1121
ysr@777 1122 bool HeapRegionRemSet::iter_is_complete() {
ysr@777 1123 return _iter_state == Complete;
ysr@777 1124 }
ysr@777 1125
ysr@777 1126 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
ysr@777 1127 iter->initialize(this);
ysr@777 1128 }
ysr@777 1129
ysr@777 1130 #ifndef PRODUCT
ysr@777 1131 void HeapRegionRemSet::print() const {
ysr@777 1132 HeapRegionRemSetIterator iter;
ysr@777 1133 init_iterator(&iter);
ysr@777 1134 size_t card_index;
ysr@777 1135 while (iter.has_next(card_index)) {
ysr@777 1136 HeapWord* card_start =
ysr@777 1137 G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
tonyp@2974 1138 gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start);
ysr@777 1139 }
ysr@777 1140 // XXX
ysr@777 1141 if (iter.n_yielded() != occupied()) {
ysr@777 1142 gclog_or_tty->print_cr("Yielded disagrees with occupied:");
ysr@777 1143 gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).",
ysr@777 1144 iter.n_yielded(),
ysr@777 1145 iter.n_yielded_coarse(), iter.n_yielded_fine());
ysr@777 1146 gclog_or_tty->print_cr(" %6d occ (%6d coarse, %6d fine).",
ysr@777 1147 occupied(), occ_coarse(), occ_fine());
ysr@777 1148 }
ysr@777 1149 guarantee(iter.n_yielded() == occupied(),
ysr@777 1150 "We should have yielded all the represented cards.");
ysr@777 1151 }
ysr@777 1152 #endif
ysr@777 1153
ysr@777 1154 void HeapRegionRemSet::cleanup() {
ysr@777 1155 SparsePRT::cleanup_all();
ysr@777 1156 }
ysr@777 1157
ysr@777 1158 void HeapRegionRemSet::par_cleanup() {
ysr@777 1159 PosParPRT::par_contract_all();
ysr@777 1160 }
ysr@777 1161
ysr@777 1162 void HeapRegionRemSet::clear() {
ysr@777 1163 _other_regions.clear();
ysr@777 1164 assert(occupied() == 0, "Should be clear.");
tonyp@2974 1165 reset_for_par_iteration();
tonyp@2974 1166 }
tonyp@2974 1167
tonyp@2974 1168 void HeapRegionRemSet::reset_for_par_iteration() {
tonyp@2974 1169 _iter_state = Unclaimed;
tonyp@2974 1170 _iter_claimed = 0;
tonyp@2974 1171 // It's good to check this to make sure that the two methods are in sync.
tonyp@2974 1172 assert(verify_ready_for_par_iteration(), "post-condition");
ysr@777 1173 }
ysr@777 1174
ysr@777 1175 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
ysr@777 1176 BitMap* region_bm, BitMap* card_bm) {
ysr@777 1177 _other_regions.scrub(ctbs, region_bm, card_bm);
ysr@777 1178 }
ysr@777 1179
ysr@777 1180 //-------------------- Iteration --------------------
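//
// A HeapRegionRemSetIterator yields the card indices recorded in a region's
// remembered set by walking its three components in order: the sparse table
// first, then the fine-grain per-region tables, and finally the coarse map
// (for which every card of each coarsened "from" region is yielded).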
ysr@777 1181
ysr@777 1182 HeapRegionRemSetIterator::
ysr@777 1183 HeapRegionRemSetIterator() :
ysr@777 1184 _hrrs(NULL),
ysr@777 1185 _g1h(G1CollectedHeap::heap()),
ysr@777 1186 _bosa(NULL),
tonyp@2239 1187 _sparse_iter() { }
ysr@777 1188
ysr@777 1189 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
ysr@777 1190 _hrrs = hrrs;
ysr@777 1191 _coarse_map = &_hrrs->_other_regions._coarse_map;
ysr@777 1192 _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
ysr@777 1193 _bosa = _hrrs->bosa();
ysr@777 1194
ysr@777 1195 _is = Sparse;
ysr@777 1196 // Set these values so that we increment to the first region.
ysr@777 1197 _coarse_cur_region_index = -1;
johnc@3182 1198 _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
ysr@777 1199
ysr@777 1200 _cur_region_cur_card = 0;
ysr@777 1201
ysr@777 1202 _fine_array_index = -1;
ysr@777 1203 _fine_cur_prt = NULL;
ysr@777 1204
ysr@777 1205 _n_yielded_coarse = 0;
ysr@777 1206 _n_yielded_fine = 0;
ysr@777 1207 _n_yielded_sparse = 0;
ysr@777 1208
ysr@777 1209 _sparse_iter.init(&hrrs->_other_regions._sparse_table);
ysr@777 1210 }
ysr@777 1211
ysr@777 1212 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
ysr@777 1213 if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
ysr@777 1214 // Go to the next card.
ysr@777 1215 _coarse_cur_region_cur_card++;
ysr@777 1216 // Was that the last card in the current region?
tonyp@1377 1217 if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
ysr@777 1218 // Yes: find the next region. This may leave _coarse_cur_region_index
ysr@777 1219 // set to the last index, in which case there are no more coarse
ysr@777 1220 // regions.
ysr@777 1221 _coarse_cur_region_index =
ysr@777 1222 (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
ysr@777 1223 if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
ysr@777 1224 _coarse_cur_region_cur_card = 0;
ysr@777 1225 HeapWord* r_bot =
ysr@777 1226 _g1h->region_at(_coarse_cur_region_index)->bottom();
ysr@777 1227 _cur_region_card_offset = _bosa->index_for(r_bot);
ysr@777 1228 } else {
ysr@777 1229 return false;
ysr@777 1230 }
ysr@777 1231 }
ysr@777 1232 // If we didn't return false above, then we can yield a card.
ysr@777 1233 card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
ysr@777 1234 return true;
ysr@777 1235 }
ysr@777 1236
ysr@777 1237 void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
ysr@777 1238 // Otherwise, find the next bucket list in the array.
ysr@777 1239 _fine_array_index++;
ysr@777 1240 while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
ysr@777 1241 _fine_cur_prt = _fine_grain_regions[_fine_array_index];
ysr@777 1242 if (_fine_cur_prt != NULL) return;
ysr@777 1243 else _fine_array_index++;
ysr@777 1244 }
ysr@777 1245 assert(_fine_cur_prt == NULL, "Loop post");
ysr@777 1246 }
ysr@777 1247
ysr@777 1248 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
ysr@777 1249 if (fine_has_next()) {
ysr@777 1250 _cur_region_cur_card =
ysr@777 1251 _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
ysr@777 1252 }
ysr@777 1253 while (!fine_has_next()) {
tonyp@1377 1254 if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
ysr@777 1255 _cur_region_cur_card = 0;
ysr@777 1256 _fine_cur_prt = _fine_cur_prt->next();
ysr@777 1257 }
ysr@777 1258 if (_fine_cur_prt == NULL) {
ysr@777 1259 fine_find_next_non_null_prt();
ysr@777 1260 if (_fine_cur_prt == NULL) return false;
ysr@777 1261 }
ysr@777 1262 assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
ysr@777 1263 "inv.");
ysr@777 1264 HeapWord* r_bot =
ysr@777 1265 _fine_cur_prt->hr()->bottom();
ysr@777 1266 _cur_region_card_offset = _bosa->index_for(r_bot);
ysr@777 1267 _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
ysr@777 1268 }
ysr@777 1269 assert(fine_has_next(), "Or else we exited the loop via the return.");
ysr@777 1270 card_index = _cur_region_card_offset + _cur_region_cur_card;
ysr@777 1271 return true;
ysr@777 1272 }
ysr@777 1273
ysr@777 1274 bool HeapRegionRemSetIterator::fine_has_next() {
ysr@777 1275 return
ysr@777 1276 _fine_cur_prt != NULL &&
johnc@3182 1277 _cur_region_cur_card < HeapRegion::CardsPerRegion;
ysr@777 1278 }
ysr@777 1279
ysr@777 1280 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
ysr@777 1281 switch (_is) {
ysr@777 1282 case Sparse:
ysr@777 1283 if (_sparse_iter.has_next(card_index)) {
ysr@777 1284 _n_yielded_sparse++;
ysr@777 1285 return true;
ysr@777 1286 }
ysr@777 1287 // Otherwise, deliberate fall-through
ysr@777 1288 _is = Fine;
ysr@777 1289 case Fine:
ysr@777 1290 if (fine_has_next(card_index)) {
ysr@777 1291 _n_yielded_fine++;
ysr@777 1292 return true;
ysr@777 1293 }
ysr@777 1294 // Otherwise, deliberate fall-through
ysr@777 1295 _is = Coarse;
ysr@777 1296 case Coarse:
ysr@777 1297 if (coarse_has_next(card_index)) {
ysr@777 1298 _n_yielded_coarse++;
ysr@777 1299 return true;
ysr@777 1300 }
ysr@777 1301 // Otherwise...
ysr@777 1302 break;
ysr@777 1303 }
ysr@777 1304 assert(ParallelGCThreads > 1 ||
ysr@777 1305 n_yielded() == _hrrs->occupied(),
ysr@777 1306 "Should have yielded all the cards in the rem set "
ysr@777 1307 "(in the non-par case).");
ysr@777 1308 return false;
ysr@777 1309 }
ysr@777 1310
ysr@777 1311
ysr@777 1312
ysr@1280 1313 OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
ysr@1280 1314 HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
ysr@1280 1315 HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
ysr@1280 1316 int HeapRegionRemSet::_n_recorded = 0;
ysr@777 1317
ysr@777 1318 HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
ysr@777 1319 int* HeapRegionRemSet::_recorded_event_index = NULL;
ysr@777 1320 int HeapRegionRemSet::_n_recorded_events = 0;
ysr@777 1321
ysr@1280 1322 void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
ysr@777 1323 if (_recorded_oops == NULL) {
ysr@777 1324 assert(_n_recorded == 0
ysr@777 1325 && _recorded_cards == NULL
ysr@777 1326 && _recorded_regions == NULL,
ysr@777 1327 "Inv");
ysr@1280 1328 _recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
ysr@1280 1329 _recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded);
ysr@1280 1330 _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
ysr@777 1331 }
ysr@777 1332 if (_n_recorded == MaxRecorded) {
ysr@777 1333 gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
ysr@777 1334 } else {
ysr@777 1335 _recorded_cards[_n_recorded] =
ysr@777 1336 (HeapWord*)align_size_down(uintptr_t(f),
ysr@777 1337 CardTableModRefBS::card_size);
ysr@777 1338 _recorded_oops[_n_recorded] = f;
ysr@777 1339 _recorded_regions[_n_recorded] = hr;
ysr@777 1340 _n_recorded++;
ysr@777 1341 }
ysr@777 1342 }
ysr@777 1343
ysr@777 1344 void HeapRegionRemSet::record_event(Event evnt) {
ysr@777 1345 if (!G1RecordHRRSEvents) return;
ysr@777 1346
ysr@777 1347 if (_recorded_events == NULL) {
ysr@777 1348 assert(_n_recorded_events == 0
ysr@777 1349 && _recorded_event_index == NULL,
ysr@777 1350 "Inv");
ysr@777 1351 _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents);
ysr@777 1352 _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents);
ysr@777 1353 }
ysr@777 1354 if (_n_recorded_events == MaxRecordedEvents) {
ysr@777 1355 gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
ysr@777 1356 } else {
ysr@777 1357 _recorded_events[_n_recorded_events] = evnt;
ysr@777 1358 _recorded_event_index[_n_recorded_events] = _n_recorded;
ysr@777 1359 _n_recorded_events++;
ysr@777 1360 }
ysr@777 1361 }
ysr@777 1362
ysr@777 1363 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
ysr@777 1364 switch (evnt) {
ysr@777 1365 case Event_EvacStart:
ysr@777 1366 str->print("Evac Start");
ysr@777 1367 break;
ysr@777 1368 case Event_EvacEnd:
ysr@777 1369 str->print("Evac End");
ysr@777 1370 break;
ysr@777 1371 case Event_RSUpdateEnd:
ysr@777 1372 str->print("RS Update End");
ysr@777 1373 break;
ysr@777 1374 }
ysr@777 1375 }
ysr@777 1376
ysr@777 1377 void HeapRegionRemSet::print_recorded() {
ysr@777 1378 int cur_evnt = 0;
ysr@777 1379 Event cur_evnt_kind;
ysr@777 1380 int cur_evnt_ind = 0;
ysr@777 1381 if (_n_recorded_events > 0) {
ysr@777 1382 cur_evnt_kind = _recorded_events[cur_evnt];
ysr@777 1383 cur_evnt_ind = _recorded_event_index[cur_evnt];
ysr@777 1384 }
ysr@777 1385
ysr@777 1386 for (int i = 0; i < _n_recorded; i++) {
ysr@777 1387 while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
ysr@777 1388 gclog_or_tty->print("Event: ");
ysr@777 1389 print_event(gclog_or_tty, cur_evnt_kind);
ysr@777 1390 gclog_or_tty->print_cr("");
ysr@777 1391 cur_evnt++;
ysr@777 1392 if (cur_evnt < MaxRecordedEvents) {
ysr@777 1393 cur_evnt_kind = _recorded_events[cur_evnt];
ysr@777 1394 cur_evnt_ind = _recorded_event_index[cur_evnt];
ysr@777 1395 }
ysr@777 1396 }
ysr@777 1397 gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
ysr@777 1398 " for ref " PTR_FORMAT ".\n",
ysr@777 1399 _recorded_cards[i], _recorded_regions[i]->bottom(),
ysr@777 1400 _recorded_oops[i]);
ysr@777 1401 }
ysr@777 1402 }
ysr@777 1403
tonyp@2493 1404 void HeapRegionRemSet::reset_for_cleanup_tasks() {
tonyp@2493 1405 SparsePRT::reset_for_cleanup_tasks();
tonyp@2493 1406 }
tonyp@2493 1407
tonyp@2493 1408 void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
tonyp@2493 1409 _other_regions.do_cleanup_work(hrrs_cleanup_task);
tonyp@2493 1410 }
tonyp@2493 1411
tonyp@2493 1412 void
tonyp@2493 1413 HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
tonyp@2493 1414 SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
tonyp@2493 1415 }
tonyp@2493 1416
ysr@777 1417 #ifndef PRODUCT
ysr@777 1418 void HeapRegionRemSet::test() {
ysr@777 1419 os::sleep(Thread::current(), (jlong)5000, false);
ysr@777 1420 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1421
iveresov@1696 1422 // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
ysr@777 1423 // hash bucket.
ysr@777 1424 HeapRegion* hr0 = g1h->region_at(0);
ysr@777 1425 HeapRegion* hr1 = g1h->region_at(1);
ysr@777 1426 HeapRegion* hr2 = g1h->region_at(5);
ysr@777 1427 HeapRegion* hr3 = g1h->region_at(6);
ysr@777 1428 HeapRegion* hr4 = g1h->region_at(7);
ysr@777 1429 HeapRegion* hr5 = g1h->region_at(8);
ysr@777 1430
ysr@777 1431 HeapWord* hr1_start = hr1->bottom();
ysr@777 1432 HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
ysr@777 1433 HeapWord* hr1_last = hr1->end() - 1;
ysr@777 1434
ysr@777 1435 HeapWord* hr2_start = hr2->bottom();
ysr@777 1436 HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
ysr@777 1437 HeapWord* hr2_last = hr2->end() - 1;
ysr@777 1438
ysr@777 1439 HeapWord* hr3_start = hr3->bottom();
ysr@777 1440 HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
ysr@777 1441 HeapWord* hr3_last = hr3->end() - 1;
ysr@777 1442
ysr@777 1443 HeapRegionRemSet* hrrs = hr0->rem_set();
ysr@777 1444
ysr@777 1445 // Make three references from region 0x101...
ysr@1280 1446 hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
ysr@1280 1447 hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
ysr@1280 1448 hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
ysr@777 1449
ysr@1280 1450 hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
ysr@1280 1451 hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
ysr@1280 1452 hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
ysr@777 1453
ysr@1280 1454 hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
ysr@1280 1455 hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
ysr@1280 1456 hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
ysr@777 1457
ysr@777 1458 // Now cause a coarsening.
ysr@1280 1459 hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
ysr@1280 1460 hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
ysr@777 1461
ysr@777 1462 // Now, does iteration yield these three?
ysr@777 1463 HeapRegionRemSetIterator iter;
ysr@777 1464 hrrs->init_iterator(&iter);
ysr@777 1465 size_t sum = 0;
ysr@777 1466 size_t card_index;
ysr@777 1467 while (iter.has_next(card_index)) {
ysr@777 1468 HeapWord* card_start =
ysr@777 1469 G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
ysr@777 1470 gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start);
ysr@777 1471 sum++;
ysr@777 1472 }
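  // Eleven references were added above; the coarsening evicts one fully
  // populated fine-grain table (3 cards) and replaces it with a whole
  // region's worth of coarse cards (2048, assuming 1M regions and 512-byte
  // cards), hence the expected total below.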
ysr@777 1473 guarantee(sum == 11 - 3 + 2048, "Failure");
ysr@777 1474 guarantee(sum == hrrs->occupied(), "Failure");
ysr@777 1475 }
ysr@777 1476 #endif
