src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175:4dfb2df418f2
parent       3028:f44782f04dd4
child        3182:65a8ff39a6da
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
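
The mechanism described in the summary can be pictured with a short, self-contained sketch. This is an illustration only, not the HotSpot sources: the names RefProcessor, ScanClosure, Obj, discover and process_discovered below are hypothetical stand-ins. The point is that the closures used to scan and copy objects during the pause carry a pointer to the STW reference processor, hand it every Reference-like object they encounter, and the accumulated backlog is processed once copying is finished.

// Illustration only -- hypothetical types, not HotSpot code.
#include <cstddef>
#include <vector>

struct Obj {                       // hypothetical object model
  bool is_reference;               // true for java.lang.ref.Reference-like objects
  Obj* referent;
};

class RefProcessor {               // hypothetical stand-in for a reference processor
  std::vector<Obj*> _discovered;
public:
  void discover(Obj* ref) { _discovered.push_back(ref); }

  template <typename KeepAlive>
  void process_discovered(KeepAlive keep_alive) {
    // Run at the end of the pause: keep referents (and what they reach) alive.
    for (Obj* ref : _discovered) {
      if (ref->referent != nullptr) {
        keep_alive(ref->referent);
      }
    }
    _discovered.clear();
  }
};

class ScanClosure {                // hypothetical scan/copy closure
  RefProcessor* _rp;               // the embedded reference processor
public:
  explicit ScanClosure(RefProcessor* rp) : _rp(rp) {}

  void do_object(Obj* obj) {
    if (obj->is_reference) {
      _rp->discover(obj);          // discovery is a side effect of scanning
    }
    // ... copy obj into its destination region, push its fields for scanning ...
  }
};

The file below is the full annotated source of heapRegionRemSet.cpp at this changeset; the reference-processing changes themselves are spread across the G1 collector sources.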

ysr@777 1 /*
tonyp@2493 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
stefank@2314 30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 31 #include "memory/allocation.hpp"
stefank@2314 32 #include "memory/space.inline.hpp"
stefank@2314 33 #include "utilities/bitMap.inline.hpp"
stefank@2314 34 #include "utilities/globalDefinitions.hpp"
ysr@777 35
ysr@777 36 #define HRRS_VERBOSE 0
ysr@777 37
ysr@777 38 #define PRT_COUNT_OCCUPIED 1
ysr@777 39
ysr@777 40 // OtherRegionsTable
ysr@777 41
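// A PerRegionTable records which cards of a single source ("from") region
// contain pointers into the region that owns this remembered set. It is a
// bitmap with one bit per card, optionally paired with an occupancy count,
// and instances are recycled through a lock-free free list.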
ysr@777 42 class PerRegionTable: public CHeapObj {
ysr@777 43 friend class OtherRegionsTable;
ysr@777 44 friend class HeapRegionRemSetIterator;
ysr@777 45
ysr@777 46 HeapRegion* _hr;
ysr@777 47 BitMap _bm;
ysr@777 48 #if PRT_COUNT_OCCUPIED
ysr@777 49 jint _occupied;
ysr@777 50 #endif
ysr@777 51 PerRegionTable* _next_free;
ysr@777 52
ysr@777 53 PerRegionTable* next_free() { return _next_free; }
ysr@777 54 void set_next_free(PerRegionTable* prt) { _next_free = prt; }
ysr@777 55
ysr@777 56
ysr@777 57 static PerRegionTable* _free_list;
ysr@777 58
ysr@777 59 #ifdef _MSC_VER
ysr@777 60 // For some reason, even though the classes are marked as friend, they are unable
ysr@777 61 // to access CardsPerRegion when it is private/protected. Only the Windows C++
ysr@777 62 // compiler complains about this; Sun CC and Linux gcc have no problem with private access.
ysr@777 63
ysr@777 64 public:
ysr@777 65
ysr@777 66 #endif // _MSC_VER
ysr@777 67
ysr@777 68 protected:
ysr@777 69 // We need access in order to union things into the base table.
ysr@777 70 BitMap* bm() { return &_bm; }
ysr@777 71
apetrusenko@980 72 #if PRT_COUNT_OCCUPIED
ysr@777 73 void recount_occupied() {
ysr@777 74 _occupied = (jint) bm()->count_one_bits();
ysr@777 75 }
apetrusenko@980 76 #endif
ysr@777 77
ysr@777 78 PerRegionTable(HeapRegion* hr) :
ysr@777 79 _hr(hr),
ysr@777 80 #if PRT_COUNT_OCCUPIED
ysr@777 81 _occupied(0),
ysr@777 82 #endif
tonyp@1377 83 _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
ysr@777 84 {}
ysr@777 85
ysr@777 86 static void free(PerRegionTable* prt) {
ysr@777 87 while (true) {
ysr@777 88 PerRegionTable* fl = _free_list;
ysr@777 89 prt->set_next_free(fl);
ysr@777 90 PerRegionTable* res =
ysr@777 91 (PerRegionTable*)
ysr@777 92 Atomic::cmpxchg_ptr(prt, &_free_list, fl);
ysr@777 93 if (res == fl) return;
ysr@777 94 }
ysr@777 95 ShouldNotReachHere();
ysr@777 96 }
ysr@777 97
ysr@777 98 static PerRegionTable* alloc(HeapRegion* hr) {
ysr@777 99 PerRegionTable* fl = _free_list;
ysr@777 100 while (fl != NULL) {
ysr@777 101 PerRegionTable* nxt = fl->next_free();
ysr@777 102 PerRegionTable* res =
ysr@777 103 (PerRegionTable*)
ysr@777 104 Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
ysr@777 105 if (res == fl) {
ysr@777 106 fl->init(hr);
ysr@777 107 return fl;
ysr@777 108 } else {
ysr@777 109 fl = _free_list;
ysr@777 110 }
ysr@777 111 }
ysr@777 112 assert(fl == NULL, "Loop condition.");
ysr@777 113 return new PerRegionTable(hr);
ysr@777 114 }
ysr@777 115
johnc@1242 116 void add_card_work(CardIdx_t from_card, bool par) {
ysr@777 117 if (!_bm.at(from_card)) {
ysr@777 118 if (par) {
ysr@777 119 if (_bm.par_at_put(from_card, 1)) {
ysr@777 120 #if PRT_COUNT_OCCUPIED
ysr@777 121 Atomic::inc(&_occupied);
ysr@777 122 #endif
ysr@777 123 }
ysr@777 124 } else {
ysr@777 125 _bm.at_put(from_card, 1);
ysr@777 126 #if PRT_COUNT_OCCUPIED
ysr@777 127 _occupied++;
ysr@777 128 #endif
ysr@777 129 }
ysr@777 130 }
ysr@777 131 }
ysr@777 132
ysr@1280 133 void add_reference_work(OopOrNarrowOopStar from, bool par) {
ysr@777 134 // Must make this robust in case "from" is not in "_hr", because of
ysr@777 135 // concurrency.
ysr@777 136
ysr@777 137 #if HRRS_VERBOSE
ysr@777 138 gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
ysr@777 139 from, *from);
ysr@777 140 #endif
ysr@777 141
ysr@777 142 HeapRegion* loc_hr = hr();
ysr@777 143 // If the test below fails, then this table was reused concurrently
ysr@777 144 // with this operation. This is OK, since the old table was coarsened,
ysr@777 145 // and adding a bit to the new table is never incorrect.
ysr@777 146 if (loc_hr->is_in_reserved(from)) {
ysr@777 147 size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
johnc@1242 148 CardIdx_t from_card = (CardIdx_t)
johnc@1242 149 hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
ysr@777 150
tonyp@1377 151 assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
tonyp@1377 152 "Must be in range.");
johnc@1242 153 add_card_work(from_card, par);
ysr@777 154 }
ysr@777 155 }
ysr@777 156
ysr@777 157 public:
ysr@777 158
ysr@777 159 HeapRegion* hr() const { return _hr; }
ysr@777 160
ysr@777 161 #if PRT_COUNT_OCCUPIED
ysr@777 162 jint occupied() const {
ysr@777 163 // Overkill, but if we ever need it...
ysr@777 164 // guarantee(_occupied == _bm.count_one_bits(), "Check");
ysr@777 165 return _occupied;
ysr@777 166 }
ysr@777 167 #else
ysr@777 168 jint occupied() const {
ysr@777 169 return _bm.count_one_bits();
ysr@777 170 }
ysr@777 171 #endif
ysr@777 172
ysr@777 173 void init(HeapRegion* hr) {
ysr@777 174 _hr = hr;
ysr@777 175 #if PRT_COUNT_OCCUPIED
ysr@777 176 _occupied = 0;
ysr@777 177 #endif
ysr@777 178 _bm.clear();
ysr@777 179 }
ysr@777 180
ysr@1280 181 void add_reference(OopOrNarrowOopStar from) {
ysr@777 182 add_reference_work(from, /*parallel*/ true);
ysr@777 183 }
ysr@777 184
ysr@1280 185 void seq_add_reference(OopOrNarrowOopStar from) {
ysr@777 186 add_reference_work(from, /*parallel*/ false);
ysr@777 187 }
ysr@777 188
ysr@777 189 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
ysr@777 190 HeapWord* hr_bot = hr()->bottom();
swamyv@924 191 size_t hr_first_card_index = ctbs->index_for(hr_bot);
ysr@777 192 bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
ysr@777 193 #if PRT_COUNT_OCCUPIED
ysr@777 194 recount_occupied();
ysr@777 195 #endif
ysr@777 196 }
ysr@777 197
johnc@1242 198 void add_card(CardIdx_t from_card_index) {
ysr@777 199 add_card_work(from_card_index, /*parallel*/ true);
ysr@777 200 }
ysr@777 201
johnc@1242 202 void seq_add_card(CardIdx_t from_card_index) {
ysr@777 203 add_card_work(from_card_index, /*parallel*/ false);
ysr@777 204 }
ysr@777 205
ysr@777 206 // (Destructively) union the bitmap of the current table into the given
ysr@777 207 // bitmap (which is assumed to be of the same size.)
ysr@777 208 void union_bitmap_into(BitMap* bm) {
ysr@777 209 bm->set_union(_bm);
ysr@777 210 }
ysr@777 211
ysr@777 212 // Mem size in bytes.
ysr@777 213 size_t mem_size() const {
ysr@777 214 return sizeof(*this) + _bm.size_in_words() * HeapWordSize;
ysr@777 215 }
ysr@777 216
ysr@777 217 static size_t fl_mem_size() {
ysr@777 218 PerRegionTable* cur = _free_list;
ysr@777 219 size_t res = 0;
ysr@777 220 while (cur != NULL) {
ysr@777 221 res += sizeof(PerRegionTable);
ysr@777 222 cur = cur->next_free();
ysr@777 223 }
ysr@777 224 return res;
ysr@777 225 }
ysr@777 226
ysr@777 227 // Requires "from" to be in "hr()".
ysr@1280 228 bool contains_reference(OopOrNarrowOopStar from) const {
ysr@777 229 assert(hr()->is_in_reserved(from), "Precondition.");
ysr@777 230 size_t card_ind = pointer_delta(from, hr()->bottom(),
ysr@777 231 CardTableModRefBS::card_size);
ysr@777 232 return _bm.at(card_ind);
ysr@777 233 }
ysr@777 234 };
ysr@777 235
ysr@777 236 PerRegionTable* PerRegionTable::_free_list = NULL;
ysr@777 237
ysr@777 238
ysr@777 239 #define COUNT_PAR_EXPANDS 0
ysr@777 240
ysr@777 241 #if COUNT_PAR_EXPANDS
ysr@777 242 static jint n_par_expands = 0;
ysr@777 243 static jint n_par_contracts = 0;
ysr@777 244 static jint par_expand_list_len = 0;
ysr@777 245 static jint max_par_expand_list_len = 0;
ysr@777 246
ysr@777 247 static void print_par_expand() {
ysr@777 248 Atomic::inc(&n_par_expands);
ysr@777 249 Atomic::inc(&par_expand_list_len);
ysr@777 250 if (par_expand_list_len > max_par_expand_list_len) {
ysr@777 251 max_par_expand_list_len = par_expand_list_len;
ysr@777 252 }
ysr@777 253 if ((n_par_expands % 10) == 0) {
ysr@777 254 gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, "
ysr@777 255 "len = %d, max_len = %d\n.",
ysr@777 256 n_par_expands, n_par_contracts, par_expand_list_len,
ysr@777 257 max_par_expand_list_len);
ysr@777 258 }
ysr@777 259 }
ysr@777 260 #endif
ysr@777 261
ysr@777 262 class PosParPRT: public PerRegionTable {
ysr@777 263 PerRegionTable** _par_tables;
ysr@777 264
ysr@777 265 enum SomePrivateConstants {
ysr@777 266 ReserveParTableExpansion = 1
ysr@777 267 };
ysr@777 268
ysr@777 269 void par_contract() {
ysr@777 270 assert(_par_tables != NULL, "Precondition.");
ysr@777 271 int n = HeapRegionRemSet::num_par_rem_sets()-1;
ysr@777 272 for (int i = 0; i < n; i++) {
ysr@777 273 _par_tables[i]->union_bitmap_into(bm());
ysr@777 274 PerRegionTable::free(_par_tables[i]);
ysr@777 275 _par_tables[i] = NULL;
ysr@777 276 }
ysr@777 277 #if PRT_COUNT_OCCUPIED
ysr@777 278 // We must recount the "occupied."
ysr@777 279 recount_occupied();
ysr@777 280 #endif
ysr@777 281 FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables);
ysr@777 282 _par_tables = NULL;
ysr@777 283 #if COUNT_PAR_EXPANDS
ysr@777 284 Atomic::inc(&n_par_contracts);
ysr@777 285 Atomic::dec(&par_expand_list_len);
ysr@777 286 #endif
ysr@777 287 }
ysr@777 288
ysr@777 289 static PerRegionTable** _par_table_fl;
ysr@777 290
ysr@777 291 PosParPRT* _next;
ysr@777 292
ysr@777 293 static PosParPRT* _free_list;
ysr@777 294
ysr@777 295 PerRegionTable** par_tables() const {
ysr@777 296 assert(uintptr_t(NULL) == 0, "Assumption.");
ysr@777 297 if (uintptr_t(_par_tables) <= ReserveParTableExpansion)
ysr@777 298 return NULL;
ysr@777 299 else
ysr@777 300 return _par_tables;
ysr@777 301 }
ysr@777 302
ysr@777 303 PosParPRT* _next_par_expanded;
ysr@777 304 PosParPRT* next_par_expanded() { return _next_par_expanded; }
ysr@777 305 void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; }
ysr@777 306 static PosParPRT* _par_expanded_list;
ysr@777 307
ysr@777 308 public:
ysr@777 309
ysr@777 310 PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {}
ysr@777 311
ysr@777 312 jint occupied() const {
ysr@777 313 jint res = PerRegionTable::occupied();
ysr@777 314 if (par_tables() != NULL) {
ysr@777 315 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
ysr@777 316 res += par_tables()[i]->occupied();
ysr@777 317 }
ysr@777 318 }
ysr@777 319 return res;
ysr@777 320 }
ysr@777 321
ysr@777 322 void init(HeapRegion* hr) {
ysr@777 323 PerRegionTable::init(hr);
ysr@777 324 _next = NULL;
ysr@777 325 if (par_tables() != NULL) {
ysr@777 326 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
ysr@777 327 par_tables()[i]->init(hr);
ysr@777 328 }
ysr@777 329 }
ysr@777 330 }
ysr@777 331
ysr@777 332 static void free(PosParPRT* prt) {
ysr@777 333 while (true) {
ysr@777 334 PosParPRT* fl = _free_list;
ysr@777 335 prt->set_next(fl);
ysr@777 336 PosParPRT* res =
ysr@777 337 (PosParPRT*)
ysr@777 338 Atomic::cmpxchg_ptr(prt, &_free_list, fl);
ysr@777 339 if (res == fl) return;
ysr@777 340 }
ysr@777 341 ShouldNotReachHere();
ysr@777 342 }
ysr@777 343
ysr@777 344 static PosParPRT* alloc(HeapRegion* hr) {
ysr@777 345 PosParPRT* fl = _free_list;
ysr@777 346 while (fl != NULL) {
ysr@777 347 PosParPRT* nxt = fl->next();
ysr@777 348 PosParPRT* res =
ysr@777 349 (PosParPRT*)
ysr@777 350 Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
ysr@777 351 if (res == fl) {
ysr@777 352 fl->init(hr);
ysr@777 353 return fl;
ysr@777 354 } else {
ysr@777 355 fl = _free_list;
ysr@777 356 }
ysr@777 357 }
ysr@777 358 assert(fl == NULL, "Loop condition.");
ysr@777 359 return new PosParPRT(hr);
ysr@777 360 }
ysr@777 361
ysr@777 362 PosParPRT* next() const { return _next; }
ysr@777 363 void set_next(PosParPRT* nxt) { _next = nxt; }
ysr@777 364 PosParPRT** next_addr() { return &_next; }
ysr@777 365
tonyp@1694 366 bool should_expand(int tid) {
tonyp@3028 367 // Given that we now defer RSet updates for after a GC we don't
tonyp@3028 368 // really need to expand the tables any more. This code should be
tonyp@3028 369 // cleaned up in the future (see CR 6921087).
tonyp@3028 370 return false;
tonyp@1694 371 }
tonyp@1694 372
tonyp@1694 373 void par_expand() {
tonyp@1694 374 int n = HeapRegionRemSet::num_par_rem_sets()-1;
tonyp@1694 375 if (n <= 0) return;
tonyp@1694 376 if (_par_tables == NULL) {
tonyp@1694 377 PerRegionTable* res =
tonyp@1694 378 (PerRegionTable*)
tonyp@1694 379 Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
tonyp@1694 380 &_par_tables, NULL);
tonyp@1694 381 if (res != NULL) return;
tonyp@1694 382 // Otherwise, we reserved the right to do the expansion.
tonyp@1694 383
tonyp@1694 384 PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
tonyp@1694 385 for (int i = 0; i < n; i++) {
tonyp@1694 386 PerRegionTable* ptable = PerRegionTable::alloc(hr());
tonyp@1694 387 ptables[i] = ptable;
tonyp@1694 388 }
tonyp@1694 389 // Here we do not need an atomic.
tonyp@1694 390 _par_tables = ptables;
tonyp@1694 391 #if COUNT_PAR_EXPANDS
tonyp@1694 392 print_par_expand();
tonyp@1694 393 #endif
tonyp@1694 394 // We must put this table on the expanded list.
tonyp@1694 395 PosParPRT* exp_head = _par_expanded_list;
tonyp@1694 396 while (true) {
tonyp@1694 397 set_next_par_expanded(exp_head);
tonyp@1694 398 PosParPRT* res =
tonyp@1694 399 (PosParPRT*)
tonyp@1694 400 Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
tonyp@1694 401 if (res == exp_head) return;
tonyp@1694 402 // Otherwise.
tonyp@1694 403 exp_head = res;
tonyp@1694 404 }
tonyp@1694 405 ShouldNotReachHere();
tonyp@1694 406 }
tonyp@1694 407 }
tonyp@1694 408
ysr@1280 409 void add_reference(OopOrNarrowOopStar from, int tid) {
ysr@777 410 // Expand if necessary.
ysr@777 411 PerRegionTable** pt = par_tables();
ysr@777 412 if (pt != NULL) {
ysr@777 413 // We always have to assume that mods to table 0 are in parallel,
ysr@777 414 // because of the claiming scheme in parallel expansion. A thread
ysr@777 415 // with tid != 0 that finds the table to be NULL, but doesn't succeed
ysr@777 416 // in claiming the right of expanding it, will end up in the else
ysr@777 417 // clause of the above if test. That thread could be delayed, and a
ysr@777 418 // thread 0 add reference could see the table expanded, and come
ysr@777 419 // here. Both threads would be adding in parallel. But we get to
ysr@777 420 // not use atomics for tids > 0.
ysr@777 421 if (tid == 0) {
ysr@777 422 PerRegionTable::add_reference(from);
ysr@777 423 } else {
ysr@777 424 pt[tid-1]->seq_add_reference(from);
ysr@777 425 }
ysr@777 426 } else {
ysr@777 427 // Not expanded -- add to the base table.
ysr@777 428 PerRegionTable::add_reference(from);
ysr@777 429 }
ysr@777 430 }
ysr@777 431
ysr@777 432 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
ysr@777 433 assert(_par_tables == NULL, "Precondition");
ysr@777 434 PerRegionTable::scrub(ctbs, card_bm);
ysr@777 435 }
ysr@777 436
ysr@777 437 size_t mem_size() const {
ysr@777 438 size_t res =
ysr@777 439 PerRegionTable::mem_size() + sizeof(*this) - sizeof(PerRegionTable);
ysr@777 440 if (_par_tables != NULL) {
ysr@777 441 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
ysr@777 442 res += _par_tables[i]->mem_size();
ysr@777 443 }
ysr@777 444 }
ysr@777 445 return res;
ysr@777 446 }
ysr@777 447
ysr@777 448 static size_t fl_mem_size() {
ysr@777 449 PosParPRT* cur = _free_list;
ysr@777 450 size_t res = 0;
ysr@777 451 while (cur != NULL) {
ysr@777 452 res += sizeof(PosParPRT);
ysr@777 453 cur = cur->next();
ysr@777 454 }
ysr@777 455 return res;
ysr@777 456 }
ysr@777 457
ysr@1280 458 bool contains_reference(OopOrNarrowOopStar from) const {
ysr@777 459 if (PerRegionTable::contains_reference(from)) return true;
ysr@777 460 if (_par_tables != NULL) {
ysr@777 461 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
ysr@777 462 if (_par_tables[i]->contains_reference(from)) return true;
ysr@777 463 }
ysr@777 464 }
ysr@777 465 return false;
ysr@777 466 }
ysr@777 467
ysr@777 468 static void par_contract_all();
ysr@777 469 };
ysr@777 470
ysr@777 471 void PosParPRT::par_contract_all() {
ysr@777 472 PosParPRT* hd = _par_expanded_list;
ysr@777 473 while (hd != NULL) {
ysr@777 474 PosParPRT* nxt = hd->next_par_expanded();
ysr@777 475 PosParPRT* res =
ysr@777 476 (PosParPRT*)
ysr@777 477 Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd);
ysr@777 478 if (res == hd) {
ysr@777 479 // We claimed the right to contract this table.
ysr@777 480 hd->set_next_par_expanded(NULL);
ysr@777 481 hd->par_contract();
ysr@777 482 hd = _par_expanded_list;
ysr@777 483 } else {
ysr@777 484 hd = res;
ysr@777 485 }
ysr@777 486 }
ysr@777 487 }
ysr@777 488
ysr@777 489 PosParPRT* PosParPRT::_free_list = NULL;
ysr@777 490 PosParPRT* PosParPRT::_par_expanded_list = NULL;
ysr@777 491
ysr@777 492 jint OtherRegionsTable::_cache_probes = 0;
ysr@777 493 jint OtherRegionsTable::_cache_hits = 0;
ysr@777 494
ysr@777 495 size_t OtherRegionsTable::_max_fine_entries = 0;
ysr@777 496 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
ysr@777 497 #if SAMPLE_FOR_EVICTION
ysr@777 498 size_t OtherRegionsTable::_fine_eviction_stride = 0;
ysr@777 499 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
ysr@777 500 #endif
ysr@777 501
ysr@777 502 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
ysr@777 503 _g1h(G1CollectedHeap::heap()),
ysr@777 504 _m(Mutex::leaf, "An OtherRegionsTable lock", true),
ysr@777 505 _hr(hr),
ysr@777 506 _coarse_map(G1CollectedHeap::heap()->max_regions(),
ysr@777 507 false /* in-resource-area */),
ysr@777 508 _fine_grain_regions(NULL),
ysr@777 509 _n_fine_entries(0), _n_coarse_entries(0),
ysr@777 510 #if SAMPLE_FOR_EVICTION
ysr@777 511 _fine_eviction_start(0),
ysr@777 512 #endif
ysr@777 513 _sparse_table(hr)
ysr@777 514 {
ysr@777 515 typedef PosParPRT* PosParPRTPtr;
ysr@777 516 if (_max_fine_entries == 0) {
ysr@777 517 assert(_mod_max_fine_entries_mask == 0, "Both or none.");
iveresov@1696 518 size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
iveresov@1696 519 _max_fine_entries = (size_t)(1 << max_entries_log);
ysr@777 520 _mod_max_fine_entries_mask = _max_fine_entries - 1;
ysr@777 521 #if SAMPLE_FOR_EVICTION
ysr@777 522 assert(_fine_eviction_sample_size == 0
ysr@777 523 && _fine_eviction_stride == 0, "All init at same time.");
iveresov@1696 524 _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
ysr@777 525 _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
ysr@777 526 #endif
ysr@777 527 }
ysr@777 528 _fine_grain_regions = new PosParPRTPtr[_max_fine_entries];
ysr@777 529 if (_fine_grain_regions == NULL)
ysr@777 530 vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
ysr@777 531 "Failed to allocate _fine_grain_entries.");
ysr@777 532 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 533 _fine_grain_regions[i] = NULL;
ysr@777 534 }
ysr@777 535 }
ysr@777 536
ysr@777 537 int** OtherRegionsTable::_from_card_cache = NULL;
ysr@777 538 size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
ysr@777 539 size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
ysr@777 540
ysr@777 541 void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
ysr@777 542 _from_card_cache_max_regions = max_regions;
ysr@777 543
ysr@777 544 int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
ysr@777 545 _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs);
ysr@777 546 for (int i = 0; i < n_par_rs; i++) {
ysr@777 547 _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions);
ysr@777 548 for (size_t j = 0; j < max_regions; j++) {
ysr@777 549 _from_card_cache[i][j] = -1; // An invalid value.
ysr@777 550 }
ysr@777 551 }
ysr@777 552 _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int);
ysr@777 553 }
ysr@777 554
ysr@777 555 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
ysr@777 556 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
ysr@777 557 assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
ysr@777 558 for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
ysr@777 559 _from_card_cache[i][j] = -1; // An invalid value.
ysr@777 560 }
ysr@777 561 }
ysr@777 562 }
ysr@777 563
ysr@777 564 #ifndef PRODUCT
ysr@777 565 void OtherRegionsTable::print_from_card_cache() {
ysr@777 566 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
ysr@777 567 for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
ysr@777 568 gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
ysr@777 569 i, j, _from_card_cache[i][j]);
ysr@777 570 }
ysr@777 571 }
ysr@777 572 }
ysr@777 573 #endif
ysr@777 574
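// add_reference records that the card containing "from" may hold a pointer
// into this table's region. The representation escalates in stages: a
// per-thread from-card cache filters immediately repeated cards, the sparse
// table holds a handful of cards per source region, a fine-grain
// PerRegionTable keeps one bit per card of a source region, and when the
// fine-grain limit is hit delete_region_table() evicts the most occupied
// fine-grain table and marks its whole region with a single coarse bit.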
ysr@1280 575 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
ysr@777 576 size_t cur_hrs_ind = hr()->hrs_index();
ysr@777 577
ysr@777 578 #if HRRS_VERBOSE
ysr@777 579 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
ysr@1280 580 from,
ysr@1280 581 UseCompressedOops
ysr@1280 582 ? oopDesc::load_decode_heap_oop((narrowOop*)from)
ysr@1280 583 : oopDesc::load_decode_heap_oop((oop*)from));
ysr@777 584 #endif
ysr@777 585
ysr@777 586 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
ysr@777 587
ysr@777 588 #if HRRS_VERBOSE
ysr@777 589 gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
ysr@777 590 hr()->bottom(), from_card,
ysr@777 591 _from_card_cache[tid][cur_hrs_ind]);
ysr@777 592 #endif
ysr@777 593
ysr@777 594 #define COUNT_CACHE 0
ysr@777 595 #if COUNT_CACHE
ysr@777 596 jint p = Atomic::add(1, &_cache_probes);
ysr@777 597 if ((p % 10000) == 0) {
ysr@777 598 jint hits = _cache_hits;
ysr@777 599 gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.",
ysr@777 600 _cache_hits, p, 100.0* (float)hits/(float)p);
ysr@777 601 }
ysr@777 602 #endif
ysr@777 603 if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
ysr@777 604 #if HRRS_VERBOSE
ysr@777 605 gclog_or_tty->print_cr(" from-card cache hit.");
ysr@777 606 #endif
ysr@777 607 #if COUNT_CACHE
ysr@777 608 Atomic::inc(&_cache_hits);
ysr@777 609 #endif
ysr@777 610 assert(contains_reference(from), "We just added it!");
ysr@777 611 return;
ysr@777 612 } else {
ysr@777 613 _from_card_cache[tid][cur_hrs_ind] = from_card;
ysr@777 614 }
ysr@777 615
ysr@777 616 // Note that this may be a continued H region.
ysr@777 617 HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
johnc@1242 618 RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();
ysr@777 619
ysr@777 620 // If the region is already coarsened, return.
ysr@777 621 if (_coarse_map.at(from_hrs_ind)) {
ysr@777 622 #if HRRS_VERBOSE
ysr@777 623 gclog_or_tty->print_cr(" coarse map hit.");
ysr@777 624 #endif
ysr@777 625 assert(contains_reference(from), "We just added it!");
ysr@777 626 return;
ysr@777 627 }
ysr@777 628
ysr@777 629 // Otherwise find a per-region table to add it to.
ysr@777 630 size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
ysr@777 631 PosParPRT* prt = find_region_table(ind, from_hr);
ysr@777 632 if (prt == NULL) {
ysr@777 633 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
ysr@777 634 // Confirm that it's really not there...
ysr@777 635 prt = find_region_table(ind, from_hr);
ysr@777 636 if (prt == NULL) {
ysr@777 637
ysr@777 638 uintptr_t from_hr_bot_card_index =
ysr@777 639 uintptr_t(from_hr->bottom())
ysr@777 640 >> CardTableModRefBS::card_shift;
johnc@1242 641 CardIdx_t card_index = from_card - from_hr_bot_card_index;
tonyp@1377 642 assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
ysr@777 643 "Must be in range.");
ysr@777 644 if (G1HRRSUseSparseTable &&
johnc@1242 645 _sparse_table.add_card(from_hrs_ind, card_index)) {
ysr@777 646 if (G1RecordHRRSOops) {
ysr@777 647 HeapRegionRemSet::record(hr(), from);
ysr@777 648 #if HRRS_VERBOSE
ysr@777 649 gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
ysr@777 650 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
ysr@777 651 align_size_down(uintptr_t(from),
ysr@777 652 CardTableModRefBS::card_size),
ysr@777 653 hr()->bottom(), from);
ysr@777 654 #endif
ysr@777 655 }
ysr@777 656 #if HRRS_VERBOSE
ysr@777 657 gclog_or_tty->print_cr(" added card to sparse table.");
ysr@777 658 #endif
ysr@777 659 assert(contains_reference_locked(from), "We just added it!");
ysr@777 660 return;
ysr@777 661 } else {
ysr@777 662 #if HRRS_VERBOSE
ysr@777 663 gclog_or_tty->print_cr(" [tid %d] sparse table entry "
ysr@777 664 "overflow(f: %d, t: %d)",
ysr@777 665 tid, from_hrs_ind, cur_hrs_ind);
ysr@777 666 #endif
ysr@777 667 }
ysr@777 668
ysr@777 669 if (_n_fine_entries == _max_fine_entries) {
ysr@777 670 prt = delete_region_table();
ysr@777 671 } else {
ysr@777 672 prt = PosParPRT::alloc(from_hr);
ysr@777 673 }
ysr@777 674 prt->init(from_hr);
ysr@777 675
ysr@777 676 PosParPRT* first_prt = _fine_grain_regions[ind];
ysr@777 677 prt->set_next(first_prt); // XXX Maybe move to init?
ysr@777 678 _fine_grain_regions[ind] = prt;
ysr@777 679 _n_fine_entries++;
ysr@777 680
ysr@777 681 if (G1HRRSUseSparseTable) {
iveresov@1696 682 // Transfer from sparse to fine-grain.
iveresov@1696 683 SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
iveresov@1696 684 assert(sprt_entry != NULL, "There should have been an entry");
iveresov@1696 685 for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
iveresov@1696 686 CardIdx_t c = sprt_entry->card(i);
ysr@777 687 if (c != SparsePRTEntry::NullEntry) {
ysr@777 688 prt->add_card(c);
ysr@777 689 }
ysr@777 690 }
ysr@777 691 // Now we can delete the sparse entry.
johnc@1242 692 bool res = _sparse_table.delete_entry(from_hrs_ind);
ysr@777 693 assert(res, "It should have been there.");
ysr@777 694 }
ysr@777 695 }
ysr@777 696 assert(prt != NULL && prt->hr() == from_hr, "consequence");
ysr@777 697 }
ysr@777 698 // Note that we can't assert "prt->hr() == from_hr", because of the
ysr@777 699 // possibility of concurrent reuse. But see head comment of
ysr@777 700 // OtherRegionsTable for why this is OK.
ysr@777 701 assert(prt != NULL, "Inv");
ysr@777 702
tonyp@1694 703 if (prt->should_expand(tid)) {
tonyp@1694 704 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
tonyp@1694 705 HeapRegion* prt_hr = prt->hr();
tonyp@1694 706 if (prt_hr == from_hr) {
tonyp@1694 707 // Make sure the table still corresponds to the same region
tonyp@1694 708 prt->par_expand();
tonyp@1694 709 prt->add_reference(from, tid);
tonyp@1694 710 }
tonyp@1694 711 // else: The table has been concurrently coarsened, evicted, and
tonyp@1694 712 // the table data structure re-used for another table. So, we
tonyp@1694 713 // don't need to add the reference any more given that the table
tonyp@1694 714 // has been coarsened and the whole region will be scanned anyway.
tonyp@1694 715 } else {
tonyp@1694 716 prt->add_reference(from, tid);
tonyp@1694 717 }
ysr@777 718 if (G1RecordHRRSOops) {
ysr@777 719 HeapRegionRemSet::record(hr(), from);
ysr@777 720 #if HRRS_VERBOSE
ysr@777 721 gclog_or_tty->print("Added card " PTR_FORMAT " to region "
ysr@777 722 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
ysr@777 723 align_size_down(uintptr_t(from),
ysr@777 724 CardTableModRefBS::card_size),
ysr@777 725 hr()->bottom(), from);
ysr@777 726 #endif
ysr@777 727 }
ysr@777 728 assert(contains_reference(from), "We just added it!");
ysr@777 729 }
ysr@777 730
ysr@777 731 PosParPRT*
ysr@777 732 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
ysr@777 733 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
ysr@777 734 PosParPRT* prt = _fine_grain_regions[ind];
ysr@777 735 while (prt != NULL && prt->hr() != hr) {
ysr@777 736 prt = prt->next();
ysr@777 737 }
ysr@777 738 // Loop postcondition is the method postcondition.
ysr@777 739 return prt;
ysr@777 740 }
ysr@777 741
ysr@777 742
ysr@777 743 #define DRT_CENSUS 0
ysr@777 744
ysr@777 745 #if DRT_CENSUS
ysr@777 746 static const int HistoSize = 6;
ysr@777 747 static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
ysr@777 748 static int coarsenings = 0;
ysr@777 749 static int occ_sum = 0;
ysr@777 750 #endif
ysr@777 751
ysr@777 752 jint OtherRegionsTable::_n_coarsenings = 0;
ysr@777 753
ysr@777 754 PosParPRT* OtherRegionsTable::delete_region_table() {
ysr@777 755 #if DRT_CENSUS
ysr@777 756 int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
ysr@777 757 const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 };
ysr@777 758 #endif
ysr@777 759
ysr@777 760 assert(_m.owned_by_self(), "Precondition");
ysr@777 761 assert(_n_fine_entries == _max_fine_entries, "Precondition");
ysr@777 762 PosParPRT* max = NULL;
ysr@777 763 jint max_occ = 0;
ysr@777 764 PosParPRT** max_prev;
ysr@777 765 size_t max_ind;
ysr@777 766
ysr@777 767 #if SAMPLE_FOR_EVICTION
ysr@777 768 size_t i = _fine_eviction_start;
ysr@777 769 for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
ysr@777 770 size_t ii = i;
ysr@777 771 // Make sure we get a non-NULL sample.
ysr@777 772 while (_fine_grain_regions[ii] == NULL) {
ysr@777 773 ii++;
ysr@777 774 if (ii == _max_fine_entries) ii = 0;
ysr@777 775 guarantee(ii != i, "We must find one.");
ysr@777 776 }
ysr@777 777 PosParPRT** prev = &_fine_grain_regions[ii];
ysr@777 778 PosParPRT* cur = *prev;
ysr@777 779 while (cur != NULL) {
ysr@777 780 jint cur_occ = cur->occupied();
ysr@777 781 if (max == NULL || cur_occ > max_occ) {
ysr@777 782 max = cur;
ysr@777 783 max_prev = prev;
ysr@777 784 max_ind = i;
ysr@777 785 max_occ = cur_occ;
ysr@777 786 }
ysr@777 787 prev = cur->next_addr();
ysr@777 788 cur = cur->next();
ysr@777 789 }
ysr@777 790 i = i + _fine_eviction_stride;
ysr@777 791 if (i >= _n_fine_entries) i = i - _n_fine_entries;
ysr@777 792 }
ysr@777 793 _fine_eviction_start++;
ysr@777 794 if (_fine_eviction_start >= _n_fine_entries)
ysr@777 795 _fine_eviction_start -= _n_fine_entries;
ysr@777 796 #else
ysr@777 797 for (int i = 0; i < _max_fine_entries; i++) {
ysr@777 798 PosParPRT** prev = &_fine_grain_regions[i];
ysr@777 799 PosParPRT* cur = *prev;
ysr@777 800 while (cur != NULL) {
ysr@777 801 jint cur_occ = cur->occupied();
ysr@777 802 #if DRT_CENSUS
ysr@777 803 for (int k = 0; k < HistoSize; k++) {
ysr@777 804 if (cur_occ <= histo_limits[k]) {
ysr@777 805 histo[k]++; global_histo[k]++; break;
ysr@777 806 }
ysr@777 807 }
ysr@777 808 #endif
ysr@777 809 if (max == NULL || cur_occ > max_occ) {
ysr@777 810 max = cur;
ysr@777 811 max_prev = prev;
ysr@777 812 max_ind = i;
ysr@777 813 max_occ = cur_occ;
ysr@777 814 }
ysr@777 815 prev = cur->next_addr();
ysr@777 816 cur = cur->next();
ysr@777 817 }
ysr@777 818 }
ysr@777 819 #endif
ysr@777 820 // XXX
ysr@777 821 guarantee(max != NULL, "Since _n_fine_entries > 0");
ysr@777 822 #if DRT_CENSUS
ysr@777 823 gclog_or_tty->print_cr("In a coarsening: histo of occs:");
ysr@777 824 for (int k = 0; k < HistoSize; k++) {
ysr@777 825 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], histo[k]);
ysr@777 826 }
ysr@777 827 coarsenings++;
ysr@777 828 occ_sum += max_occ;
ysr@777 829 if ((coarsenings % 100) == 0) {
ysr@777 830 gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings);
ysr@777 831 for (int k = 0; k < HistoSize; k++) {
ysr@777 832 gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], global_histo[k]);
ysr@777 833 }
ysr@777 834 gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.",
ysr@777 835 (float)occ_sum/(float)coarsenings);
ysr@777 836 }
ysr@777 837 #endif
ysr@777 838
ysr@777 839 // Set the corresponding coarse bit.
tonyp@2963 840 size_t max_hrs_index = max->hr()->hrs_index();
ysr@777 841 if (!_coarse_map.at(max_hrs_index)) {
ysr@777 842 _coarse_map.at_put(max_hrs_index, true);
ysr@777 843 _n_coarse_entries++;
ysr@777 844 #if 0
ysr@777 845 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
ysr@777 846 "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
ysr@777 847 hr()->bottom(),
ysr@777 848 max->hr()->bottom(),
ysr@777 849 _n_coarse_entries);
ysr@777 850 #endif
ysr@777 851 }
ysr@777 852
ysr@777 853 // Unsplice.
ysr@777 854 *max_prev = max->next();
ysr@777 855 Atomic::inc(&_n_coarsenings);
ysr@777 856 _n_fine_entries--;
ysr@777 857 return max;
ysr@777 858 }
ysr@777 859
ysr@777 860
ysr@777 861 // At present, this must be called stop-world single-threaded.
ysr@777 862 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
ysr@777 863 BitMap* region_bm, BitMap* card_bm) {
ysr@777 864 // First eliminate garbage regions from the coarse map.
ysr@777 865 if (G1RSScrubVerbose)
tonyp@2963 866 gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
tonyp@2963 867 hr()->hrs_index());
ysr@777 868
ysr@777 869 assert(_coarse_map.size() == region_bm->size(), "Precondition");
ysr@777 870 if (G1RSScrubVerbose)
ysr@777 871 gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries);
ysr@777 872 _coarse_map.set_intersection(*region_bm);
ysr@777 873 _n_coarse_entries = _coarse_map.count_one_bits();
ysr@777 874 if (G1RSScrubVerbose)
ysr@777 875 gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries);
ysr@777 876
ysr@777 877 // Now do the fine-grained maps.
ysr@777 878 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 879 PosParPRT* cur = _fine_grain_regions[i];
ysr@777 880 PosParPRT** prev = &_fine_grain_regions[i];
ysr@777 881 while (cur != NULL) {
ysr@777 882 PosParPRT* nxt = cur->next();
ysr@777 883 // If the entire region is dead, eliminate.
ysr@777 884 if (G1RSScrubVerbose)
tonyp@2963 885 gclog_or_tty->print_cr(" For other region "SIZE_FORMAT":",
tonyp@2963 886 cur->hr()->hrs_index());
ysr@777 887 if (!region_bm->at(cur->hr()->hrs_index())) {
ysr@777 888 *prev = nxt;
ysr@777 889 cur->set_next(NULL);
ysr@777 890 _n_fine_entries--;
ysr@777 891 if (G1RSScrubVerbose)
ysr@777 892 gclog_or_tty->print_cr(" deleted via region map.");
ysr@777 893 PosParPRT::free(cur);
ysr@777 894 } else {
ysr@777 895 // Do fine-grain elimination.
ysr@777 896 if (G1RSScrubVerbose)
ysr@777 897 gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
ysr@777 898 cur->scrub(ctbs, card_bm);
ysr@777 899 if (G1RSScrubVerbose)
ysr@777 900 gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
ysr@777 901 // Did that empty the table completely?
ysr@777 902 if (cur->occupied() == 0) {
ysr@777 903 *prev = nxt;
ysr@777 904 cur->set_next(NULL);
ysr@777 905 _n_fine_entries--;
ysr@777 906 PosParPRT::free(cur);
ysr@777 907 } else {
ysr@777 908 prev = cur->next_addr();
ysr@777 909 }
ysr@777 910 }
ysr@777 911 cur = nxt;
ysr@777 912 }
ysr@777 913 }
ysr@777 914 // Since we may have deleted a from_card_cache entry from the RS, clear
ysr@777 915 // the FCC.
ysr@777 916 clear_fcc();
ysr@777 917 }
ysr@777 918
ysr@777 919
ysr@777 920 size_t OtherRegionsTable::occupied() const {
ysr@777 921 // Cast away const in this case.
ysr@777 922 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
ysr@777 923 size_t sum = occ_fine();
ysr@777 924 sum += occ_sparse();
ysr@777 925 sum += occ_coarse();
ysr@777 926 return sum;
ysr@777 927 }
ysr@777 928
ysr@777 929 size_t OtherRegionsTable::occ_fine() const {
ysr@777 930 size_t sum = 0;
ysr@777 931 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 932 PosParPRT* cur = _fine_grain_regions[i];
ysr@777 933 while (cur != NULL) {
ysr@777 934 sum += cur->occupied();
ysr@777 935 cur = cur->next();
ysr@777 936 }
ysr@777 937 }
ysr@777 938 return sum;
ysr@777 939 }
ysr@777 940
ysr@777 941 size_t OtherRegionsTable::occ_coarse() const {
tonyp@1377 942 return (_n_coarse_entries * HeapRegion::CardsPerRegion);
ysr@777 943 }
ysr@777 944
ysr@777 945 size_t OtherRegionsTable::occ_sparse() const {
ysr@777 946 return _sparse_table.occupied();
ysr@777 947 }
ysr@777 948
ysr@777 949 size_t OtherRegionsTable::mem_size() const {
ysr@777 950 // Cast away const in this case.
ysr@777 951 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
ysr@777 952 size_t sum = 0;
ysr@777 953 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 954 PosParPRT* cur = _fine_grain_regions[i];
ysr@777 955 while (cur != NULL) {
ysr@777 956 sum += cur->mem_size();
ysr@777 957 cur = cur->next();
ysr@777 958 }
ysr@777 959 }
ysr@777 960 sum += (sizeof(PosParPRT*) * _max_fine_entries);
ysr@777 961 sum += (_coarse_map.size_in_words() * HeapWordSize);
ysr@777 962 sum += (_sparse_table.mem_size());
ysr@777 963 sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
ysr@777 964 return sum;
ysr@777 965 }
ysr@777 966
ysr@777 967 size_t OtherRegionsTable::static_mem_size() {
ysr@777 968 return _from_card_cache_mem_size;
ysr@777 969 }
ysr@777 970
ysr@777 971 size_t OtherRegionsTable::fl_mem_size() {
ysr@777 972 return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size();
ysr@777 973 }
ysr@777 974
ysr@777 975 void OtherRegionsTable::clear_fcc() {
ysr@777 976 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
ysr@777 977 _from_card_cache[i][hr()->hrs_index()] = -1;
ysr@777 978 }
ysr@777 979 }
ysr@777 980
ysr@777 981 void OtherRegionsTable::clear() {
ysr@777 982 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
ysr@777 983 for (size_t i = 0; i < _max_fine_entries; i++) {
ysr@777 984 PosParPRT* cur = _fine_grain_regions[i];
ysr@777 985 while (cur != NULL) {
ysr@777 986 PosParPRT* nxt = cur->next();
ysr@777 987 PosParPRT::free(cur);
ysr@777 988 cur = nxt;
ysr@777 989 }
ysr@777 990 _fine_grain_regions[i] = NULL;
ysr@777 991 }
ysr@777 992 _sparse_table.clear();
ysr@777 993 _coarse_map.clear();
ysr@777 994 _n_fine_entries = 0;
ysr@777 995 _n_coarse_entries = 0;
ysr@777 996
ysr@777 997 clear_fcc();
ysr@777 998 }
ysr@777 999
ysr@777 1000 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
ysr@777 1001 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
tonyp@2963 1002 size_t hrs_ind = from_hr->hrs_index();
ysr@777 1003 size_t ind = hrs_ind & _mod_max_fine_entries_mask;
ysr@777 1004 if (del_single_region_table(ind, from_hr)) {
ysr@777 1005 assert(!_coarse_map.at(hrs_ind), "Inv");
ysr@777 1006 } else {
ysr@777 1007 _coarse_map.par_at_put(hrs_ind, 0);
ysr@777 1008 }
ysr@777 1009 // Check to see if any of the fcc entries come from here.
tonyp@2963 1010 size_t hr_ind = hr()->hrs_index();
ysr@777 1011 for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
ysr@777 1012 int fcc_ent = _from_card_cache[tid][hr_ind];
ysr@777 1013 if (fcc_ent != -1) {
ysr@777 1014 HeapWord* card_addr = (HeapWord*)
ysr@777 1015 (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
ysr@777 1016 if (hr()->is_in_reserved(card_addr)) {
ysr@777 1017 // Clear the from card cache.
ysr@777 1018 _from_card_cache[tid][hr_ind] = -1;
ysr@777 1019 }
ysr@777 1020 }
ysr@777 1021 }
ysr@777 1022 }
ysr@777 1023
ysr@777 1024 bool OtherRegionsTable::del_single_region_table(size_t ind,
ysr@777 1025 HeapRegion* hr) {
ysr@777 1026 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
ysr@777 1027 PosParPRT** prev_addr = &_fine_grain_regions[ind];
ysr@777 1028 PosParPRT* prt = *prev_addr;
ysr@777 1029 while (prt != NULL && prt->hr() != hr) {
ysr@777 1030 prev_addr = prt->next_addr();
ysr@777 1031 prt = prt->next();
ysr@777 1032 }
ysr@777 1033 if (prt != NULL) {
ysr@777 1034 assert(prt->hr() == hr, "Loop postcondition.");
ysr@777 1035 *prev_addr = prt->next();
ysr@777 1036 PosParPRT::free(prt);
ysr@777 1037 _n_fine_entries--;
ysr@777 1038 return true;
ysr@777 1039 } else {
ysr@777 1040 return false;
ysr@777 1041 }
ysr@777 1042 }
ysr@777 1043
ysr@1280 1044 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
ysr@777 1045 // Cast away const in this case.
ysr@777 1046 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
ysr@777 1047 return contains_reference_locked(from);
ysr@777 1048 }
ysr@777 1049
ysr@1280 1050 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
ysr@777 1051 HeapRegion* hr = _g1h->heap_region_containing_raw(from);
ysr@777 1052 if (hr == NULL) return false;
johnc@1242 1053 RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
ysr@777 1054 // Is this region in the coarse map?
ysr@777 1055 if (_coarse_map.at(hr_ind)) return true;
ysr@777 1056
ysr@777 1057 PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
ysr@777 1058 hr);
ysr@777 1059 if (prt != NULL) {
ysr@777 1060 return prt->contains_reference(from);
ysr@777 1061
ysr@777 1062 } else {
ysr@777 1063 uintptr_t from_card =
ysr@777 1064 (uintptr_t(from) >> CardTableModRefBS::card_shift);
ysr@777 1065 uintptr_t hr_bot_card_index =
ysr@777 1066 uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
ysr@777 1067 assert(from_card >= hr_bot_card_index, "Inv");
johnc@1242 1068 CardIdx_t card_index = from_card - hr_bot_card_index;
tonyp@1377 1069 assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
tonyp@1377 1070 "Must be in range.");
johnc@1242 1071 return _sparse_table.contains_card(hr_ind, card_index);
ysr@777 1072 }
ysr@777 1073
ysr@777 1074
ysr@777 1075 }
ysr@777 1076
tonyp@2493 1077 void
tonyp@2493 1078 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
tonyp@2493 1079 _sparse_table.do_cleanup_work(hrrs_cleanup_task);
tonyp@2493 1080 }
tonyp@2493 1081
iveresov@1230 1082 // Determines how many threads can add records to an rset in parallel.
iveresov@1230 1083 // This can be done by either mutator threads together with the
iveresov@1230 1084 // concurrent refinement threads or GC threads.
ysr@777 1085 int HeapRegionRemSet::num_par_rem_sets() {
iveresov@1230 1086 return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
ysr@777 1087 }
ysr@777 1088
ysr@777 1089 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
ysr@777 1090 HeapRegion* hr)
tonyp@2974 1091 : _bosa(bosa), _other_regions(hr) {
tonyp@2974 1092 reset_for_par_iteration();
tonyp@2974 1093 }
ysr@777 1094
iveresov@1696 1095 void HeapRegionRemSet::setup_remset_size() {
iveresov@1696 1096 // Setup sparse and fine-grain tables sizes.
iveresov@1696 1097 // table_size = base * (log(region_size / 1M) + 1)
iveresov@1696 1098 int region_size_log_mb = MAX2((int)HeapRegion::LogOfHRGrainBytes - (int)LOG_M, 0);
iveresov@1696 1099 if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
iveresov@1696 1100 G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
iveresov@1696 1101 }
iveresov@1696 1102 if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
iveresov@1696 1103 G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
iveresov@1696 1104 }
iveresov@1696 1105 guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
iveresov@1696 1106 }
iveresov@1696 1107
ysr@777 1108 bool HeapRegionRemSet::claim_iter() {
ysr@777 1109 if (_iter_state != Unclaimed) return false;
ysr@777 1110 jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
ysr@777 1111 return (res == Unclaimed);
ysr@777 1112 }
ysr@777 1113
ysr@777 1114 void HeapRegionRemSet::set_iter_complete() {
ysr@777 1115 _iter_state = Complete;
ysr@777 1116 }
ysr@777 1117
ysr@777 1118 bool HeapRegionRemSet::iter_is_complete() {
ysr@777 1119 return _iter_state == Complete;
ysr@777 1120 }
ysr@777 1121
ysr@777 1122 void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
ysr@777 1123 iter->initialize(this);
ysr@777 1124 }
ysr@777 1125
ysr@777 1126 #ifndef PRODUCT
ysr@777 1127 void HeapRegionRemSet::print() const {
ysr@777 1128 HeapRegionRemSetIterator iter;
ysr@777 1129 init_iterator(&iter);
ysr@777 1130 size_t card_index;
ysr@777 1131 while (iter.has_next(card_index)) {
ysr@777 1132 HeapWord* card_start =
ysr@777 1133 G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
tonyp@2974 1134 gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start);
ysr@777 1135 }
ysr@777 1136 // XXX
ysr@777 1137 if (iter.n_yielded() != occupied()) {
ysr@777 1138 gclog_or_tty->print_cr("Yielded disagrees with occupied:");
ysr@777 1139 gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).",
ysr@777 1140 iter.n_yielded(),
ysr@777 1141 iter.n_yielded_coarse(), iter.n_yielded_fine());
ysr@777 1142 gclog_or_tty->print_cr(" %6d occ (%6d coarse, %6d fine).",
ysr@777 1143 occupied(), occ_coarse(), occ_fine());
ysr@777 1144 }
ysr@777 1145 guarantee(iter.n_yielded() == occupied(),
ysr@777 1146 "We should have yielded all the represented cards.");
ysr@777 1147 }
ysr@777 1148 #endif
ysr@777 1149
ysr@777 1150 void HeapRegionRemSet::cleanup() {
ysr@777 1151 SparsePRT::cleanup_all();
ysr@777 1152 }
ysr@777 1153
ysr@777 1154 void HeapRegionRemSet::par_cleanup() {
ysr@777 1155 PosParPRT::par_contract_all();
ysr@777 1156 }
ysr@777 1157
ysr@777 1158 void HeapRegionRemSet::clear() {
ysr@777 1159 _other_regions.clear();
ysr@777 1160 assert(occupied() == 0, "Should be clear.");
tonyp@2974 1161 reset_for_par_iteration();
tonyp@2974 1162 }
tonyp@2974 1163
tonyp@2974 1164 void HeapRegionRemSet::reset_for_par_iteration() {
tonyp@2974 1165 _iter_state = Unclaimed;
tonyp@2974 1166 _iter_claimed = 0;
tonyp@2974 1167 // It's good to check this to make sure that the two methods are in sync.
tonyp@2974 1168 assert(verify_ready_for_par_iteration(), "post-condition");
ysr@777 1169 }
ysr@777 1170
ysr@777 1171 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
ysr@777 1172 BitMap* region_bm, BitMap* card_bm) {
ysr@777 1173 _other_regions.scrub(ctbs, region_bm, card_bm);
ysr@777 1174 }
ysr@777 1175
ysr@777 1176 //-------------------- Iteration --------------------
ysr@777 1177
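// The iterator yields card indices from the three representations in turn:
// sparse entries first, then the fine-grain per-region bitmaps, then the
// coarse map (every card of each coarsened region). In the single-threaded
// case n_yielded() must end up equal to occupied().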
ysr@777 1178 HeapRegionRemSetIterator::
ysr@777 1179 HeapRegionRemSetIterator() :
ysr@777 1180 _hrrs(NULL),
ysr@777 1181 _g1h(G1CollectedHeap::heap()),
ysr@777 1182 _bosa(NULL),
tonyp@2239 1183 _sparse_iter() { }
ysr@777 1184
ysr@777 1185 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
ysr@777 1186 _hrrs = hrrs;
ysr@777 1187 _coarse_map = &_hrrs->_other_regions._coarse_map;
ysr@777 1188 _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
ysr@777 1189 _bosa = _hrrs->bosa();
ysr@777 1190
ysr@777 1191 _is = Sparse;
ysr@777 1192 // Set these values so that we increment to the first region.
ysr@777 1193 _coarse_cur_region_index = -1;
tonyp@1377 1194 _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
ysr@777 1195
ysr@777 1196 _cur_region_cur_card = 0;
ysr@777 1197
ysr@777 1198 _fine_array_index = -1;
ysr@777 1199 _fine_cur_prt = NULL;
ysr@777 1200
ysr@777 1201 _n_yielded_coarse = 0;
ysr@777 1202 _n_yielded_fine = 0;
ysr@777 1203 _n_yielded_sparse = 0;
ysr@777 1204
ysr@777 1205 _sparse_iter.init(&hrrs->_other_regions._sparse_table);
ysr@777 1206 }
ysr@777 1207
ysr@777 1208 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
ysr@777 1209 if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
ysr@777 1210 // Go to the next card.
ysr@777 1211 _coarse_cur_region_cur_card++;
ysr@777 1212 // Was that the last card in the current region?
tonyp@1377 1213 if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
ysr@777 1214 // Yes: find the next region. This may leave _coarse_cur_region_index
ysr@777 1215 // set to the last index, in which case there are no more coarse
ysr@777 1216 // regions.
ysr@777 1217 _coarse_cur_region_index =
ysr@777 1218 (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
ysr@777 1219 if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
ysr@777 1220 _coarse_cur_region_cur_card = 0;
ysr@777 1221 HeapWord* r_bot =
ysr@777 1222 _g1h->region_at(_coarse_cur_region_index)->bottom();
ysr@777 1223 _cur_region_card_offset = _bosa->index_for(r_bot);
ysr@777 1224 } else {
ysr@777 1225 return false;
ysr@777 1226 }
ysr@777 1227 }
ysr@777 1228 // If we didn't return false above, then we can yield a card.
ysr@777 1229 card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
ysr@777 1230 return true;
ysr@777 1231 }
ysr@777 1232
ysr@777 1233 void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
ysr@777 1234 // Otherwise, find the next bucket list in the array.
ysr@777 1235 _fine_array_index++;
ysr@777 1236 while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
ysr@777 1237 _fine_cur_prt = _fine_grain_regions[_fine_array_index];
ysr@777 1238 if (_fine_cur_prt != NULL) return;
ysr@777 1239 else _fine_array_index++;
ysr@777 1240 }
ysr@777 1241 assert(_fine_cur_prt == NULL, "Loop post");
ysr@777 1242 }
ysr@777 1243
ysr@777 1244 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
ysr@777 1245 if (fine_has_next()) {
ysr@777 1246 _cur_region_cur_card =
ysr@777 1247 _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
ysr@777 1248 }
ysr@777 1249 while (!fine_has_next()) {
tonyp@1377 1250 if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
ysr@777 1251 _cur_region_cur_card = 0;
ysr@777 1252 _fine_cur_prt = _fine_cur_prt->next();
ysr@777 1253 }
ysr@777 1254 if (_fine_cur_prt == NULL) {
ysr@777 1255 fine_find_next_non_null_prt();
ysr@777 1256 if (_fine_cur_prt == NULL) return false;
ysr@777 1257 }
ysr@777 1258 assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
ysr@777 1259 "inv.");
ysr@777 1260 HeapWord* r_bot =
ysr@777 1261 _fine_cur_prt->hr()->bottom();
ysr@777 1262 _cur_region_card_offset = _bosa->index_for(r_bot);
ysr@777 1263 _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
ysr@777 1264 }
ysr@777 1265 assert(fine_has_next(), "Or else we exited the loop via the return.");
ysr@777 1266 card_index = _cur_region_card_offset + _cur_region_cur_card;
ysr@777 1267 return true;
ysr@777 1268 }
ysr@777 1269
ysr@777 1270 bool HeapRegionRemSetIterator::fine_has_next() {
ysr@777 1271 return
ysr@777 1272 _fine_cur_prt != NULL &&
tonyp@1377 1273 _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
ysr@777 1274 }
ysr@777 1275
ysr@777 1276 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
ysr@777 1277 switch (_is) {
ysr@777 1278 case Sparse:
ysr@777 1279 if (_sparse_iter.has_next(card_index)) {
ysr@777 1280 _n_yielded_sparse++;
ysr@777 1281 return true;
ysr@777 1282 }
ysr@777 1283 // Otherwise, deliberate fall-through
ysr@777 1284 _is = Fine;
ysr@777 1285 case Fine:
ysr@777 1286 if (fine_has_next(card_index)) {
ysr@777 1287 _n_yielded_fine++;
ysr@777 1288 return true;
ysr@777 1289 }
ysr@777 1290 // Otherwise, deliberate fall-through
ysr@777 1291 _is = Coarse;
ysr@777 1292 case Coarse:
ysr@777 1293 if (coarse_has_next(card_index)) {
ysr@777 1294 _n_yielded_coarse++;
ysr@777 1295 return true;
ysr@777 1296 }
ysr@777 1297 // Otherwise...
ysr@777 1298 break;
ysr@777 1299 }
ysr@777 1300 assert(ParallelGCThreads > 1 ||
ysr@777 1301 n_yielded() == _hrrs->occupied(),
ysr@777 1302 "Should have yielded all the cards in the rem set "
ysr@777 1303 "(in the non-par case).");
ysr@777 1304 return false;
ysr@777 1305 }
ysr@777 1306
ysr@777 1307
ysr@777 1308
ysr@1280 1309 OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
ysr@1280 1310 HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
ysr@1280 1311 HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
ysr@1280 1312 int HeapRegionRemSet::_n_recorded = 0;
ysr@777 1313
ysr@777 1314 HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
ysr@777 1315 int* HeapRegionRemSet::_recorded_event_index = NULL;
ysr@777 1316 int HeapRegionRemSet::_n_recorded_events = 0;
ysr@777 1317
ysr@1280 1318 void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
ysr@777 1319 if (_recorded_oops == NULL) {
ysr@777 1320 assert(_n_recorded == 0
ysr@777 1321 && _recorded_cards == NULL
ysr@777 1322 && _recorded_regions == NULL,
ysr@777 1323 "Inv");
ysr@1280 1324 _recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
ysr@1280 1325 _recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded);
ysr@1280 1326 _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
ysr@777 1327 }
ysr@777 1328 if (_n_recorded == MaxRecorded) {
ysr@777 1329 gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
ysr@777 1330 } else {
ysr@777 1331 _recorded_cards[_n_recorded] =
ysr@777 1332 (HeapWord*)align_size_down(uintptr_t(f),
ysr@777 1333 CardTableModRefBS::card_size);
ysr@777 1334 _recorded_oops[_n_recorded] = f;
ysr@777 1335 _recorded_regions[_n_recorded] = hr;
ysr@777 1336 _n_recorded++;
ysr@777 1337 }
ysr@777 1338 }
ysr@777 1339
ysr@777 1340 void HeapRegionRemSet::record_event(Event evnt) {
ysr@777 1341 if (!G1RecordHRRSEvents) return;
ysr@777 1342
ysr@777 1343 if (_recorded_events == NULL) {
ysr@777 1344 assert(_n_recorded_events == 0
ysr@777 1345 && _recorded_event_index == NULL,
ysr@777 1346 "Inv");
ysr@777 1347 _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents);
ysr@777 1348 _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents);
ysr@777 1349 }
ysr@777 1350 if (_n_recorded_events == MaxRecordedEvents) {
ysr@777 1351 gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
ysr@777 1352 } else {
ysr@777 1353 _recorded_events[_n_recorded_events] = evnt;
ysr@777 1354 _recorded_event_index[_n_recorded_events] = _n_recorded;
ysr@777 1355 _n_recorded_events++;
ysr@777 1356 }
ysr@777 1357 }
ysr@777 1358
ysr@777 1359 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
ysr@777 1360 switch (evnt) {
ysr@777 1361 case Event_EvacStart:
ysr@777 1362 str->print("Evac Start");
ysr@777 1363 break;
ysr@777 1364 case Event_EvacEnd:
ysr@777 1365 str->print("Evac End");
ysr@777 1366 break;
ysr@777 1367 case Event_RSUpdateEnd:
ysr@777 1368 str->print("RS Update End");
ysr@777 1369 break;
ysr@777 1370 }
ysr@777 1371 }
ysr@777 1372
ysr@777 1373 void HeapRegionRemSet::print_recorded() {
ysr@777 1374 int cur_evnt = 0;
ysr@777 1375 Event cur_evnt_kind;
ysr@777 1376 int cur_evnt_ind = 0;
ysr@777 1377 if (_n_recorded_events > 0) {
ysr@777 1378 cur_evnt_kind = _recorded_events[cur_evnt];
ysr@777 1379 cur_evnt_ind = _recorded_event_index[cur_evnt];
ysr@777 1380 }
ysr@777 1381
ysr@777 1382 for (int i = 0; i < _n_recorded; i++) {
ysr@777 1383 while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
ysr@777 1384 gclog_or_tty->print("Event: ");
ysr@777 1385 print_event(gclog_or_tty, cur_evnt_kind);
ysr@777 1386 gclog_or_tty->print_cr("");
ysr@777 1387 cur_evnt++;
ysr@777 1388 if (cur_evnt < MaxRecordedEvents) {
ysr@777 1389 cur_evnt_kind = _recorded_events[cur_evnt];
ysr@777 1390 cur_evnt_ind = _recorded_event_index[cur_evnt];
ysr@777 1391 }
ysr@777 1392 }
ysr@777 1393 gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
ysr@777 1394 " for ref " PTR_FORMAT ".\n",
ysr@777 1395 _recorded_cards[i], _recorded_regions[i]->bottom(),
ysr@777 1396 _recorded_oops[i]);
ysr@777 1397 }
ysr@777 1398 }
ysr@777 1399
tonyp@2493 1400 void HeapRegionRemSet::reset_for_cleanup_tasks() {
tonyp@2493 1401 SparsePRT::reset_for_cleanup_tasks();
tonyp@2493 1402 }
tonyp@2493 1403
tonyp@2493 1404 void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
tonyp@2493 1405 _other_regions.do_cleanup_work(hrrs_cleanup_task);
tonyp@2493 1406 }
tonyp@2493 1407
tonyp@2493 1408 void
tonyp@2493 1409 HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
tonyp@2493 1410 SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
tonyp@2493 1411 }
tonyp@2493 1412
ysr@777 1413 #ifndef PRODUCT
ysr@777 1414 void HeapRegionRemSet::test() {
ysr@777 1415 os::sleep(Thread::current(), (jlong)5000, false);
ysr@777 1416 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 1417
iveresov@1696 1418 // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
ysr@777 1419 // hash bucket.
ysr@777 1420 HeapRegion* hr0 = g1h->region_at(0);
ysr@777 1421 HeapRegion* hr1 = g1h->region_at(1);
ysr@777 1422 HeapRegion* hr2 = g1h->region_at(5);
ysr@777 1423 HeapRegion* hr3 = g1h->region_at(6);
ysr@777 1424 HeapRegion* hr4 = g1h->region_at(7);
ysr@777 1425 HeapRegion* hr5 = g1h->region_at(8);
ysr@777 1426
ysr@777 1427 HeapWord* hr1_start = hr1->bottom();
ysr@777 1428 HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
ysr@777 1429 HeapWord* hr1_last = hr1->end() - 1;
ysr@777 1430
ysr@777 1431 HeapWord* hr2_start = hr2->bottom();
ysr@777 1432 HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
ysr@777 1433 HeapWord* hr2_last = hr2->end() - 1;
ysr@777 1434
ysr@777 1435 HeapWord* hr3_start = hr3->bottom();
ysr@777 1436 HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
ysr@777 1437 HeapWord* hr3_last = hr3->end() - 1;
ysr@777 1438
ysr@777 1439 HeapRegionRemSet* hrrs = hr0->rem_set();
ysr@777 1440
ysr@777 1441 // Make three references from region 0x101...
ysr@1280 1442 hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
ysr@1280 1443 hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
ysr@1280 1444 hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
ysr@777 1445
ysr@1280 1446 hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
ysr@1280 1447 hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
ysr@1280 1448 hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
ysr@777 1449
ysr@1280 1450 hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
ysr@1280 1451 hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
ysr@1280 1452 hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
ysr@777 1453
ysr@777 1454 // Now cause a coarsening.
ysr@1280 1455 hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
ysr@1280 1456 hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
ysr@777 1457
ysr@777 1458 // Now, does iteration yield these three?
ysr@777 1459 HeapRegionRemSetIterator iter;
ysr@777 1460 hrrs->init_iterator(&iter);
ysr@777 1461 size_t sum = 0;
ysr@777 1462 size_t card_index;
ysr@777 1463 while (iter.has_next(card_index)) {
ysr@777 1464 HeapWord* card_start =
ysr@777 1465 G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
ysr@777 1466 gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start);
ysr@777 1467 sum++;
ysr@777 1468 }
ysr@777 1469 guarantee(sum == 11 - 3 + 2048, "Failure");
ysr@777 1470 guarantee(sum == hrrs->occupied(), "Failure");
ysr@777 1471 }
ysr@777 1472 #endif
