Wed, 07 Jan 2015 15:15:37 +0100
8048179: Early reclaim of large objects that are referenced by a few objects
Summary: Push the remembered sets of large objects with few references into the dirty card queue at the beginning of the evacuation so that they may end up with zero remembered set entries at the end of the collection and can potentially be reclaimed. Also improve timing measurements of the early reclaim mechanism, and shorten flag names.
Reviewed-by: brutisso, jmasa, dfazunen
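For context, a minimal sketch of the candidate test this mechanism relies on, expressed in terms of occupancy_less_or_equal_than() defined later in this file. The helper name is hypothetical and G1's real candidate selection also checks that the region is humongous, among other things, so treat this as an illustration, not the patch:

// Sketch only: a remembered set small enough to fit within the sparse-table
// limit is cheap to push into the dirty card queue and re-examine during
// evacuation.
static bool is_early_reclaim_candidate(HeapRegionRemSet* rs) {
  return rs->occupancy_less_or_equal_than((size_t)G1RSetSparseRegionEntries);
}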
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
30 #include "gc_implementation/g1/heapRegionManager.inline.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/padded.inline.hpp"
33 #include "memory/space.inline.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "utilities/bitMap.inline.hpp"
36 #include "utilities/globalDefinitions.hpp"
37 #include "utilities/growableArray.hpp"
39 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
41 class PerRegionTable: public CHeapObj<mtGC> {
42 friend class OtherRegionsTable;
43 friend class HeapRegionRemSetIterator;
45 HeapRegion* _hr;
46 BitMap _bm;
47 jint _occupied;
49 // next pointer for free/allocated 'all' list
50 PerRegionTable* _next;
52 // prev pointer for the allocated 'all' list
53 PerRegionTable* _prev;
55 // next pointer in collision list
56 PerRegionTable* _collision_list_next;
58 // Global free list of PRTs
59 static PerRegionTable* _free_list;
61 protected:
62 // We need access in order to union things into the base table.
63 BitMap* bm() { return &_bm; }
65 void recount_occupied() {
66 _occupied = (jint) bm()->count_one_bits();
67 }
69 PerRegionTable(HeapRegion* hr) :
70 _hr(hr),
71 _occupied(0),
72 _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
73 _collision_list_next(NULL), _next(NULL), _prev(NULL)
74 {}
76 void add_card_work(CardIdx_t from_card, bool par) {
77 if (!_bm.at(from_card)) {
78 if (par) {
79 if (_bm.par_at_put(from_card, 1)) {
80 Atomic::inc(&_occupied);
81 }
82 } else {
83 _bm.at_put(from_card, 1);
84 _occupied++;
85 }
86 }
87 }
89 void add_reference_work(OopOrNarrowOopStar from, bool par) {
90 // Must make this robust in case "from" is not in "_hr", because of
91 // concurrency.
93 if (G1TraceHeapRegionRememberedSet) {
94 gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
95 from,
96 UseCompressedOops
97 ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
98 : (void *)oopDesc::load_decode_heap_oop((oop*)from));
99 }
101 HeapRegion* loc_hr = hr();
102 // If the test below fails, then this table was reused concurrently
103 // with this operation. This is OK, since the old table was coarsened,
104 // and adding a bit to the new table is never incorrect.
105 // If the table used to belong to a "continues humongous" region and is
106 // now reused for the corresponding "starts humongous" region, we need to
107 // make sure that we detect this. Thus, we call is_in_reserved_raw()
108 // instead of just is_in_reserved() here.
109 if (loc_hr->is_in_reserved_raw(from)) {
110 size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
111 CardIdx_t from_card = (CardIdx_t)
112 hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
114 assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
115 "Must be in range.");
116 add_card_work(from_card, par);
117 }
118 }
120 public:
122 HeapRegion* hr() const { return _hr; }
124 jint occupied() const {
125 // Overkill, but if we ever need it...
126 // guarantee(_occupied == _bm.count_one_bits(), "Check");
127 return _occupied;
128 }
130 void init(HeapRegion* hr, bool clear_links_to_all_list) {
131 if (clear_links_to_all_list) {
132 set_next(NULL);
133 set_prev(NULL);
134 }
135 _hr = hr;
136 _collision_list_next = NULL;
137 _occupied = 0;
138 _bm.clear();
139 }
141 void add_reference(OopOrNarrowOopStar from) {
142 add_reference_work(from, /*parallel*/ true);
143 }
145 void seq_add_reference(OopOrNarrowOopStar from) {
146 add_reference_work(from, /*parallel*/ false);
147 }
149 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
150 HeapWord* hr_bot = hr()->bottom();
151 size_t hr_first_card_index = ctbs->index_for(hr_bot);
152 bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
153 recount_occupied();
154 }
156 void add_card(CardIdx_t from_card_index) {
157 add_card_work(from_card_index, /*parallel*/ true);
158 }
160 void seq_add_card(CardIdx_t from_card_index) {
161 add_card_work(from_card_index, /*parallel*/ false);
162 }
164 // (Destructively) union the bitmap of the current table into the given
165 // bitmap (which is assumed to be of the same size).
166 void union_bitmap_into(BitMap* bm) {
167 bm->set_union(_bm);
168 }
170 // Mem size in bytes.
171 size_t mem_size() const {
172 return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
173 }
175 // Requires "from" to be in "hr()".
176 bool contains_reference(OopOrNarrowOopStar from) const {
177 assert(hr()->is_in_reserved(from), "Precondition.");
178 size_t card_ind = pointer_delta(from, hr()->bottom(),
179 CardTableModRefBS::card_size);
180 return _bm.at(card_ind);
181 }
183 // Bulk-free the PRTs from prt to last, assumes that they are
184 // linked together using their _next field.
185 static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
186 while (true) {
187 PerRegionTable* fl = _free_list;
188 last->set_next(fl);
189 PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
190 if (res == fl) {
191 return;
192 }
193 }
194 ShouldNotReachHere();
195 }
197 static void free(PerRegionTable* prt) {
198 bulk_free(prt, prt);
199 }
201 // Returns an initialized PerRegionTable instance.
202 static PerRegionTable* alloc(HeapRegion* hr) {
203 PerRegionTable* fl = _free_list;
204 while (fl != NULL) {
205 PerRegionTable* nxt = fl->next();
206 PerRegionTable* res =
207 (PerRegionTable*)
208 Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
209 if (res == fl) {
210 fl->init(hr, true);
211 return fl;
212 } else {
213 fl = _free_list;
214 }
215 }
216 assert(fl == NULL, "Loop condition.");
217 return new PerRegionTable(hr);
218 }
220 PerRegionTable* next() const { return _next; }
221 void set_next(PerRegionTable* next) { _next = next; }
222 PerRegionTable* prev() const { return _prev; }
223 void set_prev(PerRegionTable* prev) { _prev = prev; }
225 // Accessor and Modification routines for the pointer for the
226 // singly linked collision list that links the PRTs within the
227 // OtherRegionsTable::_fine_grain_regions hash table.
228 //
229 // It might be useful to also make the collision list doubly linked
230 // to avoid iteration over the collisions list during scrubbing/deletion.
231 // OTOH there might not be many collisions.
233 PerRegionTable* collision_list_next() const {
234 return _collision_list_next;
235 }
237 void set_collision_list_next(PerRegionTable* next) {
238 _collision_list_next = next;
239 }
241 PerRegionTable** collision_list_next_addr() {
242 return &_collision_list_next;
243 }
245 static size_t fl_mem_size() {
246 PerRegionTable* cur = _free_list;
247 size_t res = 0;
248 while (cur != NULL) {
249 res += cur->mem_size();
250 cur = cur->next();
251 }
252 return res;
253 }
255 static void test_fl_mem_size();
256 };
258 PerRegionTable* PerRegionTable::_free_list = NULL;
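// The alloc()/bulk_free() pair above is a lock-free Treiber stack built on
// compare-and-swap. A standalone sketch of the same pattern in portable C++11
// (HotSpot uses Atomic::cmpxchg_ptr rather than std::atomic; like the code
// above, the simple pop is ABA-exposed in the general case):
#include <atomic>

struct Node { Node* next; };
static std::atomic<Node*> g_free_list(nullptr);

// Push the chain [first..last] in one shot; retry until the CAS wins.
static void stack_bulk_free(Node* first, Node* last) {
  Node* head = g_free_list.load();
  do {
    last->next = head;  // head is refreshed on CAS failure
  } while (!g_free_list.compare_exchange_weak(head, first));
}

// Pop one node, or return nullptr if the list is empty.
static Node* stack_alloc() {
  Node* head = g_free_list.load();
  while (head != nullptr &&
         !g_free_list.compare_exchange_weak(head, head->next)) {
    // retry with the refreshed head
  }
  return head;
}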
260 size_t OtherRegionsTable::_max_fine_entries = 0;
261 size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
262 size_t OtherRegionsTable::_fine_eviction_stride = 0;
263 size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
265 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
266 _g1h(G1CollectedHeap::heap()),
267 _hr(hr), _m(m),
268 _coarse_map(G1CollectedHeap::heap()->max_regions(),
269 false /* in-resource-area */),
270 _fine_grain_regions(NULL),
271 _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
272 _n_fine_entries(0), _n_coarse_entries(0),
273 _fine_eviction_start(0),
274 _sparse_table(hr)
275 {
276 typedef PerRegionTable* PerRegionTablePtr;
278 if (_max_fine_entries == 0) {
279 assert(_mod_max_fine_entries_mask == 0, "Both or none.");
280 size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
281 _max_fine_entries = (size_t)1 << max_entries_log;
282 _mod_max_fine_entries_mask = _max_fine_entries - 1;
284 assert(_fine_eviction_sample_size == 0
285 && _fine_eviction_stride == 0, "All init at same time.");
286 _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
287 _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
288 }
290 _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
291 mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
293 if (_fine_grain_regions == NULL) {
294 vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
295 "Failed to allocate _fine_grain_entries.");
296 }
298 for (size_t i = 0; i < _max_fine_entries; i++) {
299 _fine_grain_regions[i] = NULL;
300 }
301 }
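// _max_fine_entries is forced to a power of two above so that the hash index
// can be computed with a mask rather than a modulo, exactly as add_reference()
// does with "from_hrm_ind & _mod_max_fine_entries_mask". A standalone
// illustration (the entry count is an assumed value, not a G1 default):
#include <cstddef>
#include <cstdio>

int main() {
  size_t entries = 256;                          // assume G1RSetRegionEntries == 256
  size_t log2 = 0;
  while (((size_t)1 << (log2 + 1)) <= entries) log2++;
  size_t max_fine_entries = (size_t)1 << log2;   // 256
  size_t mask = max_fine_entries - 1;            // 0xff
  size_t from_hrm_ind = 1234;                    // some source region index
  printf("bucket = %zu\n", from_hrm_ind & mask); // 1234 % 256 == 210
  return 0;
}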
303 void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
304 // We always append to the beginning of the list for convenience;
305 // the order of entries in this list does not matter.
306 if (_first_all_fine_prts != NULL) {
307 assert(_first_all_fine_prts->prev() == NULL, "invariant");
308 _first_all_fine_prts->set_prev(prt);
309 prt->set_next(_first_all_fine_prts);
310 } else {
311 // this is the first element we insert. Adjust the "last" pointer
312 _last_all_fine_prts = prt;
313 assert(prt->next() == NULL, "just checking");
314 }
315 // the new element is always the first element without a predecessor
316 prt->set_prev(NULL);
317 _first_all_fine_prts = prt;
319 assert(prt->prev() == NULL, "just checking");
320 assert(_first_all_fine_prts == prt, "just checking");
321 assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
322 (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
323 "just checking");
324 assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
325 "just checking");
326 assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
327 "just checking");
328 }
330 void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
331 if (prt->prev() != NULL) {
332 assert(_first_all_fine_prts != prt, "just checking");
333 prt->prev()->set_next(prt->next());
334 // removing the last element in the list?
335 if (_last_all_fine_prts == prt) {
336 _last_all_fine_prts = prt->prev();
337 }
338 } else {
339 assert(_first_all_fine_prts == prt, "just checking");
340 _first_all_fine_prts = prt->next();
341 // list is empty now?
342 if (_first_all_fine_prts == NULL) {
343 _last_all_fine_prts = NULL;
344 }
345 }
347 if (prt->next() != NULL) {
348 prt->next()->set_prev(prt->prev());
349 }
351 prt->set_next(NULL);
352 prt->set_prev(NULL);
354 assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
355 (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
356 "just checking");
357 assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
358 "just checking");
359 assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
360 "just checking");
361 }
363 int** FromCardCache::_cache = NULL;
364 uint FromCardCache::_max_regions = 0;
365 size_t FromCardCache::_static_mem_size = 0;
367 void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
368 guarantee(_cache == NULL, "Should not call this multiple times");
370 _max_regions = max_num_regions;
371 _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
372 _max_regions,
373 &_static_mem_size);
375 invalidate(0, _max_regions);
376 }
378 void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
379 guarantee((size_t)start_idx + new_num_regions <= max_uintx,
380 err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT,
381 start_idx, new_num_regions));
382 for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
383 uint end_idx = (start_idx + (uint)new_num_regions);
384 assert(end_idx <= _max_regions, "Must be within max.");
385 for (uint j = start_idx; j < end_idx; j++) {
386 set(i, j, InvalidCard);
387 }
388 }
389 }
391 #ifndef PRODUCT
392 void FromCardCache::print(outputStream* out) {
393 for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
394 for (uint j = 0; j < _max_regions; j++) {
395 out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
396 i, j, at(i, j));
397 }
398 }
399 }
400 #endif
402 void FromCardCache::clear(uint region_idx) {
403 uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
404 for (uint i = 0; i < num_par_remsets; i++) {
405 set(i, region_idx, InvalidCard);
406 }
407 }
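// FromCardCache is a per-worker, per-region cache of the last card inserted;
// add_reference() consults it first to filter duplicate insertions cheaply.
// A standalone sketch of the contains_or_replace() idea, assuming its
// replace-on-miss semantics (the real class pads rows to avoid false sharing
// between workers; this sketch does not):
#include <vector>

class LastCardCache {
  static const int InvalidCard = -1;
  std::vector<std::vector<int> > _cache;  // [worker][region] -> last card seen
public:
  LastCardCache(unsigned workers, unsigned regions)
    : _cache(workers, std::vector<int>(regions, InvalidCard)) {}

  // True if 'card' was already cached for (worker, region); otherwise cache it.
  bool contains_or_replace(unsigned worker, unsigned region, int card) {
    if (_cache[worker][region] == card) return true;
    _cache[worker][region] = card;
    return false;
  }

  // Same job as FromCardCache::clear(): forget a region across all workers.
  void clear(unsigned region) {
    for (size_t w = 0; w < _cache.size(); w++) _cache[w][region] = InvalidCard;
  }
};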
409 void OtherRegionsTable::initialize(uint max_regions) {
410 FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
411 }
413 void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
414 FromCardCache::invalidate(start_idx, num_regions);
415 }
417 void OtherRegionsTable::print_from_card_cache() {
418 FromCardCache::print();
419 }
421 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
422 uint cur_hrm_ind = hr()->hrm_index();
424 if (G1TraceHeapRegionRememberedSet) {
425 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
426 from,
427 UseCompressedOops
428 ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
429 : (void *)oopDesc::load_decode_heap_oop((oop*)from));
430 }
432 int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
434 if (G1TraceHeapRegionRememberedSet) {
435 gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
436 hr()->bottom(), from_card,
437 FromCardCache::at((uint)tid, cur_hrm_ind));
438 }
440 if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
441 if (G1TraceHeapRegionRememberedSet) {
442 gclog_or_tty->print_cr(" from-card cache hit.");
443 }
444 assert(contains_reference(from), "We just added it!");
445 return;
446 }
448 // Note that this may be a continued H region.
449 HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
450 RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();
452 // If the region is already coarsened, return.
453 if (_coarse_map.at(from_hrm_ind)) {
454 if (G1TraceHeapRegionRememberedSet) {
455 gclog_or_tty->print_cr(" coarse map hit.");
456 }
457 assert(contains_reference(from), "We just added it!");
458 return;
459 }
461 // Otherwise find a per-region table to add it to.
462 size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
463 PerRegionTable* prt = find_region_table(ind, from_hr);
464 if (prt == NULL) {
465 MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
466 // Confirm that it's really not there...
467 prt = find_region_table(ind, from_hr);
468 if (prt == NULL) {
470 uintptr_t from_hr_bot_card_index =
471 uintptr_t(from_hr->bottom())
472 >> CardTableModRefBS::card_shift;
473 CardIdx_t card_index = from_card - from_hr_bot_card_index;
474 assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
475 "Must be in range.");
476 if (G1HRRSUseSparseTable &&
477 _sparse_table.add_card(from_hrm_ind, card_index)) {
478 if (G1RecordHRRSOops) {
479 HeapRegionRemSet::record(hr(), from);
480 if (G1TraceHeapRegionRememberedSet) {
481 gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
482 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
483 align_size_down(uintptr_t(from),
484 CardTableModRefBS::card_size),
485 hr()->bottom(), from);
486 }
487 }
488 if (G1TraceHeapRegionRememberedSet) {
489 gclog_or_tty->print_cr(" added card to sparse table.");
490 }
491 assert(contains_reference_locked(from), "We just added it!");
492 return;
493 } else {
494 if (G1TraceHeapRegionRememberedSet) {
495 gclog_or_tty->print_cr(" [tid %d] sparse table entry "
496 "overflow(f: %d, t: %u)",
497 tid, from_hrm_ind, cur_hrm_ind);
498 }
499 }
501 if (_n_fine_entries == _max_fine_entries) {
502 prt = delete_region_table();
503 // There is no need to clear the links to the 'all' list here:
504 // prt will be reused immediately, i.e. remain in the 'all' list.
505 prt->init(from_hr, false /* clear_links_to_all_list */);
506 } else {
507 prt = PerRegionTable::alloc(from_hr);
508 link_to_all(prt);
509 }
511 PerRegionTable* first_prt = _fine_grain_regions[ind];
512 prt->set_collision_list_next(first_prt);
513 _fine_grain_regions[ind] = prt;
514 _n_fine_entries++;
516 if (G1HRRSUseSparseTable) {
517 // Transfer from sparse to fine-grain.
518 SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrm_ind);
519 assert(sprt_entry != NULL, "There should have been an entry");
520 for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
521 CardIdx_t c = sprt_entry->card(i);
522 if (c != SparsePRTEntry::NullEntry) {
523 prt->add_card(c);
524 }
525 }
526 // Now we can delete the sparse entry.
527 bool res = _sparse_table.delete_entry(from_hrm_ind);
528 assert(res, "It should have been there.");
529 }
530 }
531 assert(prt != NULL && prt->hr() == from_hr, "consequence");
532 }
533 // Note that we can't assert "prt->hr() == from_hr", because of the
534 // possibility of concurrent reuse. But see head comment of
535 // OtherRegionsTable for why this is OK.
536 assert(prt != NULL, "Inv");
538 prt->add_reference(from);
540 if (G1RecordHRRSOops) {
541 HeapRegionRemSet::record(hr(), from);
542 if (G1TraceHeapRegionRememberedSet) {
543 gclog_or_tty->print("Added card " PTR_FORMAT " to region "
544 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
545 align_size_down(uintptr_t(from),
546 CardTableModRefBS::card_size),
547 hr()->bottom(), from);
548 }
549 }
550 assert(contains_reference(from), "We just added it!");
551 }
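// add_reference() escalates through three representations: the from-card
// cache filter, then the coarse map, then the sparse table, and finally a
// fine-grained PRT (coarsening a victim when the fine table is full). A
// standalone toy with the same shape; the promotion threshold and card count
// are made up, and eviction/coarsening is omitted for brevity:
#include <bitset>
#include <map>
#include <set>

class TinyRemSet {
  static const size_t SparseLimit = 4;       // cards per region before promotion
  std::set<int> _coarse;                     // regions remembered wholesale
  std::map<int, std::set<int> > _sparse;     // region -> a few cards
  std::map<int, std::bitset<2048> > _fine;   // region -> full card bitmap
public:
  void add(int region, int card) {
    if (_coarse.count(region) != 0) return;  // whole region already remembered
    std::map<int, std::bitset<2048> >::iterator f = _fine.find(region);
    if (f != _fine.end()) { f->second.set(card); return; }
    std::set<int>& cards = _sparse[region];
    cards.insert(card);
    if (cards.size() > SparseLimit) {        // promote sparse entry to fine
      std::bitset<2048>& bm = _fine[region];
      for (std::set<int>::iterator it = cards.begin(); it != cards.end(); ++it) {
        bm.set(*it);
      }
      _sparse.erase(region);
    }
  }
};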
553 PerRegionTable*
554 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
555 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
556 PerRegionTable* prt = _fine_grain_regions[ind];
557 while (prt != NULL && prt->hr() != hr) {
558 prt = prt->collision_list_next();
559 }
560 // Loop postcondition is the method postcondition.
561 return prt;
562 }
564 jint OtherRegionsTable::_n_coarsenings = 0;
566 PerRegionTable* OtherRegionsTable::delete_region_table() {
567 assert(_m->owned_by_self(), "Precondition");
568 assert(_n_fine_entries == _max_fine_entries, "Precondition");
569 PerRegionTable* max = NULL;
570 jint max_occ = 0;
571 PerRegionTable** max_prev;
572 size_t max_ind;
574 size_t i = _fine_eviction_start;
575 for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
576 size_t ii = i;
577 // Make sure we get a non-NULL sample.
578 while (_fine_grain_regions[ii] == NULL) {
579 ii++;
580 if (ii == _max_fine_entries) ii = 0;
581 guarantee(ii != i, "We must find one.");
582 }
583 PerRegionTable** prev = &_fine_grain_regions[ii];
584 PerRegionTable* cur = *prev;
585 while (cur != NULL) {
586 jint cur_occ = cur->occupied();
587 if (max == NULL || cur_occ > max_occ) {
588 max = cur;
589 max_prev = prev;
590 max_ind = i;
591 max_occ = cur_occ;
592 }
593 prev = cur->collision_list_next_addr();
594 cur = cur->collision_list_next();
595 }
596 i = i + _fine_eviction_stride;
597 if (i >= _n_fine_entries) i = i - _n_fine_entries;
598 }
600 _fine_eviction_start++;
602 if (_fine_eviction_start >= _n_fine_entries) {
603 _fine_eviction_start -= _n_fine_entries;
604 }
606 guarantee(max != NULL, "Since _n_fine_entries > 0");
608 // Set the corresponding coarse bit.
609 size_t max_hrm_index = (size_t) max->hr()->hrm_index();
610 if (!_coarse_map.at(max_hrm_index)) {
611 _coarse_map.at_put(max_hrm_index, true);
612 _n_coarse_entries++;
613 if (G1TraceHeapRegionRememberedSet) {
614 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
615 "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
616 hr()->bottom(),
617 max->hr()->bottom(),
618 _n_coarse_entries);
619 }
620 }
622 // Unsplice.
623 *max_prev = max->collision_list_next();
624 Atomic::inc(&_n_coarsenings);
625 _n_fine_entries--;
626 return max;
627 }
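// The eviction policy above does not scan the whole table: starting at a
// rotating index, it samples _fine_eviction_sample_size buckets spaced
// _fine_eviction_stride apart and coarsens the most occupied PRT found.
// A standalone sketch of just the sampling skeleton (the real loop also
// advances to the next non-empty bucket, which is omitted here):
#include <cstddef>

static size_t pick_victim(const int* occ, size_t n, size_t start,
                          size_t samples, size_t stride) {
  size_t best = start;
  for (size_t k = 0, i = start; k < samples; k++) {
    if (occ[i] > occ[best]) best = i;
    i += stride;
    if (i >= n) i -= n;  // wrap around, as delete_region_table() does
  }
  return best;
}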
630 // At present, this must be called stop-world single-threaded.
631 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
632 BitMap* region_bm, BitMap* card_bm) {
633 // First eliminate garbage regions from the coarse map.
634 if (G1RSScrubVerbose) {
635 gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
636 }
638 assert(_coarse_map.size() == region_bm->size(), "Precondition");
639 if (G1RSScrubVerbose) {
640 gclog_or_tty->print(" Coarse map: before = "SIZE_FORMAT"...",
641 _n_coarse_entries);
642 }
643 _coarse_map.set_intersection(*region_bm);
644 _n_coarse_entries = _coarse_map.count_one_bits();
645 if (G1RSScrubVerbose) {
646 gclog_or_tty->print_cr(" after = "SIZE_FORMAT".", _n_coarse_entries);
647 }
649 // Now do the fine-grained maps.
650 for (size_t i = 0; i < _max_fine_entries; i++) {
651 PerRegionTable* cur = _fine_grain_regions[i];
652 PerRegionTable** prev = &_fine_grain_regions[i];
653 while (cur != NULL) {
654 PerRegionTable* nxt = cur->collision_list_next();
655 // If the entire region is dead, eliminate.
656 if (G1RSScrubVerbose) {
657 gclog_or_tty->print_cr(" For other region %u:",
658 cur->hr()->hrm_index());
659 }
660 if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
661 *prev = nxt;
662 cur->set_collision_list_next(NULL);
663 _n_fine_entries--;
664 if (G1RSScrubVerbose) {
665 gclog_or_tty->print_cr(" deleted via region map.");
666 }
667 unlink_from_all(cur);
668 PerRegionTable::free(cur);
669 } else {
670 // Do fine-grain elimination.
671 if (G1RSScrubVerbose) {
672 gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
673 }
674 cur->scrub(ctbs, card_bm);
675 if (G1RSScrubVerbose) {
676 gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
677 }
678 // Did that empty the table completely?
679 if (cur->occupied() == 0) {
680 *prev = nxt;
681 cur->set_collision_list_next(NULL);
682 _n_fine_entries--;
683 unlink_from_all(cur);
684 PerRegionTable::free(cur);
685 } else {
686 prev = cur->collision_list_next_addr();
687 }
688 }
689 cur = nxt;
690 }
691 }
692 // Since we may have deleted a from_card_cache entry from the RS, clear
693 // the FCC.
694 clear_fcc();
695 }
697 bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
698 if (limit <= (size_t)G1RSetSparseRegionEntries) {
699 return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
700 } else {
701 // Current uses of this method may only use values less than G1RSetSparseRegionEntries
702 // for the limit. The general solution, comparing against occupied(), would be
703 // too slow at this time.
704 Unimplemented();
705 return false;
706 }
707 }
709 bool OtherRegionsTable::is_empty() const {
710 return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
711 }
713 size_t OtherRegionsTable::occupied() const {
714 size_t sum = occ_fine();
715 sum += occ_sparse();
716 sum += occ_coarse();
717 return sum;
718 }
720 size_t OtherRegionsTable::occ_fine() const {
721 size_t sum = 0;
723 size_t num = 0;
724 PerRegionTable* cur = _first_all_fine_prts;
725 while (cur != NULL) {
726 sum += cur->occupied();
727 cur = cur->next();
728 num++;
729 }
730 guarantee(num == _n_fine_entries, "just checking");
731 return sum;
732 }
734 size_t OtherRegionsTable::occ_coarse() const {
735 return (_n_coarse_entries * HeapRegion::CardsPerRegion);
736 }
738 size_t OtherRegionsTable::occ_sparse() const {
739 return _sparse_table.occupied();
740 }
742 size_t OtherRegionsTable::mem_size() const {
743 size_t sum = 0;
744 // all PRTs are of the same size so it is sufficient to query only one of them.
745 if (_first_all_fine_prts != NULL) {
746 assert(_last_all_fine_prts != NULL &&
747 _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
748 sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
749 }
750 sum += (sizeof(PerRegionTable*) * _max_fine_entries);
751 sum += (_coarse_map.size_in_words() * HeapWordSize);
752 sum += (_sparse_table.mem_size());
753 sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
754 return sum;
755 }
757 size_t OtherRegionsTable::static_mem_size() {
758 return FromCardCache::static_mem_size();
759 }
761 size_t OtherRegionsTable::fl_mem_size() {
762 return PerRegionTable::fl_mem_size();
763 }
765 void OtherRegionsTable::clear_fcc() {
766 FromCardCache::clear(hr()->hrm_index());
767 }
769 void OtherRegionsTable::clear() {
770 // if there are no entries, skip this step
771 if (_first_all_fine_prts != NULL) {
772 guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
773 PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
774 memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
775 } else {
776 guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
777 }
779 _first_all_fine_prts = _last_all_fine_prts = NULL;
780 _sparse_table.clear();
781 _coarse_map.clear();
782 _n_fine_entries = 0;
783 _n_coarse_entries = 0;
785 clear_fcc();
786 }
788 bool OtherRegionsTable::del_single_region_table(size_t ind,
789 HeapRegion* hr) {
790 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
791 PerRegionTable** prev_addr = &_fine_grain_regions[ind];
792 PerRegionTable* prt = *prev_addr;
793 while (prt != NULL && prt->hr() != hr) {
794 prev_addr = prt->collision_list_next_addr();
795 prt = prt->collision_list_next();
796 }
797 if (prt != NULL) {
798 assert(prt->hr() == hr, "Loop postcondition.");
799 *prev_addr = prt->collision_list_next();
800 unlink_from_all(prt);
801 PerRegionTable::free(prt);
802 _n_fine_entries--;
803 return true;
804 } else {
805 return false;
806 }
807 }
809 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
810 // Cast away const in this case.
811 MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
812 return contains_reference_locked(from);
813 }
815 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
816 HeapRegion* hr = _g1h->heap_region_containing_raw(from);
817 RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
818 // Is this region in the coarse map?
819 if (_coarse_map.at(hr_ind)) return true;
821 PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
822 hr);
823 if (prt != NULL) {
824 return prt->contains_reference(from);
826 } else {
827 uintptr_t from_card =
828 (uintptr_t(from) >> CardTableModRefBS::card_shift);
829 uintptr_t hr_bot_card_index =
830 uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
831 assert(from_card >= hr_bot_card_index, "Inv");
832 CardIdx_t card_index = from_card - hr_bot_card_index;
833 assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
834 "Must be in range.");
835 return _sparse_table.contains_card(hr_ind, card_index);
836 }
837 }
839 void
840 OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
841 _sparse_table.do_cleanup_work(hrrs_cleanup_task);
842 }
844 // Determines how many threads can add records to an rset in parallel.
845 // Records can be added either by mutator threads together with the
846 // concurrent refinement threads, or by GC threads.
847 uint HeapRegionRemSet::num_par_rem_sets() {
848 return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
849 }
851 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
852 HeapRegion* hr)
853 : _bosa(bosa),
854 _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
855 _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
856 reset_for_par_iteration();
857 }
859 void HeapRegionRemSet::setup_remset_size() {
860 // Setup sparse and fine-grain tables sizes.
861 // table_size = base * (log(region_size / 1M) + 1)
862 const int LOG_M = 20;
863 int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
864 if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
865 G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
866 }
867 if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
868 G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
869 }
870 guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
871 }
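// A worked instance of the sizing formula above, assuming 8 MB regions and
// assuming the defaults G1RSetRegionEntriesBase == 256 and
// G1RSetSparseRegionEntriesBase == 4 (standalone sketch; verify the defaults
// against g1_globals.hpp):
#include <cstdio>

int main() {
  const int LOG_M = 20;
  int log_region_bytes = 23;                          // 8 MB regions (assumed)
  int region_size_log_mb = log_region_bytes - LOG_M;  // 3
  if (region_size_log_mb < 0) region_size_log_mb = 0;
  printf("fine entries:   %d\n", 256 * (region_size_log_mb + 1));  // 1024
  printf("sparse entries: %d\n", 4 * (region_size_log_mb + 1));    // 16
  return 0;
}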
873 bool HeapRegionRemSet::claim_iter() {
874 if (_iter_state != Unclaimed) return false;
875 jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
876 return (res == Unclaimed);
877 }
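// claim_iter() is the standard claim idiom: a racy pre-check followed by a
// single CAS from Unclaimed to Claimed, so exactly one thread wins. The same
// shape as a standalone sketch using std::atomic (trailing underscores avoid
// clashing with the enum in this file):
#include <atomic>

enum IterState_ { Unclaimed_, Claimed_, Complete_ };
static std::atomic<int> g_iter_state(Unclaimed_);

static bool claim_iter_sketch() {
  if (g_iter_state.load() != Unclaimed_) return false;  // cheap early-out
  int expected = Unclaimed_;
  return g_iter_state.compare_exchange_strong(expected, Claimed_);
}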
879 void HeapRegionRemSet::set_iter_complete() {
880 _iter_state = Complete;
881 }
883 bool HeapRegionRemSet::iter_is_complete() {
884 return _iter_state == Complete;
885 }
887 #ifndef PRODUCT
888 void HeapRegionRemSet::print() {
889 HeapRegionRemSetIterator iter(this);
890 size_t card_index;
891 while (iter.has_next(card_index)) {
892 HeapWord* card_start =
893 G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
894 gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start);
895 }
896 if (iter.n_yielded() != occupied()) {
897 gclog_or_tty->print_cr("Yielded disagrees with occupied:");
898 gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).",
899 iter.n_yielded(),
900 iter.n_yielded_coarse(), iter.n_yielded_fine());
901 gclog_or_tty->print_cr(" %6d occ (%6d coarse, %6d fine).",
902 occupied(), occ_coarse(), occ_fine());
903 }
904 guarantee(iter.n_yielded() == occupied(),
905 "We should have yielded all the represented cards.");
906 }
907 #endif
909 void HeapRegionRemSet::cleanup() {
910 SparsePRT::cleanup_all();
911 }
913 void HeapRegionRemSet::clear() {
914 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
915 clear_locked();
916 }
918 void HeapRegionRemSet::clear_locked() {
919 _code_roots.clear();
920 _other_regions.clear();
921 assert(occupied_locked() == 0, "Should be clear.");
922 reset_for_par_iteration();
923 }
925 void HeapRegionRemSet::reset_for_par_iteration() {
926 _iter_state = Unclaimed;
927 _iter_claimed = 0;
928 // It's good to check this to make sure that the two methods are in sync.
929 assert(verify_ready_for_par_iteration(), "post-condition");
930 }
932 void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
933 BitMap* region_bm, BitMap* card_bm) {
934 _other_regions.scrub(ctbs, region_bm, card_bm);
935 }
937 // Code roots support
938 //
939 // The code root set is protected by two separate locking schemes.
940 // When at a safepoint, the per-hrrs lock must be held during modifications,
941 // except when doing a full gc.
942 // When not at a safepoint, the CodeCache_lock must be held during modifications.
943 // When concurrent readers access the contains() function
944 // (during the evacuation phase) no removals are allowed.
946 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
947 assert(nm != NULL, "sanity");
948 // Optimistic unlocked contains-check
949 if (!_code_roots.contains(nm)) {
950 MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
951 add_strong_code_root_locked(nm);
952 }
953 }
955 void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
956 assert(nm != NULL, "sanity");
957 _code_roots.add(nm);
958 }
960 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
961 assert(nm != NULL, "sanity");
962 assert_locked_or_safepoint(CodeCache_lock);
964 MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
965 _code_roots.remove(nm);
967 // Check that there were no duplicates
968 guarantee(!_code_roots.contains(nm), "duplicate entry found");
969 }
971 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
972 _code_roots.nmethods_do(blk);
973 }
975 void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
976 _code_roots.clean(hr);
977 }
979 size_t HeapRegionRemSet::strong_code_roots_mem_size() {
980 return _code_roots.mem_size();
981 }
983 HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
984 _hrrs(hrrs),
985 _g1h(G1CollectedHeap::heap()),
986 _coarse_map(&hrrs->_other_regions._coarse_map),
987 _bosa(hrrs->bosa()),
988 _is(Sparse),
989 // Set these values so that we increment to the first region.
990 _coarse_cur_region_index(-1),
991 _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
992 _cur_card_in_prt(HeapRegion::CardsPerRegion),
993 _fine_cur_prt(NULL),
994 _n_yielded_coarse(0),
995 _n_yielded_fine(0),
996 _n_yielded_sparse(0),
997 _sparse_iter(&hrrs->_other_regions._sparse_table) {}
999 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
1000 if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
1001 // Go to the next card.
1002 _coarse_cur_region_cur_card++;
1003 // Was that the last card in the current region?
1004 if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
1005 // Yes: find the next region. This may leave _coarse_cur_region_index
1006 // set to the last index, in which case there are no more coarse
1007 // regions.
1008 _coarse_cur_region_index =
1009 (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
1010 if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
1011 _coarse_cur_region_cur_card = 0;
1012 HeapWord* r_bot =
1013 _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
1014 _cur_region_card_offset = _bosa->index_for(r_bot);
1015 } else {
1016 return false;
1017 }
1018 }
1019 // If we didn't return false above, then we can yield a card.
1020 card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
1021 return true;
1022 }
1024 bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
1025 if (fine_has_next()) {
1026 _cur_card_in_prt =
1027 _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
1028 }
1029 if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
1030 // _fine_cur_prt may still be NULL in case there are no PRTs at all for
1031 // the remembered set.
1032 if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
1033 return false;
1034 }
1035 PerRegionTable* next_prt = _fine_cur_prt->next();
1036 switch_to_prt(next_prt);
1037 _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
1038 }
1040 card_index = _cur_region_card_offset + _cur_card_in_prt;
1041 guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
1042 err_msg("Card index "SIZE_FORMAT" must be within the region", _cur_card_in_prt));
1043 return true;
1044 }
1046 bool HeapRegionRemSetIterator::fine_has_next() {
1047 return _cur_card_in_prt != HeapRegion::CardsPerRegion;
1048 }
1050 void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
1051 assert(prt != NULL, "Cannot switch to NULL prt");
1052 _fine_cur_prt = prt;
1054 HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
1055 _cur_region_card_offset = _bosa->index_for(r_bot);
1057 // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
1058 // To avoid special-casing this start case, and not miss the first bitmap
1059 // entry, initialize _cur_card_in_prt with -1 instead of 0.
1060 _cur_card_in_prt = (size_t)-1;
1061 }
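// The (size_t)-1 trick works because callers always scan from
// _cur_card_in_prt + 1, and (size_t)-1 + 1 wraps to 0, so the very first
// query starts at bit 0 without a special case. A standalone demonstration
// with a plain bit array:
#include <cstddef>
#include <cstdio>

// Next set bit at or after 'start'; returns 'n' when none remain.
static size_t next_one(const bool* bits, size_t n, size_t start) {
  for (size_t i = start; i < n; i++) {
    if (bits[i]) return i;
  }
  return n;
}

int main() {
  bool bm[8] = { true, false, false, true, false, false, false, true };
  size_t cur = (size_t)-1;                 // same initialization as above
  while ((cur = next_one(bm, 8, cur + 1)) < 8) {
    printf("card %zu\n", cur);             // prints 0, 3, 7
  }
  return 0;
}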
1063 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
1064 switch (_is) {
1065 case Sparse: {
1066 if (_sparse_iter.has_next(card_index)) {
1067 _n_yielded_sparse++;
1068 return true;
1069 }
1070 // Otherwise, deliberate fall-through
1071 _is = Fine;
1072 PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
1073 if (initial_fine_prt != NULL) {
1074 switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
1075 }
1076 }
1077 case Fine:
1078 if (fine_has_next(card_index)) {
1079 _n_yielded_fine++;
1080 return true;
1081 }
1082 // Otherwise, deliberate fall-through
1083 _is = Coarse;
1084 case Coarse:
1085 if (coarse_has_next(card_index)) {
1086 _n_yielded_coarse++;
1087 return true;
1088 }
1089 // Otherwise...
1090 break;
1091 }
1092 assert(ParallelGCThreads > 1 ||
1093 n_yielded() == _hrrs->occupied(),
1094 "Should have yielded all the cards in the rem set "
1095 "(in the non-par case).");
1096 return false;
1097 }
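// has_next() is a three-stage state machine whose switch deliberately falls
// through, so exhausting one representation immediately tries the next within
// the same call. The same shape in a standalone miniature:
#include <cstdio>

enum Stage { SparseStage, FineStage, CoarseStage };

struct MiniIter {
  Stage stage;
  int sparse_left, fine_left, coarse_left;  // elements remaining per stage

  bool has_next(const char*& kind) {
    switch (stage) {
    case SparseStage:
      if (sparse_left-- > 0) { kind = "sparse"; return true; }
      stage = FineStage;    // deliberate fall-through
    case FineStage:
      if (fine_left-- > 0) { kind = "fine"; return true; }
      stage = CoarseStage;  // deliberate fall-through
    case CoarseStage:
      if (coarse_left-- > 0) { kind = "coarse"; return true; }
      break;
    }
    return false;
  }
};

int main() {
  MiniIter it = { SparseStage, 2, 1, 1 };
  const char* kind;
  while (it.has_next(kind)) printf("%s\n", kind);  // sparse sparse fine coarse
  return 0;
}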
1101 OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
1102 HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
1103 HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
1104 int HeapRegionRemSet::_n_recorded = 0;
1106 HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
1107 int* HeapRegionRemSet::_recorded_event_index = NULL;
1108 int HeapRegionRemSet::_n_recorded_events = 0;
1110 void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
1111 if (_recorded_oops == NULL) {
1112 assert(_n_recorded == 0
1113 && _recorded_cards == NULL
1114 && _recorded_regions == NULL,
1115 "Inv");
1116 _recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
1117 _recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded, mtGC);
1118 _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded, mtGC);
1119 }
1120 if (_n_recorded == MaxRecorded) {
1121 gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
1122 } else {
1123 _recorded_cards[_n_recorded] =
1124 (HeapWord*)align_size_down(uintptr_t(f),
1125 CardTableModRefBS::card_size);
1126 _recorded_oops[_n_recorded] = f;
1127 _recorded_regions[_n_recorded] = hr;
1128 _n_recorded++;
1129 }
1130 }
1132 void HeapRegionRemSet::record_event(Event evnt) {
1133 if (!G1RecordHRRSEvents) return;
1135 if (_recorded_events == NULL) {
1136 assert(_n_recorded_events == 0
1137 && _recorded_event_index == NULL,
1138 "Inv");
1139 _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
1140 _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
1141 }
1142 if (_n_recorded_events == MaxRecordedEvents) {
1143 gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
1144 } else {
1145 _recorded_events[_n_recorded_events] = evnt;
1146 _recorded_event_index[_n_recorded_events] = _n_recorded;
1147 _n_recorded_events++;
1148 }
1149 }
1151 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
1152 switch (evnt) {
1153 case Event_EvacStart:
1154 str->print("Evac Start");
1155 break;
1156 case Event_EvacEnd:
1157 str->print("Evac End");
1158 break;
1159 case Event_RSUpdateEnd:
1160 str->print("RS Update End");
1161 break;
1162 }
1163 }
1165 void HeapRegionRemSet::print_recorded() {
1166 int cur_evnt = 0;
1167 Event cur_evnt_kind;
1168 int cur_evnt_ind = 0;
1169 if (_n_recorded_events > 0) {
1170 cur_evnt_kind = _recorded_events[cur_evnt];
1171 cur_evnt_ind = _recorded_event_index[cur_evnt];
1172 }
1174 for (int i = 0; i < _n_recorded; i++) {
1175 while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
1176 gclog_or_tty->print("Event: ");
1177 print_event(gclog_or_tty, cur_evnt_kind);
1178 gclog_or_tty->cr();
1179 cur_evnt++;
1180 if (cur_evnt < MaxRecordedEvents) {
1181 cur_evnt_kind = _recorded_events[cur_evnt];
1182 cur_evnt_ind = _recorded_event_index[cur_evnt];
1183 }
1184 }
1185 gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
1186 " for ref " PTR_FORMAT ".\n",
1187 _recorded_cards[i], _recorded_regions[i]->bottom(),
1188 _recorded_oops[i]);
1189 }
1190 }
1192 void HeapRegionRemSet::reset_for_cleanup_tasks() {
1193 SparsePRT::reset_for_cleanup_tasks();
1194 }
1196 void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
1197 _other_regions.do_cleanup_work(hrrs_cleanup_task);
1198 }
1200 void
1201 HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
1202 SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
1203 }
1205 #ifndef PRODUCT
1206 void PerRegionTable::test_fl_mem_size() {
1207 PerRegionTable* dummy = alloc(NULL);
1209 size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
1210 assert(dummy->mem_size() > min_prt_size,
1211 err_msg("PerRegionTable memory usage is suspiciously small, only has "SIZE_FORMAT" bytes. "
1212 "Should be at least "SIZE_FORMAT" bytes.", dummy->mem_size(), min_prt_size));
1213 free(dummy);
1214 guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
1215 // try to reset the state
1216 _free_list = NULL;
1217 delete dummy;
1218 }
1220 void HeapRegionRemSet::test_prt() {
1221 PerRegionTable::test_fl_mem_size();
1222 }
1224 void HeapRegionRemSet::test() {
1225 os::sleep(Thread::current(), (jlong)5000, false);
1226 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1228 // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in the same
1229 // hash bucket.
1230 HeapRegion* hr0 = g1h->region_at(0);
1231 HeapRegion* hr1 = g1h->region_at(1);
1232 HeapRegion* hr2 = g1h->region_at(5);
1233 HeapRegion* hr3 = g1h->region_at(6);
1234 HeapRegion* hr4 = g1h->region_at(7);
1235 HeapRegion* hr5 = g1h->region_at(8);
1237 HeapWord* hr1_start = hr1->bottom();
1238 HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
1239 HeapWord* hr1_last = hr1->end() - 1;
1241 HeapWord* hr2_start = hr2->bottom();
1242 HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
1243 HeapWord* hr2_last = hr2->end() - 1;
1245 HeapWord* hr3_start = hr3->bottom();
1246 HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
1247 HeapWord* hr3_last = hr3->end() - 1;
1249 HeapRegionRemSet* hrrs = hr0->rem_set();
1251 // Make three references from each of hr1, hr2 and hr3...
1252 hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
1253 hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
1254 hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
1256 hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
1257 hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
1258 hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
1260 hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
1261 hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
1262 hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
1264 // Now cause a coarsening.
1265 hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
1266 hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
1268 // Now, does iteration yield these three?
1269 HeapRegionRemSetIterator iter(hrrs);
1270 size_t sum = 0;
1271 size_t card_index;
1272 while (iter.has_next(card_index)) {
1273 HeapWord* card_start =
1274 G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
1275 gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start);
1276 sum++;
1277 }
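  // Expected count: 11 references were added in total (9 into hr1..hr3, plus
  // 2 into hr4/hr5 to force the coarsening). Coarsening replaces the evicted
  // region's 3 fine-grained cards with all CardsPerRegion (2048) coarse cards,
  // hence 11 - 3 + 2048.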
1278 guarantee(sum == 11 - 3 + 2048, "Failure");
1279 guarantee(sum == hrrs->occupied(), "Failure");
1280 }
1281 #endif