Mon, 21 Jul 2014 09:40:19 +0200
8040792: G1: Memory usage calculation uses sizeof(this) instead of sizeof(classname)
Summary: A few locations in the code use sizeof(this), which returns the size of the pointer, instead of sizeof(classname), which returns the actual size of the object (the sum of its members plus any padding). This change fixes these errors and adds a few tests.
Reviewed-by: mgerdin, brutisso
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
27 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
29 #include "gc_implementation/g1/heapRegionRemSet.hpp"
30 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/padded.inline.hpp"
33 #include "memory/space.inline.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "utilities/bitMap.inline.hpp"
36 #include "utilities/globalDefinitions.hpp"
37 #include "utilities/growableArray.hpp"
39 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// A PerRegionTable (PRT) records, as a bitmap with one bit per card of a
// single "from" HeapRegion, which cards contain references into the region
// that owns the enclosing remembered set. A cached population count
// (_occupied) avoids rescanning the bitmap. Instances are C-heap allocated
// and recycled through a global lock-free free list (_free_list).
class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion* _hr;       // The "from" region this table covers.
  BitMap _bm;            // One bit per card of _hr.
  jint _occupied;        // Cached count of set bits in _bm.

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable * _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  // Recompute _occupied from the bitmap; used after bulk bitmap updates
  // (e.g. scrub) that bypass add_card_work.
  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

  // Set the bit for from_card and bump _occupied. "par" selects the
  // atomic path (concurrent writers) vs. the plain single-threaded path.
  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        // par_at_put returns true only for the thread that actually
        // flipped the bit, so _occupied is incremented exactly once.
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  // Record the card containing location "from", which should lie in hr().
  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
                             from,
                             UseCompressedOops
                             ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                             : (void *)oopDesc::load_decode_heap_oop((oop*)from));
    }

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    // If the table used to belong to a continues humongous region and is
    // now reused for the corresponding start humongous region, we need to
    // make sure that we detect this. Thus, we call is_in_reserved_raw()
    // instead of just is_in_reserved() here.
    if (loc_hr->is_in_reserved_raw(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

public:

  HeapRegion* hr() const { return _hr; }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  // Re-initialize this table for region "hr". The 'all'-list links are
  // only cleared on request: a table evicted and reused in place stays
  // linked into its owner's 'all' list.
  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _hr = hr;
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
  }

  // Parallel (atomic-update) variant.
  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  // Single-threaded variant.
  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  // Keep only the cards whose bit is also set in "card_bm" (indexed by
  // global card index), then recompute _occupied.
  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
    size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes. Uses sizeof(PerRegionTable), not sizeof(this)
  // (which would be the pointer size).
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field.
  // Lock-free: pushes the whole chain onto _free_list with a CAS,
  // retrying on contention.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
  // Pops from the lock-free free list if possible, otherwise allocates.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*)
        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        // CAS lost: reload the head and retry.
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  // Sum of mem_size() over every table currently on the global free list.
  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};
// Head of the global lock-free free list of recycled PRTs.
PerRegionTable* PerRegionTable::_free_list = NULL;

// Sizing parameters for the fine-grain PRT hash table and its eviction
// sampling; computed once (from G1RSetRegionEntries) in the first
// OtherRegionsTable constructed.
size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
265 OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
266 _g1h(G1CollectedHeap::heap()),
267 _hr(hr), _m(m),
268 _coarse_map(G1CollectedHeap::heap()->max_regions(),
269 false /* in-resource-area */),
270 _fine_grain_regions(NULL),
271 _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
272 _n_fine_entries(0), _n_coarse_entries(0),
273 _fine_eviction_start(0),
274 _sparse_table(hr)
275 {
276 typedef PerRegionTable* PerRegionTablePtr;
278 if (_max_fine_entries == 0) {
279 assert(_mod_max_fine_entries_mask == 0, "Both or none.");
280 size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
281 _max_fine_entries = (size_t)1 << max_entries_log;
282 _mod_max_fine_entries_mask = _max_fine_entries - 1;
284 assert(_fine_eviction_sample_size == 0
285 && _fine_eviction_stride == 0, "All init at same time.");
286 _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
287 _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
288 }
290 _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
291 mtGC, 0, AllocFailStrategy::RETURN_NULL);
293 if (_fine_grain_regions == NULL) {
294 vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
295 "Failed to allocate _fine_grain_entries.");
296 }
298 for (size_t i = 0; i < _max_fine_entries; i++) {
299 _fine_grain_regions[i] = NULL;
300 }
301 }
303 void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
304 // We always append to the beginning of the list for convenience;
305 // the order of entries in this list does not matter.
306 if (_first_all_fine_prts != NULL) {
307 assert(_first_all_fine_prts->prev() == NULL, "invariant");
308 _first_all_fine_prts->set_prev(prt);
309 prt->set_next(_first_all_fine_prts);
310 } else {
311 // this is the first element we insert. Adjust the "last" pointer
312 _last_all_fine_prts = prt;
313 assert(prt->next() == NULL, "just checking");
314 }
315 // the new element is always the first element without a predecessor
316 prt->set_prev(NULL);
317 _first_all_fine_prts = prt;
319 assert(prt->prev() == NULL, "just checking");
320 assert(_first_all_fine_prts == prt, "just checking");
321 assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
322 (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
323 "just checking");
324 assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
325 "just checking");
326 assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
327 "just checking");
328 }
// Remove "prt" from the doubly-linked 'all' list, fixing up the head and
// tail pointers as needed and clearing prt's own links.
void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    // prt is the head of the list.
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}
// Per-(parallel remset id, region) cache of the last card recorded;
// filters duplicate add_reference calls cheaply before the main tables
// are consulted.
int** FromCardCache::_cache = NULL;
uint FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;
// One-time allocation of the cache: a padded 2D array indexed by
// [parallel remset id][region index], with every entry reset to
// InvalidCard. The padding avoids false sharing between workers.
void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
  guarantee(_cache == NULL, "Should not call this multiple times");

  _max_regions = max_num_regions;
  _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
                                                       _max_regions,
                                                       &_static_mem_size);

  for (uint i = 0; i < n_par_rs; i++) {
    for (uint j = 0; j < _max_regions; j++) {
      set(i, j, InvalidCard);
    }
  }
}
382 void FromCardCache::shrink(uint new_num_regions) {
383 for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
384 assert(new_num_regions <= _max_regions, "Must be within max.");
385 for (uint j = new_num_regions; j < _max_regions; j++) {
386 set(i, j, InvalidCard);
387 }
388 }
389 }
391 #ifndef PRODUCT
// Debug-only: dump every cache entry to "out".
void FromCardCache::print(outputStream* out) {
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
    for (uint j = 0; j < _max_regions; j++) {
      out->print_cr("_from_card_cache["UINT32_FORMAT"]["UINT32_FORMAT"] = "INT32_FORMAT".",
                    i, j, at(i, j));
    }
  }
}
400 #endif
402 void FromCardCache::clear(uint region_idx) {
403 uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
404 for (uint i = 0; i < num_par_remsets; i++) {
405 set(i, region_idx, InvalidCard);
406 }
407 }
// Static forwarding wrappers preserved on the OtherRegionsTable
// interface; all from-card-cache state lives in FromCardCache.
void OtherRegionsTable::init_from_card_cache(uint max_regions) {
  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}

void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) {
  FromCardCache::shrink(new_num_regions);
}

void OtherRegionsTable::print_from_card_cache() {
  FromCardCache::print();
}
// Record that the location "from" holds a reference into hr(), on behalf
// of worker "tid". An entry migrates through progressively coarser
// representations as a region's contribution grows:
//   from-card cache -> sparse table -> fine-grain PRT -> coarse bitmap.
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
  uint cur_hrs_ind = hr()->hrs_index();

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                           from,
                           UseCompressedOops
                           ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                           : (void *)oopDesc::load_decode_heap_oop((oop*)from));
  }

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")",
                           hr()->bottom(), from_card,
                           FromCardCache::at((uint)tid, cur_hrs_ind));
  }

  // Fast path: this worker recorded the same card last time.
  if (FromCardCache::contains_or_replace((uint)tid, cur_hrs_ind, from_card)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr(" from-card cache hit.");
    }
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrs_ind)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr(" coarse map hit.");
    }
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      // Try the sparse table first; it holds only a few cards per region.
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrs_ind, card_index)) {
        if (G1RecordHRRSOops) {
          HeapRegionRemSet::record(hr(), from);
          if (G1TraceHeapRegionRememberedSet) {
            gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
                                "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                                align_size_down(uintptr_t(from),
                                                CardTableModRefBS::card_size),
                                hr()->bottom(), from);
          }
        }
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr(" added card to sparse table.");
        }
        assert(contains_reference_locked(from), "We just added it!");
        return;
      } else {
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr(" [tid %d] sparse table entry "
                                 "overflow(f: %d, t: %d)",
                                 tid, from_hrs_ind, cur_hrs_ind);
        }
      }

      // Sparse entry overflowed: promote to a fine-grain PRT, evicting
      // (and thereby coarsening) an existing table if at capacity.
      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      // Insert the new table at the head of its collision chain.
      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      _fine_grain_regions[ind] = prt;
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrs_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse. But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);

  if (G1RecordHRRSOops) {
    HeapRegionRemSet::record(hr(), from);
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Added card " PTR_FORMAT " to region "
                          "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                          align_size_down(uintptr_t(from),
                                          CardTableModRefBS::card_size),
                          hr()->bottom(), from);
    }
  }
  assert(contains_reference(from), "We just added it!");
}
553 PerRegionTable*
554 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
555 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
556 PerRegionTable* prt = _fine_grain_regions[ind];
557 while (prt != NULL && prt->hr() != hr) {
558 prt = prt->collision_list_next();
559 }
560 // Loop postcondition is the method postcondition.
561 return prt;
562 }
// Global count of coarsening events (shared by all remembered sets;
// incremented atomically in delete_region_table()).
jint OtherRegionsTable::_n_coarsenings = 0;
566 PerRegionTable* OtherRegionsTable::delete_region_table() {
567 assert(_m->owned_by_self(), "Precondition");
568 assert(_n_fine_entries == _max_fine_entries, "Precondition");
569 PerRegionTable* max = NULL;
570 jint max_occ = 0;
571 PerRegionTable** max_prev;
572 size_t max_ind;
574 size_t i = _fine_eviction_start;
575 for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
576 size_t ii = i;
577 // Make sure we get a non-NULL sample.
578 while (_fine_grain_regions[ii] == NULL) {
579 ii++;
580 if (ii == _max_fine_entries) ii = 0;
581 guarantee(ii != i, "We must find one.");
582 }
583 PerRegionTable** prev = &_fine_grain_regions[ii];
584 PerRegionTable* cur = *prev;
585 while (cur != NULL) {
586 jint cur_occ = cur->occupied();
587 if (max == NULL || cur_occ > max_occ) {
588 max = cur;
589 max_prev = prev;
590 max_ind = i;
591 max_occ = cur_occ;
592 }
593 prev = cur->collision_list_next_addr();
594 cur = cur->collision_list_next();
595 }
596 i = i + _fine_eviction_stride;
597 if (i >= _n_fine_entries) i = i - _n_fine_entries;
598 }
600 _fine_eviction_start++;
602 if (_fine_eviction_start >= _n_fine_entries) {
603 _fine_eviction_start -= _n_fine_entries;
604 }
606 guarantee(max != NULL, "Since _n_fine_entries > 0");
608 // Set the corresponding coarse bit.
609 size_t max_hrs_index = (size_t) max->hr()->hrs_index();
610 if (!_coarse_map.at(max_hrs_index)) {
611 _coarse_map.at_put(max_hrs_index, true);
612 _n_coarse_entries++;
613 if (G1TraceHeapRegionRememberedSet) {
614 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
615 "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
616 hr()->bottom(),
617 max->hr()->bottom(),
618 _n_coarse_entries);
619 }
620 }
622 // Unsplice.
623 *max_prev = max->collision_list_next();
624 Atomic::inc(&_n_coarsenings);
625 _n_fine_entries--;
626 return max;
627 }
// At present, this must be called stop-world single-threaded.
// Removes entries for dead regions (per region_bm) from the coarse map,
// then scrubs each fine-grain table against card_bm, freeing tables
// whose region died or that become empty.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
  }

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose) {
    gclog_or_tty->print(" Coarse map: before = "SIZE_FORMAT"...",
                        _n_coarse_entries);
  }
  // Live regions have their bit set in region_bm, so intersecting drops
  // the coarse entries of dead regions.
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr(" after = "SIZE_FORMAT".", _n_coarse_entries);
  }

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      if (G1RSScrubVerbose) {
        gclog_or_tty->print_cr(" For other region %u:",
                               cur->hr()->hrs_index());
      }
      if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr(" deleted via region map.");
        }
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        if (G1RSScrubVerbose) {
          gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
        }
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
        }
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}
698 size_t OtherRegionsTable::occupied() const {
699 size_t sum = occ_fine();
700 sum += occ_sparse();
701 sum += occ_coarse();
702 return sum;
703 }
705 size_t OtherRegionsTable::occ_fine() const {
706 size_t sum = 0;
708 size_t num = 0;
709 PerRegionTable * cur = _first_all_fine_prts;
710 while (cur != NULL) {
711 sum += cur->occupied();
712 cur = cur->next();
713 num++;
714 }
715 guarantee(num == _n_fine_entries, "just checking");
716 return sum;
717 }
// Each coarse entry stands for every card of an entire region.
size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

// Cards currently held by the sparse table.
size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}
// Memory footprint of this table in bytes: fine-grain PRTs, the bucket
// array, the coarse bitmap, the sparse table and the object itself.
// Note: uses sizeof(OtherRegionsTable), not sizeof(this), which would
// only be the pointer size (JDK-8040792).
size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
      _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}
// Memory shared by all OtherRegionsTables: the from-card cache.
size_t OtherRegionsTable::static_mem_size() {
  return FromCardCache::static_mem_size();
}

// Memory held on the global PerRegionTable free list.
size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

// Invalidate this region's entries in the from-card cache.
void OtherRegionsTable::clear_fcc() {
  FromCardCache::clear(hr()->hrs_index());
}
// Empty all three representations (fine, sparse, coarse) and invalidate
// this region's from-card cache entries. The fine-grain tables are
// returned to the global free list in one bulk operation.
void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  _coarse_map.clear();
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}
// Remove and free the fine-grain table for region "hr" from bucket "ind",
// if present. Returns true iff a table was found and deleted.
bool OtherRegionsTable::del_single_region_table(size_t ind,
                                                HeapRegion* hr) {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  PerRegionTable** prev_addr = &_fine_grain_regions[ind];
  PerRegionTable* prt = *prev_addr;
  while (prt != NULL && prt->hr() != hr) {
    prev_addr = prt->collision_list_next_addr();
    prt = prt->collision_list_next();
  }
  if (prt != NULL) {
    assert(prt->hr() == hr, "Loop postcondition.");
    // Unsplice from the collision chain, then from the 'all' list.
    *prev_addr = prt->collision_list_next();
    unlink_from_all(prt);
    PerRegionTable::free(prt);
    _n_fine_entries--;
    return true;
  } else {
    return false;
  }
}
// Locking containment check: takes _m because the fine-grain table may
// be restructured concurrently, then delegates to the locked variant.
bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}
// Containment check with _m held: consult the coarse map first, then the
// fine-grain table, and finally the sparse table.
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  if (hr == NULL) return false;
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    // Fall back to the sparse table: translate "from" to a card index
    // relative to the bottom of its region.
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}
// Forward cleanup to the sparse table, the only component that
// participates in HRRS cleanup tasks here.
void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
uint HeapRegionRemSet::num_par_rem_sets() {
  // Take the maximum over the two possible writer populations.
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
}
// Per-region remembered set: the strong code roots plus the
// other-regions table, guarded by a leaf mutex named after the region.
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #"UINT32_FORMAT, hr->hrs_index()), true),
    _code_roots(), _other_regions(hr, &_m) {
  reset_for_par_iteration();
}
// Derive default sparse/fine table sizes from the region size; flags
// explicitly set on the command line are left untouched.
void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain tables sizes.
  // table_size = base * (log(region_size / 1M) + 1)
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}
// Atomically claim this remembered set for iteration; exactly one caller
// per cycle sees true (the one whose CAS moves Unclaimed -> Claimed).
bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}
// Mark the (claimed) iteration over this remembered set as finished.
void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}

// True iff set_iter_complete() has been called since the last reset.
bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}
873 #ifndef PRODUCT
// Debug-only: dump every card in this remembered set and cross-check the
// iterator's yield counts against occupied().
void HeapRegionRemSet::print() {
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr(" Card " PTR_FORMAT, card_start);
  }
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).",
                           iter.n_yielded(),
                           iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr(" %6d occ (%6d coarse, %6d fine).",
                           occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
893 #endif
// Release sparse-table storage held globally across all remembered sets.
void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

// Clear this remembered set, acquiring the lock first.
void HeapRegionRemSet::clear() {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}

// Clear both the code-root set and the other-regions table; caller
// holds _m.
void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
  _other_regions.clear();
  assert(occupied_locked() == 0, "Should be clear.");
  reset_for_par_iteration();
}
// Make the remembered set claimable again for the next parallel
// iteration cycle.
void HeapRegionRemSet::reset_for_par_iteration() {
  _iter_state = Unclaimed;
  _iter_claimed = 0;
  // It's good to check this to make sure that the two methods are in sync.
  assert(verify_ready_for_par_iteration(), "post-condition");
}
// Remove cards for dead regions/objects; forwards to the other-regions
// table, which owns all card-level state.
void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}
923 // Code roots support
// Register nm as a strong code root of this region.
void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  _code_roots.add(nm);
}

// Remove nm from the code-root set; verifies it was present at most once.
void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  _code_roots.remove(nm);
  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
// Closure applied to the oops of an nmethod after evacuation of region
// _from: counts references that still point into _from (self-forwarded
// objects, i.e. evacuation failed for them), and registers the nmethod
// as a code root with the destination region of every evacuated object
// it references.
class NMethodMigrationOopClosure : public OopClosure {
  G1CollectedHeap* _g1h;
  HeapRegion* _from;        // Source (collection set) region being migrated.
  nmethod* _nm;             // The code blob whose oops are being scanned.

  uint _num_self_forwarded; // Oops still pointing into _from.

  template <class T> void do_oop_work(T* p) {
    T heap_oop = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(heap_oop)) {
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
      if (_from->is_in(obj)) {
        // Reference still points into the source region.
        // Since roots are immediately evacuated this means that
        // we must have self forwarded the object
        assert(obj->is_forwarded(),
               err_msg("code roots should be immediately evacuated. "
                       "Ref: "PTR_FORMAT", "
                       "Obj: "PTR_FORMAT", "
                       "Region: "HR_FORMAT,
                       p, (void*) obj, HR_FORMAT_PARAMS(_from)));
        assert(obj->forwardee() == obj,
               err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));

        // The object has been self forwarded.
        // Note, if we're during an initial mark pause, there is
        // no need to explicitly mark object. It will be marked
        // during the regular evacuation failure handling code.
        _num_self_forwarded++;
      } else {
        // The reference points into a promotion or to-space region
        HeapRegion* to = _g1h->heap_region_containing(obj);
        to->rem_set()->add_strong_code_root(_nm);
      }
    }
  }

public:
  NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
    _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }

  // Nonzero iff the nmethod must stay a code root of _from (it still
  // references a self-forwarded object there).
  uint retain() { return _num_self_forwarded > 0; }
};
// Redistributes this (collection set) region's strong code roots after
// evacuation: each nmethod is re-registered with the remembered sets of
// the regions its oops were evacuated to, and is kept on this region only
// if some of its oops were self-forwarded (evacuation failure).
void HeapRegionRemSet::migrate_strong_code_roots() {
  assert(hr()->in_collection_set(), "only collection set regions");
  assert(!hr()->isHumongous(),
         err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
                 HR_FORMAT_PARAMS(hr())));

  ResourceMark rm;

  // List of code blobs to retain for this region
  GrowableArray<nmethod*> to_be_retained(10);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Drain the code root list, re-registering each nmethod with the
  // destination regions of its oops via NMethodMigrationOopClosure.
  while (!_code_roots.is_empty()) {
    nmethod *nm = _code_roots.pop();
    if (nm != NULL) {
      NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
      nm->oops_do(&oop_cl);
      if (oop_cl.retain()) {
        to_be_retained.push(nm);
      }
    }
  }

  // Now push any code roots we need to retain
  assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
         "Retained nmethod list must be empty or "
         "evacuation of this region failed");

  while (to_be_retained.is_nonempty()) {
    nmethod* nm = to_be_retained.pop();
    assert(nm != NULL, "sanity");
    add_strong_code_root(nm);
  }
}
// Applies blk to every nmethod registered as a strong code root of
// this region.
void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}
// Returns the memory footprint, in bytes, of the strong code root list.
size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}
// Constructs an iterator over all cards in the given remembered set.
// Iteration starts in the sparse table, then visits the fine-grain PRTs
// and finally the coarse bitmap (see has_next()).
HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bosa(hrrs->bosa()),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}
// Yields the next card from the coarse bitmap, advancing to the next
// coarsened region when the current one is exhausted. Returns false when
// no coarse cards remain; otherwise sets card_index and returns true.
bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was the last one the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region. This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}
// Yields the next card from the fine-grain PRTs, advancing to the next
// PRT in the list when the current one's bitmap is exhausted. Returns
// false when no fine cards remain; otherwise sets card_index and
// returns true.
bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL in case there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            err_msg("Card index "SIZE_FORMAT" must be within the region", _cur_card_in_prt));
  return true;
}
// True while the current PRT position is a valid card index, i.e. the
// current PRT's bitmap has not been exhausted yet.
bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}
// Makes prt the current fine-grain PRT and recomputes the card offset
// of its region, resetting the scan position to "before the first card".
void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bosa->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_region_cur_card with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}
// Yields the next card of the remembered set, draining the sparse table
// first, then the fine-grain PRTs, then the coarse bitmap. The switch
// relies on deliberate fall-through to move from one source to the next
// within a single call. Returns false when all cards have been yielded.
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  // In the serial case every card must have been yielded exactly once.
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}
// Debugging support (see record()/record_event()/print_recorded()):
// lazily-allocated parallel arrays logging every recorded card addition,
// plus a separate log of GC events with the card-log index at which each
// event occurred.
OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
int HeapRegionRemSet::_n_recorded = 0;

HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int* HeapRegionRemSet::_recorded_event_index = NULL;
int HeapRegionRemSet::_n_recorded_events = 0;
// Records the addition of reference f (pointing into region hr) for later
// dumping via print_recorded(). Allocates the recording arrays on first
// use; once MaxRecorded entries exist, further additions are dropped with
// a log message.
void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  if (_recorded_oops == NULL) {
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
    _recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
    _recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded, mtGC);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded, mtGC);
  }
  if (_n_recorded == MaxRecorded) {
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    // Remember the card containing the reference as well as the
    // reference and region themselves.
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}
// Records a GC event marker (enabled by -XX:+G1RecordHRRSEvents) along
// with the current card-log position, so print_recorded() can interleave
// events with the recorded card additions. Stops recording with a log
// message once MaxRecordedEvents entries exist.
void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;

  if (_recorded_events == NULL) {
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    // Associate the event with the index of the next recorded card.
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}
1195 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
1196 switch (evnt) {
1197 case Event_EvacStart:
1198 str->print("Evac Start");
1199 break;
1200 case Event_EvacEnd:
1201 str->print("Evac End");
1202 break;
1203 case Event_RSUpdateEnd:
1204 str->print("RS Update End");
1205 break;
1206 }
1207 }
1209 void HeapRegionRemSet::print_recorded() {
1210 int cur_evnt = 0;
1211 Event cur_evnt_kind;
1212 int cur_evnt_ind = 0;
1213 if (_n_recorded_events > 0) {
1214 cur_evnt_kind = _recorded_events[cur_evnt];
1215 cur_evnt_ind = _recorded_event_index[cur_evnt];
1216 }
1218 for (int i = 0; i < _n_recorded; i++) {
1219 while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
1220 gclog_or_tty->print("Event: ");
1221 print_event(gclog_or_tty, cur_evnt_kind);
1222 gclog_or_tty->cr();
1223 cur_evnt++;
1224 if (cur_evnt < MaxRecordedEvents) {
1225 cur_evnt_kind = _recorded_events[cur_evnt];
1226 cur_evnt_ind = _recorded_event_index[cur_evnt];
1227 }
1228 }
1229 gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
1230 " for ref " PTR_FORMAT ".\n",
1231 _recorded_cards[i], _recorded_regions[i]->bottom(),
1232 _recorded_oops[i]);
1233 }
1234 }
// Prepares the (global) sparse table state for the parallel cleanup phase.
void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}
// Performs this remembered set's share of the cleanup work, delegating
// to the OtherRegionsTable.
void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}
// Completes the given cleanup task after all per-region work is done.
void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}
1249 #ifndef PRODUCT
// Checks that mem_size()/fl_mem_size() report plausible and consistent
// per-element sizes (guards against sizeof(this)-style mistakes that
// report only pointer size).
void PerRegionTable::test_fl_mem_size() {
  PerRegionTable* dummy = alloc(NULL);

  // A PRT must be at least a next-pointer plus its card bitmap.
  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
  assert(dummy->mem_size() > min_prt_size,
         err_msg("PerRegionTable memory usage is suspiciously small, only has "SIZE_FORMAT" bytes. "
                 "Should be at least "SIZE_FORMAT" bytes.", dummy->mem_size(), min_prt_size));
  // free() is expected to put dummy on the free list rather than release
  // its storage, so the object stays accessible below — TODO confirm
  // against PerRegionTable::free().
  free(dummy);
  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
  // try to reset the state
  _free_list = NULL;
  delete dummy;
}
// Entry point for the PerRegionTable self-test (non-product builds only).
void HeapRegionRemSet::test_prt() {
  PerRegionTable::test_fl_mem_size();
}
// Self-test for remembered set iteration: adds references from several
// regions to region 0's remembered set, forces a coarsening, and checks
// that iteration yields the expected total card count. Requires a live
// G1 heap and specific flag settings (see comment below).
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start);
    sum++;
  }
  // 11 references were added; coarsening replaces one region's 3 entries
  // with all of that region's cards (presumably CardsPerRegion == 2048
  // under the test's flag settings — verify before changing).
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
1325 #endif