Mon, 19 Aug 2019 10:11:31 +0200
8229401: Fix JFR code cache test failures
8223689: Add JFR Thread Sampling Support
8223690: Add JFR BiasedLock Event Support
8223691: Add JFR G1 Region Type Change Event Support
8223692: Add JFR G1 Heap Summary Event Support
Summary: Backport JFR from JDK11, additional fixes
Reviewed-by: neugens, apetushkov
Contributed-by: denghui.ddh@alibaba-inc.com
/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;
  BitMap          _bm;
  jint            _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable* _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* _free_list;

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  void recount_occupied() {
    _occupied = (jint) bm()->count_one_bits();
  }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _occupied(0),
    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
    _collision_list_next(NULL), _next(NULL), _prev(NULL)
  {}

  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                             from,
                             UseCompressedOops
                             ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                             : (void *)oopDesc::load_decode_heap_oop((oop*)from));
    }

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    // If the table used to belong to a continues humongous region and is
    // now reused for the corresponding start humongous region, we need to
    // make sure that we detect this. Thus, we call is_in_reserved_raw()
    // instead of just is_in_reserved() here.
    if (loc_hr->is_in_reserved_raw(from)) {
      size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
      CardIdx_t from_card = (CardIdx_t)
          hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);

      assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
             "Must be in range.");
      add_card_work(from_card, par);
    }
  }

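  // Worked example of the card index math above (illustrative, assuming the
  // usual 512-byte cards and 8-byte heap words): card_shift == 9 and
  // LogHeapWordSize == 3, so the shift is 9 - 3 == 6, i.e. 64 heap words per
  // card; a "from" pointer 640 words past bottom() maps to card 640 >> 6 == 10.
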
public:

  HeapRegion* hr() const {
    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
  }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before publishing
    // this PRT to concurrent threads.
    OrderAccess::release_store_ptr(&_hr, hr);
  }

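  // Note: the release store in init() pairs with the load_ptr_acquire in
  // hr() above; a thread that observes the newly published _hr value is
  // thereby guaranteed to also observe the cleared bitmap and links.
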
  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
    size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
    recount_occupied();
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size.)
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    CardTableModRefBS::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

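  // The loop above is a lock-free push onto the global free list: link the
  // tail of the freed chain to the current head and CAS the head from the
  // value we read (fl) to the new chain head (prt); if another thread won
  // the race, re-read the head and retry.
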
  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res =
        (PerRegionTable*) Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

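  // alloc() is the matching lock-free pop: CAS the head from the node we
  // read to its successor, reinitialize the node on success, and fall back
  // to a fresh C-heap allocation once the free list is empty.
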
  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;

OtherRegionsTable::OtherRegionsTable(HeapRegion* hr, Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _hr(hr), _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(),
              false /* in-resource-area */),
  _fine_grain_regions(NULL),
  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
  _fine_eviction_start(0),
  _sparse_table(hr)
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

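// For illustration: if G1RSetRegionEntries is 256, max_entries_log is 8 in
// the constructor above, so _max_fine_entries == 256 and
// _mod_max_fine_entries_mask == 0xff; masking a from-region's index with it
// then selects the hash bucket in _fine_grain_regions.
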
void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always append to the beginning of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

int** FromCardCache::_cache = NULL;
uint FromCardCache::_max_regions = 0;
size_t FromCardCache::_static_mem_size = 0;

void FromCardCache::initialize(uint n_par_rs, uint max_num_regions) {
  guarantee(_cache == NULL, "Should not call this multiple times");

  _max_regions = max_num_regions;
  _cache = Padded2DArray<int, mtGC>::create_unfreeable(n_par_rs,
                                                       _max_regions,
                                                       &_static_mem_size);

  invalidate(0, _max_regions);
}

void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) {
  guarantee((size_t)start_idx + new_num_regions <= max_uintx,
            err_msg("Trying to invalidate beyond maximum region, from %u size " SIZE_FORMAT,
                    start_idx, new_num_regions));
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
    uint end_idx = (start_idx + (uint)new_num_regions);
    assert(end_idx <= _max_regions, "Must be within max.");
    for (uint j = start_idx; j < end_idx; j++) {
      set(i, j, InvalidCard);
    }
  }
}

#ifndef PRODUCT
void FromCardCache::print(outputStream* out) {
  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
    for (uint j = 0; j < _max_regions; j++) {
      out->print_cr("_from_card_cache[" UINT32_FORMAT "][" UINT32_FORMAT "] = " INT32_FORMAT ".",
                    i, j, at(i, j));
    }
  }
}
#endif

void FromCardCache::clear(uint region_idx) {
  uint num_par_remsets = HeapRegionRemSet::num_par_rem_sets();
  for (uint i = 0; i < num_par_remsets; i++) {
    set(i, region_idx, InvalidCard);
  }
}

void OtherRegionsTable::initialize(uint max_regions) {
  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}

void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
  FromCardCache::invalidate(start_idx, num_regions);
}

void OtherRegionsTable::print_from_card_cache() {
  FromCardCache::print();
}

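// Note on the overall structure (a summary, not normative): a remembered set
// records "from" cards at three granularities. New entries go to the sparse
// per-region hash table first (a handful of cards per from-region); when a
// from-region's sparse entry overflows, its cards are moved into a
// fine-grained PerRegionTable bitmap (one bit per card); and when the table
// of fine-grained PRTs itself is full, the most-occupied PRT is evicted and
// its whole from-region is recorded in the coarse bitmap (one bit per
// region). add_reference() below walks these tiers in order.
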
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
  uint cur_hrm_ind = hr()->hrm_index();

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                           from,
                           UseCompressedOops
                           ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
                           : (void *)oopDesc::load_decode_heap_oop((oop*)from));
  }

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

  if (G1TraceHeapRegionRememberedSet) {
    gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = " INT32_FORMAT ")",
                           hr()->bottom(), from_card,
                           FromCardCache::at((uint)tid, cur_hrm_ind));
  }

  if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  from-card cache hit.");
    }
    assert(contains_reference(from), err_msg("We just found " PTR_FORMAT " in the FromCardCache", from));
    return;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print_cr("  coarse map hit.");
    }
    assert(contains_reference(from), err_msg("We just found " PTR_FORMAT " in the Coarse table", from));
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrm_ind, card_index)) {
        if (G1RecordHRRSOops) {
          HeapRegionRemSet::record(hr(), from);
          if (G1TraceHeapRegionRememberedSet) {
            gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
                                "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                                align_size_down(uintptr_t(from),
                                                CardTableModRefBS::card_size),
                                hr()->bottom(), from);
          }
        }
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   added card to sparse table.");
        }
        assert(contains_reference_locked(from), err_msg("We just added " PTR_FORMAT " to the Sparse table", from));
        return;
      } else {
        if (G1TraceHeapRegionRememberedSet) {
          gclog_or_tty->print_cr("   [tid %d] sparse table entry "
                                 "overflow(f: %d, t: %u)",
                                 tid, from_hrm_ind, cur_hrm_ind);
        }
      }
      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry* sprt_entry = _sparse_table.get_entry(from_hrm_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrm_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse. But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);

  if (G1RecordHRRSOops) {
    HeapRegionRemSet::record(hr(), from);
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Added card " PTR_FORMAT " to region "
                          "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                          align_size_down(uintptr_t(from),
                                          CardTableModRefBS::card_size),
                          hr()->bottom(), from);
    }
  }
  assert(contains_reference(from), err_msg("We just added " PTR_FORMAT " to the PRT", from));
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;

PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
    if (G1TraceHeapRegionRememberedSet) {
      gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
                          "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
                          hr()->bottom(),
                          max->hr()->bottom(),
                          _n_coarse_entries);
    }
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}

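// Note on the eviction above: delete_region_table() samples
// _fine_eviction_sample_size hash chains starting at _fine_eviction_start
// (striding by _fine_eviction_stride), picks the most-occupied PRT among
// them, marks that PRT's from-region in the coarse bitmap (so its cards are
// afterwards tracked only at region granularity), and returns the unlinked
// PRT for immediate reuse.
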
// At present, this must be called stop-world single-threaded.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminate garbage regions from the coarse map.
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
  }

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose) {
    gclog_or_tty->print("   Coarse map: before = " SIZE_FORMAT "...",
                        _n_coarse_entries);
  }
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose) {
    gclog_or_tty->print_cr("   after = " SIZE_FORMAT ".", _n_coarse_entries);
  }

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PerRegionTable* cur = _fine_grain_regions[i];
    PerRegionTable** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PerRegionTable* nxt = cur->collision_list_next();
      // If the entire region is dead, eliminate.
      if (G1RSScrubVerbose) {
        gclog_or_tty->print_cr("     For other region %u:",
                               cur->hr()->hrm_index());
      }
      if (!region_bm->at((size_t) cur->hr()->hrm_index())) {
        *prev = nxt;
        cur->set_collision_list_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("       deleted via region map.");
        }
        unlink_from_all(cur);
        PerRegionTable::free(cur);
      } else {
        // Do fine-grain elimination.
        if (G1RSScrubVerbose) {
          gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
        }
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose) {
          gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
        }
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_collision_list_next(NULL);
          _n_fine_entries--;
          unlink_from_all(cur);
          PerRegionTable::free(cur);
        } else {
          prev = cur->collision_list_next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current uses of this method may only pass values less than
    // G1RSetSparseRegionEntries for the limit. The general solution,
    // comparing against occupied(), would be too slow at this time.
    Unimplemented();
    return false;
  }
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable* cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

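// Note that occ_coarse() counts every card of each coarsened region, so
// occupied() becomes an overestimate of the number of distinct recorded
// cards once any coarsening has happened.
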
size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
           _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear_fcc() {
  FromCardCache::clear(hr()->hrm_index());
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  _coarse_map.clear();
  _n_fine_entries = 0;
  _n_coarse_entries = 0;

  clear_fcc();
}

bool OtherRegionsTable::del_single_region_table(size_t ind,
                                                HeapRegion* hr) {
  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  PerRegionTable** prev_addr = &_fine_grain_regions[ind];
  PerRegionTable* prt = *prev_addr;
  while (prt != NULL && prt->hr() != hr) {
    prev_addr = prt->collision_list_next_addr();
    prt = prt->collision_list_next();
  }
  if (prt != NULL) {
    assert(prt->hr() == hr, "Loop postcondition.");
    *prev_addr = prt->collision_list_next();
    unlink_from_all(prt);
    PerRegionTable::free(prt);
    _n_fine_entries--;
    return true;
  } else {
    return false;
  }
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);
  } else {
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

void
OtherRegionsTable::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _sparse_table.do_cleanup_work(hrrs_cleanup_task);
}

// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
uint HeapRegionRemSet::num_par_rem_sets() {
  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
    _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
  reset_for_par_iteration();
}

void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain tables sizes.
  // table_size = base * (log(region_size / 1M) + 1)
  const int LOG_M = 20;
  int region_size_log_mb = MAX2(HeapRegion::LogOfHRGrainBytes - LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
}

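// Worked example of the sizing above (illustrative): with 32M regions,
// HeapRegion::LogOfHRGrainBytes == 25, so region_size_log_mb == 25 - 20 == 5
// and both table sizes default to 6x their base values; with 1M regions the
// factor is 1.
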
bool HeapRegionRemSet::claim_iter() {
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}

void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}

bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}

#ifndef PRODUCT
void HeapRegionRemSet::print() {
  HeapRegionRemSetIterator iter(this);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
  }
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
                           iter.n_yielded(),
                           iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr("  %6d occ     (%6d coarse, %6d fine).",
                           occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif

void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}

void HeapRegionRemSet::clear() {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked();
}

void HeapRegionRemSet::clear_locked() {
  _code_roots.clear();
  _other_regions.clear();
  assert(occupied_locked() == 0, "Should be clear.");
  reset_for_par_iteration();
}

void HeapRegionRemSet::reset_for_par_iteration() {
  _iter_state = Unclaimed;
  _iter_claimed = 0;
  // It's good to check this to make sure that the two methods are in sync.
  assert(verify_ready_for_par_iteration(), "post-condition");
}

void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}

// Code roots support
//
// The code root set is protected by two separate locking schemes.
// When at a safepoint the per-hrrs lock must be held during modifications,
// except when doing a full gc.
// When not at a safepoint the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
         err_msg("should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
                 BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint())));
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((CodeCache_lock->owned_by_self() ||
          (SafepointSynchronize::is_at_safepoint() &&
           (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
         err_msg("not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
                 BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
                 BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread())));
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}

HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _g1h(G1CollectedHeap::heap()),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bosa(hrrs->bosa()),
  _is(Sparse),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _fine_cur_prt(NULL),
  _n_yielded_coarse(0),
  _n_yielded_fine(0),
  _n_yielded_sparse(0),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region. This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL if there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            err_msg("Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt));
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bosa->index_for(r_bot);

  // The bitmap scan for the PRT always scans from _cur_card_in_prt + 1.
  // To avoid special-casing this start case, and to not miss the first
  // bitmap entry, initialize _cur_card_in_prt with (size_t)-1 instead of 0;
  // the +1 in the scan then wraps around to 0.
  _cur_card_in_prt = (size_t)-1;
}

bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}

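// The iterator thus walks the three tiers in sparse -> fine -> coarse order
// via the deliberate switch fall-throughs above; since a recorded card lives
// in exactly one tier, each card is yielded once (checked against occupied()
// in the non-parallel case).
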
OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord**          HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion**        HeapRegionRemSet::_recorded_regions = NULL;
int                 HeapRegionRemSet::_n_recorded = 0;

HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int*                     HeapRegionRemSet::_recorded_event_index = NULL;
int                      HeapRegionRemSet::_n_recorded_events = 0;

void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  if (_recorded_oops == NULL) {
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
    _recorded_oops    = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC);
    _recorded_cards   = NEW_C_HEAP_ARRAY(HeapWord*,          MaxRecorded, mtGC);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*,        MaxRecorded, mtGC);
  }
  if (_n_recorded == MaxRecorded) {
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}

void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;

  if (_recorded_events == NULL) {
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}

void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
  switch (evnt) {
  case Event_EvacStart:
    str->print("Evac Start");
    break;
  case Event_EvacEnd:
    str->print("Evac End");
    break;
  case Event_RSUpdateEnd:
    str->print("RS Update End");
    break;
  }
}

void HeapRegionRemSet::print_recorded() {
  int cur_evnt = 0;
  Event cur_evnt_kind = Event_illegal;
  int cur_evnt_ind = 0;
  if (_n_recorded_events > 0) {
    cur_evnt_kind = _recorded_events[cur_evnt];
    cur_evnt_ind  = _recorded_event_index[cur_evnt];
  }

  for (int i = 0; i < _n_recorded; i++) {
    while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
      gclog_or_tty->print("Event: ");
      print_event(gclog_or_tty, cur_evnt_kind);
      gclog_or_tty->cr();
      cur_evnt++;
      if (cur_evnt < MaxRecordedEvents) {
        cur_evnt_kind = _recorded_events[cur_evnt];
        cur_evnt_ind  = _recorded_event_index[cur_evnt];
      }
    }
    gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
                        " for ref " PTR_FORMAT ".\n",
                        _recorded_cards[i], _recorded_regions[i]->bottom(),
                        _recorded_oops[i]);
  }
}

void HeapRegionRemSet::reset_for_cleanup_tasks() {
  SparsePRT::reset_for_cleanup_tasks();
}

void HeapRegionRemSet::do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task) {
  _other_regions.do_cleanup_work(hrrs_cleanup_task);
}

void
HeapRegionRemSet::finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task) {
  SparsePRT::finish_cleanup_task(hrrs_cleanup_task);
}

#ifndef PRODUCT
void PerRegionTable::test_fl_mem_size() {
  PerRegionTable* dummy = alloc(NULL);

  size_t min_prt_size = sizeof(void*) + dummy->bm()->size_in_words() * HeapWordSize;
  assert(dummy->mem_size() > min_prt_size,
         err_msg("PerRegionTable memory usage is suspiciously small, only has " SIZE_FORMAT " bytes. "
                 "Should be at least " SIZE_FORMAT " bytes.", dummy->mem_size(), min_prt_size));
  free(dummy);
  guarantee(dummy->mem_size() == fl_mem_size(), "fl_mem_size() does not return the correct element size");
  // try to reset the state
  _free_list = NULL;
  delete dummy;
}

void HeapRegionRemSet::test_prt() {
  PerRegionTable::test_fl_mem_size();
}

void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in the
  // same hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
    sum++;
  }
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif