Mon, 02 Aug 2010 12:51:43 -0700
6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp
1 /*
2 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_heapRegionRemSet.cpp.incl"
28 #define HRRS_VERBOSE 0
30 #define PRT_COUNT_OCCUPIED 1
32 // OtherRegionsTable
34 class PerRegionTable: public CHeapObj {
35 friend class OtherRegionsTable;
36 friend class HeapRegionRemSetIterator;
38 HeapRegion* _hr;
39 BitMap _bm;
40 #if PRT_COUNT_OCCUPIED
41 jint _occupied;
42 #endif
43 PerRegionTable* _next_free;
45 PerRegionTable* next_free() { return _next_free; }
46 void set_next_free(PerRegionTable* prt) { _next_free = prt; }
49 static PerRegionTable* _free_list;
51 #ifdef _MSC_VER
52 // For some reason even though the classes are marked as friend they are unable
53 // to access CardsPerRegion when private/protected. Only the windows c++ compiler
54 // says this Sun CC and linux gcc don't have a problem with access when private
56 public:
58 #endif // _MSC_VER
60 protected:
61 // We need access in order to union things into the base table.
62 BitMap* bm() { return &_bm; }
64 #if PRT_COUNT_OCCUPIED
65 void recount_occupied() {
66 _occupied = (jint) bm()->count_one_bits();
67 }
68 #endif
70 PerRegionTable(HeapRegion* hr) :
71 _hr(hr),
72 #if PRT_COUNT_OCCUPIED
73 _occupied(0),
74 #endif
75 _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
76 {}
78 static void free(PerRegionTable* prt) {
79 while (true) {
80 PerRegionTable* fl = _free_list;
81 prt->set_next_free(fl);
82 PerRegionTable* res =
83 (PerRegionTable*)
84 Atomic::cmpxchg_ptr(prt, &_free_list, fl);
85 if (res == fl) return;
86 }
87 ShouldNotReachHere();
88 }
90 static PerRegionTable* alloc(HeapRegion* hr) {
91 PerRegionTable* fl = _free_list;
92 while (fl != NULL) {
93 PerRegionTable* nxt = fl->next_free();
94 PerRegionTable* res =
95 (PerRegionTable*)
96 Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
97 if (res == fl) {
98 fl->init(hr);
99 return fl;
100 } else {
101 fl = _free_list;
102 }
103 }
104 assert(fl == NULL, "Loop condition.");
105 return new PerRegionTable(hr);
106 }
108 void add_card_work(CardIdx_t from_card, bool par) {
109 if (!_bm.at(from_card)) {
110 if (par) {
111 if (_bm.par_at_put(from_card, 1)) {
112 #if PRT_COUNT_OCCUPIED
113 Atomic::inc(&_occupied);
114 #endif
115 }
116 } else {
117 _bm.at_put(from_card, 1);
118 #if PRT_COUNT_OCCUPIED
119 _occupied++;
120 #endif
121 }
122 }
123 }
125 void add_reference_work(OopOrNarrowOopStar from, bool par) {
126 // Must make this robust in case "from" is not in "_hr", because of
127 // concurrency.
129 #if HRRS_VERBOSE
130 gclog_or_tty->print_cr(" PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
131 from, *from);
132 #endif
134 HeapRegion* loc_hr = hr();
135 // If the test below fails, then this table was reused concurrently
136 // with this operation. This is OK, since the old table was coarsened,
137 // and adding a bit to the new table is never incorrect.
138 if (loc_hr->is_in_reserved(from)) {
139 size_t hw_offset = pointer_delta((HeapWord*)from, loc_hr->bottom());
140 CardIdx_t from_card = (CardIdx_t)
141 hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
143 assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
144 "Must be in range.");
145 add_card_work(from_card, par);
146 }
147 }
149 public:
151 HeapRegion* hr() const { return _hr; }
153 #if PRT_COUNT_OCCUPIED
154 jint occupied() const {
155 // Overkill, but if we ever need it...
156 // guarantee(_occupied == _bm.count_one_bits(), "Check");
157 return _occupied;
158 }
159 #else
160 jint occupied() const {
161 return _bm.count_one_bits();
162 }
163 #endif
165 void init(HeapRegion* hr) {
166 _hr = hr;
167 #if PRT_COUNT_OCCUPIED
168 _occupied = 0;
169 #endif
170 _bm.clear();
171 }
173 void add_reference(OopOrNarrowOopStar from) {
174 add_reference_work(from, /*parallel*/ true);
175 }
177 void seq_add_reference(OopOrNarrowOopStar from) {
178 add_reference_work(from, /*parallel*/ false);
179 }
181 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
182 HeapWord* hr_bot = hr()->bottom();
183 size_t hr_first_card_index = ctbs->index_for(hr_bot);
184 bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
185 #if PRT_COUNT_OCCUPIED
186 recount_occupied();
187 #endif
188 }
190 void add_card(CardIdx_t from_card_index) {
191 add_card_work(from_card_index, /*parallel*/ true);
192 }
194 void seq_add_card(CardIdx_t from_card_index) {
195 add_card_work(from_card_index, /*parallel*/ false);
196 }
198 // (Destructively) union the bitmap of the current table into the given
199 // bitmap (which is assumed to be of the same size.)
200 void union_bitmap_into(BitMap* bm) {
201 bm->set_union(_bm);
202 }
204 // Mem size in bytes.
205 size_t mem_size() const {
206 return sizeof(this) + _bm.size_in_words() * HeapWordSize;
207 }
209 static size_t fl_mem_size() {
210 PerRegionTable* cur = _free_list;
211 size_t res = 0;
212 while (cur != NULL) {
213 res += sizeof(PerRegionTable);
214 cur = cur->next_free();
215 }
216 return res;
217 }
219 // Requires "from" to be in "hr()".
220 bool contains_reference(OopOrNarrowOopStar from) const {
221 assert(hr()->is_in_reserved(from), "Precondition.");
222 size_t card_ind = pointer_delta(from, hr()->bottom(),
223 CardTableModRefBS::card_size);
224 return _bm.at(card_ind);
225 }
226 };
// The global free list of retired PerRegionTables starts out empty.
PerRegionTable* PerRegionTable::_free_list = NULL;
// Optional instrumentation counting parallel expansions/contractions
// of PosParPRTs; compiled out by default.
#define COUNT_PAR_EXPANDS 0

#if COUNT_PAR_EXPANDS
static jint n_par_expands = 0;
static jint n_par_contracts = 0;
static jint par_expand_list_len = 0;
static jint max_par_expand_list_len = 0;

// Record one expansion and periodically dump the running statistics.
static void print_par_expand() {
  Atomic::inc(&n_par_expands);
  Atomic::inc(&par_expand_list_len);
  // Benign race: the max is only statistics.
  if (par_expand_list_len > max_par_expand_list_len) {
    max_par_expand_list_len = par_expand_list_len;
  }
  if ((n_par_expands % 10) == 0) {
    gclog_or_tty->print_cr("\n\n%d par expands: %d contracts, "
                           "len = %d, max_len = %d\n.",
                           n_par_expands, n_par_contracts, par_expand_list_len,
                           max_par_expand_list_len);
  }
}
#endif
254 class PosParPRT: public PerRegionTable {
255 PerRegionTable** _par_tables;
257 enum SomePrivateConstants {
258 ReserveParTableExpansion = 1
259 };
261 void par_contract() {
262 assert(_par_tables != NULL, "Precondition.");
263 int n = HeapRegionRemSet::num_par_rem_sets()-1;
264 for (int i = 0; i < n; i++) {
265 _par_tables[i]->union_bitmap_into(bm());
266 PerRegionTable::free(_par_tables[i]);
267 _par_tables[i] = NULL;
268 }
269 #if PRT_COUNT_OCCUPIED
270 // We must recount the "occupied."
271 recount_occupied();
272 #endif
273 FREE_C_HEAP_ARRAY(PerRegionTable*, _par_tables);
274 _par_tables = NULL;
275 #if COUNT_PAR_EXPANDS
276 Atomic::inc(&n_par_contracts);
277 Atomic::dec(&par_expand_list_len);
278 #endif
279 }
281 static PerRegionTable** _par_table_fl;
283 PosParPRT* _next;
285 static PosParPRT* _free_list;
287 PerRegionTable** par_tables() const {
288 assert(uintptr_t(NULL) == 0, "Assumption.");
289 if (uintptr_t(_par_tables) <= ReserveParTableExpansion)
290 return NULL;
291 else
292 return _par_tables;
293 }
295 PosParPRT* _next_par_expanded;
296 PosParPRT* next_par_expanded() { return _next_par_expanded; }
297 void set_next_par_expanded(PosParPRT* ppprt) { _next_par_expanded = ppprt; }
298 static PosParPRT* _par_expanded_list;
300 public:
302 PosParPRT(HeapRegion* hr) : PerRegionTable(hr), _par_tables(NULL) {}
304 jint occupied() const {
305 jint res = PerRegionTable::occupied();
306 if (par_tables() != NULL) {
307 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
308 res += par_tables()[i]->occupied();
309 }
310 }
311 return res;
312 }
314 void init(HeapRegion* hr) {
315 PerRegionTable::init(hr);
316 _next = NULL;
317 if (par_tables() != NULL) {
318 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
319 par_tables()[i]->init(hr);
320 }
321 }
322 }
324 static void free(PosParPRT* prt) {
325 while (true) {
326 PosParPRT* fl = _free_list;
327 prt->set_next(fl);
328 PosParPRT* res =
329 (PosParPRT*)
330 Atomic::cmpxchg_ptr(prt, &_free_list, fl);
331 if (res == fl) return;
332 }
333 ShouldNotReachHere();
334 }
336 static PosParPRT* alloc(HeapRegion* hr) {
337 PosParPRT* fl = _free_list;
338 while (fl != NULL) {
339 PosParPRT* nxt = fl->next();
340 PosParPRT* res =
341 (PosParPRT*)
342 Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
343 if (res == fl) {
344 fl->init(hr);
345 return fl;
346 } else {
347 fl = _free_list;
348 }
349 }
350 assert(fl == NULL, "Loop condition.");
351 return new PosParPRT(hr);
352 }
354 PosParPRT* next() const { return _next; }
355 void set_next(PosParPRT* nxt) { _next = nxt; }
356 PosParPRT** next_addr() { return &_next; }
358 bool should_expand(int tid) {
359 return par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region();
360 }
362 void par_expand() {
363 int n = HeapRegionRemSet::num_par_rem_sets()-1;
364 if (n <= 0) return;
365 if (_par_tables == NULL) {
366 PerRegionTable* res =
367 (PerRegionTable*)
368 Atomic::cmpxchg_ptr((PerRegionTable*)ReserveParTableExpansion,
369 &_par_tables, NULL);
370 if (res != NULL) return;
371 // Otherwise, we reserved the right to do the expansion.
373 PerRegionTable** ptables = NEW_C_HEAP_ARRAY(PerRegionTable*, n);
374 for (int i = 0; i < n; i++) {
375 PerRegionTable* ptable = PerRegionTable::alloc(hr());
376 ptables[i] = ptable;
377 }
378 // Here we do not need an atomic.
379 _par_tables = ptables;
380 #if COUNT_PAR_EXPANDS
381 print_par_expand();
382 #endif
383 // We must put this table on the expanded list.
384 PosParPRT* exp_head = _par_expanded_list;
385 while (true) {
386 set_next_par_expanded(exp_head);
387 PosParPRT* res =
388 (PosParPRT*)
389 Atomic::cmpxchg_ptr(this, &_par_expanded_list, exp_head);
390 if (res == exp_head) return;
391 // Otherwise.
392 exp_head = res;
393 }
394 ShouldNotReachHere();
395 }
396 }
398 void add_reference(OopOrNarrowOopStar from, int tid) {
399 // Expand if necessary.
400 PerRegionTable** pt = par_tables();
401 if (pt != NULL) {
402 // We always have to assume that mods to table 0 are in parallel,
403 // because of the claiming scheme in parallel expansion. A thread
404 // with tid != 0 that finds the table to be NULL, but doesn't succeed
405 // in claiming the right of expanding it, will end up in the else
406 // clause of the above if test. That thread could be delayed, and a
407 // thread 0 add reference could see the table expanded, and come
408 // here. Both threads would be adding in parallel. But we get to
409 // not use atomics for tids > 0.
410 if (tid == 0) {
411 PerRegionTable::add_reference(from);
412 } else {
413 pt[tid-1]->seq_add_reference(from);
414 }
415 } else {
416 // Not expanded -- add to the base table.
417 PerRegionTable::add_reference(from);
418 }
419 }
421 void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
422 assert(_par_tables == NULL, "Precondition");
423 PerRegionTable::scrub(ctbs, card_bm);
424 }
426 size_t mem_size() const {
427 size_t res =
428 PerRegionTable::mem_size() + sizeof(this) - sizeof(PerRegionTable);
429 if (_par_tables != NULL) {
430 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
431 res += _par_tables[i]->mem_size();
432 }
433 }
434 return res;
435 }
437 static size_t fl_mem_size() {
438 PosParPRT* cur = _free_list;
439 size_t res = 0;
440 while (cur != NULL) {
441 res += sizeof(PosParPRT);
442 cur = cur->next();
443 }
444 return res;
445 }
447 bool contains_reference(OopOrNarrowOopStar from) const {
448 if (PerRegionTable::contains_reference(from)) return true;
449 if (_par_tables != NULL) {
450 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets()-1; i++) {
451 if (_par_tables[i]->contains_reference(from)) return true;
452 }
453 }
454 return false;
455 }
457 static void par_contract_all();
459 };
// Contract every expanded table. Each list head is claimed with a CAS
// so that, in principle, several threads could drain the list; the
// claimant unions the side tables back into the base and frees them.
void PosParPRT::par_contract_all() {
  PosParPRT* hd = _par_expanded_list;
  while (hd != NULL) {
    PosParPRT* nxt = hd->next_par_expanded();
    PosParPRT* res =
      (PosParPRT*)
      Atomic::cmpxchg_ptr(nxt, &_par_expanded_list, hd);
    if (res == hd) {
      // We claimed the right to contract this table.
      hd->set_next_par_expanded(NULL);
      hd->par_contract();
      // Restart from the (possibly changed) list head.
      hd = _par_expanded_list;
    } else {
      // Lost the race; "res" is the current head.
      hd = res;
    }
  }
}
PosParPRT* PosParPRT::_free_list = NULL;
PosParPRT* PosParPRT::_par_expanded_list = NULL;

// Statistics for the from-card cache (only used under COUNT_CACHE).
jint OtherRegionsTable::_cache_probes = 0;
jint OtherRegionsTable::_cache_hits = 0;

// Sizing of the fine-grain table; computed lazily from
// G1RSetRegionEntries by the first OtherRegionsTable constructed.
size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
#if SAMPLE_FOR_EVICTION
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
#endif
// Construct the "points-into" remembered set for "hr". The first
// construction also computes the static sizing parameters for the
// fine-grain table and (if enabled) eviction sampling.
OtherRegionsTable::OtherRegionsTable(HeapRegion* hr) :
  _g1h(G1CollectedHeap::heap()),
  _m(Mutex::leaf, "An OtherRegionsTable lock", true),
  _hr(hr),
  _coarse_map(G1CollectedHeap::heap()->max_regions(),
              false /* in-resource-area */),
  _fine_grain_regions(NULL),
  _n_fine_entries(0), _n_coarse_entries(0),
#if SAMPLE_FOR_EVICTION
  _fine_eviction_start(0),
#endif
  _sparse_table(hr)
{
  typedef PosParPRT* PosParPRTPtr;
  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    // Round to a power of two so a mask can hash region indices.
    _max_fine_entries = (size_t)(1 << max_entries_log);
    _mod_max_fine_entries_mask = _max_fine_entries - 1;
#if SAMPLE_FOR_EVICTION
    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
#endif
  }
  _fine_grain_regions = new PosParPRTPtr[_max_fine_entries];
  if (_fine_grain_regions == NULL)
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
                          "Failed to allocate _fine_grain_entries.");
  // All hash chains start empty.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}
// The from-card cache: _from_card_cache[tid][region] holds the last
// from-card recorded by worker "tid" for that region (-1 = none).
int** OtherRegionsTable::_from_card_cache = NULL;
size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
531 void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
532 _from_card_cache_max_regions = max_regions;
534 int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
535 _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs);
536 for (int i = 0; i < n_par_rs; i++) {
537 _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions);
538 for (size_t j = 0; j < max_regions; j++) {
539 _from_card_cache[i][j] = -1; // An invalid value.
540 }
541 }
542 _from_card_cache_mem_size = n_par_rs * max_regions * sizeof(int);
543 }
545 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
546 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
547 assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
548 for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
549 _from_card_cache[i][j] = -1; // An invalid value.
550 }
551 }
552 }
554 #ifndef PRODUCT
555 void OtherRegionsTable::print_from_card_cache() {
556 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
557 for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
558 gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
559 i, j, _from_card_cache[i][j]);
560 }
561 }
562 }
563 #endif
// Record that location "from" (in some other region) holds a reference
// into this table's region. "tid" identifies the caller so the
// per-thread from-card cache can be consulted. The entry is stored in
// the cheapest structure that fits: from-card cache hit -> nothing;
// sparse table; fine-grain table (possibly evicting/coarsening another
// region); or nothing, if the from-region is already coarse.
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
  size_t cur_hrs_ind = hr()->hrs_index();

#if HRRS_VERBOSE
  gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                         from,
                         UseCompressedOops
                         ? oopDesc::load_decode_heap_oop((narrowOop*)from)
                         : oopDesc::load_decode_heap_oop((oop*)from));
#endif

  int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);

#if HRRS_VERBOSE
  gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
                         hr()->bottom(), from_card,
                         _from_card_cache[tid][cur_hrs_ind]);
#endif

#define COUNT_CACHE 0
#if COUNT_CACHE
  jint p = Atomic::add(1, &_cache_probes);
  if ((p % 10000) == 0) {
    jint hits = _cache_hits;
    gclog_or_tty->print_cr("%d/%d = %5.2f%% RS cache hits.",
                           _cache_hits, p, 100.0* (float)hits/(float)p);
  }
#endif
  // Fast path: this thread recorded this very card last time for this
  // region, so the entry must already be present.
  if (from_card == _from_card_cache[tid][cur_hrs_ind]) {
#if HRRS_VERBOSE
    gclog_or_tty->print_cr(" from-card cache hit.");
#endif
#if COUNT_CACHE
    Atomic::inc(&_cache_hits);
#endif
    assert(contains_reference(from), "We just added it!");
    return;
  } else {
    _from_card_cache[tid][cur_hrs_ind] = from_card;
  }

  // Note that this may be a continued H region.
  HeapRegion* from_hr = _g1h->heap_region_containing_raw(from);
  RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrs_ind)) {
#if HRRS_VERBOSE
    gclog_or_tty->print_cr(" coarse map hit.");
#endif
    assert(contains_reference(from), "We just added it!");
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrs_ind & _mod_max_fine_entries_mask;
  PosParPRT* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      uintptr_t from_hr_bot_card_index =
        uintptr_t(from_hr->bottom())
          >> CardTableModRefBS::card_shift;
      CardIdx_t card_index = from_card - from_hr_bot_card_index;
      assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
             "Must be in range.");
      // Try the space-efficient sparse table first.
      if (G1HRRSUseSparseTable &&
          _sparse_table.add_card(from_hrs_ind, card_index)) {
        if (G1RecordHRRSOops) {
          HeapRegionRemSet::record(hr(), from);
#if HRRS_VERBOSE
          gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
                              "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                              align_size_down(uintptr_t(from),
                                              CardTableModRefBS::card_size),
                              hr()->bottom(), from);
#endif
        }
#if HRRS_VERBOSE
        gclog_or_tty->print_cr(" added card to sparse table.");
#endif
        assert(contains_reference_locked(from), "We just added it!");
        return;
      } else {
#if HRRS_VERBOSE
        gclog_or_tty->print_cr(" [tid %d] sparse table entry "
                               "overflow(f: %d, t: %d)",
                               tid, from_hrs_ind, cur_hrs_ind);
#endif
      }

      // Sparse table full (or disabled): promote to a fine-grain
      // table, evicting (coarsening) one if the table is at capacity.
      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
      } else {
        prt = PosParPRT::alloc(from_hr);
      }
      prt->init(from_hr);

      // Link the new table at the head of its hash chain.
      PosParPRT* first_prt = _fine_grain_regions[ind];
      prt->set_next(first_prt);  // XXX Maybe move to init?
      _fine_grain_regions[ind] = prt;
      _n_fine_entries++;

      if (G1HRRSUseSparseTable) {
        // Transfer from sparse to fine-grain.
        SparsePRTEntry *sprt_entry = _sparse_table.get_entry(from_hrs_ind);
        assert(sprt_entry != NULL, "There should have been an entry");
        for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
          CardIdx_t c = sprt_entry->card(i);
          if (c != SparsePRTEntry::NullEntry) {
            prt->add_card(c);
          }
        }
        // Now we can delete the sparse entry.
        bool res = _sparse_table.delete_entry(from_hrs_ind);
        assert(res, "It should have been there.");
      }
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse. But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  if (prt->should_expand(tid)) {
    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
    HeapRegion* prt_hr = prt->hr();
    if (prt_hr == from_hr) {
      // Make sure the table still corresponds to the same region
      prt->par_expand();
      prt->add_reference(from, tid);
    }
    // else: The table has been concurrently coarsened, evicted, and
    // the table data structure re-used for another table. So, we
    // don't need to add the reference any more given that the table
    // has been coarsened and the whole region will be scanned anyway.
  } else {
    prt->add_reference(from, tid);
  }
  if (G1RecordHRRSOops) {
    HeapRegionRemSet::record(hr(), from);
#if HRRS_VERBOSE
    gclog_or_tty->print("Added card " PTR_FORMAT " to region "
                        "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                        align_size_down(uintptr_t(from),
                                        CardTableModRefBS::card_size),
                        hr()->bottom(), from);
#endif
  }
  assert(contains_reference(from), "We just added it!");
}
721 PosParPRT*
722 OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
723 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
724 PosParPRT* prt = _fine_grain_regions[ind];
725 while (prt != NULL && prt->hr() != hr) {
726 prt = prt->next();
727 }
728 // Loop postcondition is the method postcondition.
729 return prt;
730 }
// Optional census of fine-grain occupancies at coarsening time;
// compiled out by default.
#define DRT_CENSUS 0

#if DRT_CENSUS
static const int HistoSize = 6;
static int global_histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
static int coarsenings = 0;
static int occ_sum = 0;
#endif

// Total number of coarsenings across all OtherRegionsTables.
jint OtherRegionsTable::_n_coarsenings = 0;
// Evict the most-occupied fine-grain table (sampling only a subset of
// chains when SAMPLE_FOR_EVICTION is on), mark its region in the
// coarse map, and return the unlinked table so the caller can reuse
// it. Requires the table lock and a full fine-grain table.
PosParPRT* OtherRegionsTable::delete_region_table() {
#if DRT_CENSUS
  int histo[HistoSize] = { 0, 0, 0, 0, 0, 0 };
  const int histo_limits[] = { 1, 4, 16, 64, 256, 2048 };
#endif

  assert(_m.owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PosParPRT* max = NULL;
  jint max_occ = 0;
  // Predecessor link of "max", so it can be unspliced below; only
  // valid once max != NULL (guaranteed before use).
  PosParPRT** max_prev;
  size_t max_ind;

#if SAMPLE_FOR_EVICTION
  // Probe _fine_eviction_sample_size chains, starting at a rotating
  // offset and stepping by _fine_eviction_stride, tracking the
  // most-occupied table seen.
  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PosParPRT** prev = &_fine_grain_regions[ii];
    PosParPRT* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->next_addr();
      cur = cur->next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }
  // Rotate the starting point for the next eviction.
  _fine_eviction_start++;
  if (_fine_eviction_start >= _n_fine_entries)
    _fine_eviction_start -= _n_fine_entries;
#else
  // Exhaustive scan of every chain for the most-occupied table.
  for (int i = 0; i < _max_fine_entries; i++) {
    PosParPRT** prev = &_fine_grain_regions[i];
    PosParPRT* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
#if DRT_CENSUS
      for (int k = 0; k < HistoSize; k++) {
        if (cur_occ <= histo_limits[k]) {
          histo[k]++; global_histo[k]++; break;
        }
      }
#endif
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->next_addr();
      cur = cur->next();
    }
  }
#endif
  // XXX
  guarantee(max != NULL, "Since _n_fine_entries > 0");
#if DRT_CENSUS
  gclog_or_tty->print_cr("In a coarsening: histo of occs:");
  for (int k = 0; k < HistoSize; k++) {
    gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], histo[k]);
  }
  coarsenings++;
  occ_sum += max_occ;
  if ((coarsenings % 100) == 0) {
    gclog_or_tty->print_cr("\ncoarsenings = %d; global summary:", coarsenings);
    for (int k = 0; k < HistoSize; k++) {
      gclog_or_tty->print_cr(" <= %4d: %5d.", histo_limits[k], global_histo[k]);
    }
    gclog_or_tty->print_cr("Avg occ of deleted region = %6.2f.",
                           (float)occ_sum/(float)coarsenings);
  }
#endif

  // Set the corresponding coarse bit.
  int max_hrs_index = max->hr()->hrs_index();
  if (!_coarse_map.at(max_hrs_index)) {
    _coarse_map.at_put(max_hrs_index, true);
    _n_coarse_entries++;
#if 0
    gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
                        "for region [" PTR_FORMAT "...] (%d coarse entries).\n",
                        hr()->bottom(),
                        max->hr()->bottom(),
                        _n_coarse_entries);
#endif
  }

  // Unsplice.
  *max_prev = max->next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}
// At present, this must be called stop-world single-threaded.
// Remove entries for dead regions/cards found by marking: intersect
// the coarse map with the live-region bitmap, and scrub each
// fine-grain table against the live-card bitmap, freeing tables that
// become empty.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                              BitMap* region_bm, BitMap* card_bm) {
  // First eliminated garbage regions from the coarse map.
  if (G1RSScrubVerbose)
    gclog_or_tty->print_cr("Scrubbing region %d:", hr()->hrs_index());

  assert(_coarse_map.size() == region_bm->size(), "Precondition");
  if (G1RSScrubVerbose)
    gclog_or_tty->print(" Coarse map: before = %d...", _n_coarse_entries);
  _coarse_map.set_intersection(*region_bm);
  _n_coarse_entries = _coarse_map.count_one_bits();
  if (G1RSScrubVerbose)
    gclog_or_tty->print_cr(" after = %d.", _n_coarse_entries);

  // Now do the fine-grained maps.
  for (size_t i = 0; i < _max_fine_entries; i++) {
    PosParPRT* cur = _fine_grain_regions[i];
    // "prev" tracks the link pointing at "cur" so unlinking is O(1).
    PosParPRT** prev = &_fine_grain_regions[i];
    while (cur != NULL) {
      PosParPRT* nxt = cur->next();
      // If the entire region is dead, eliminate.
      if (G1RSScrubVerbose)
        gclog_or_tty->print_cr(" For other region %d:", cur->hr()->hrs_index());
      if (!region_bm->at(cur->hr()->hrs_index())) {
        *prev = nxt;
        cur->set_next(NULL);
        _n_fine_entries--;
        if (G1RSScrubVerbose)
          gclog_or_tty->print_cr(" deleted via region map.");
        PosParPRT::free(cur);
      } else {
        // Do fine-grain elimination.
        if (G1RSScrubVerbose)
          gclog_or_tty->print(" occ: before = %4d.", cur->occupied());
        cur->scrub(ctbs, card_bm);
        if (G1RSScrubVerbose)
          gclog_or_tty->print_cr(" after = %4d.", cur->occupied());
        // Did that empty the table completely?
        if (cur->occupied() == 0) {
          *prev = nxt;
          cur->set_next(NULL);
          _n_fine_entries--;
          PosParPRT::free(cur);
        } else {
          prev = cur->next_addr();
        }
      }
      cur = nxt;
    }
  }
  // Since we may have deleted a from_card_cache entry from the RS, clear
  // the FCC.
  clear_fcc();
}
908 size_t OtherRegionsTable::occupied() const {
909 // Cast away const in this case.
910 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
911 size_t sum = occ_fine();
912 sum += occ_sparse();
913 sum += occ_coarse();
914 return sum;
915 }
917 size_t OtherRegionsTable::occ_fine() const {
918 size_t sum = 0;
919 for (size_t i = 0; i < _max_fine_entries; i++) {
920 PosParPRT* cur = _fine_grain_regions[i];
921 while (cur != NULL) {
922 sum += cur->occupied();
923 cur = cur->next();
924 }
925 }
926 return sum;
927 }
929 size_t OtherRegionsTable::occ_coarse() const {
930 return (_n_coarse_entries * HeapRegion::CardsPerRegion);
931 }
933 size_t OtherRegionsTable::occ_sparse() const {
934 return _sparse_table.occupied();
935 }
937 size_t OtherRegionsTable::mem_size() const {
938 // Cast away const in this case.
939 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
940 size_t sum = 0;
941 for (size_t i = 0; i < _max_fine_entries; i++) {
942 PosParPRT* cur = _fine_grain_regions[i];
943 while (cur != NULL) {
944 sum += cur->mem_size();
945 cur = cur->next();
946 }
947 }
948 sum += (sizeof(PosParPRT*) * _max_fine_entries);
949 sum += (_coarse_map.size_in_words() * HeapWordSize);
950 sum += (_sparse_table.mem_size());
951 sum += sizeof(*this) - sizeof(_sparse_table); // Avoid double counting above.
952 return sum;
953 }
// Bytes used by the shared (static) from-card cache.
size_t OtherRegionsTable::static_mem_size() {
  return _from_card_cache_mem_size;
}
959 size_t OtherRegionsTable::fl_mem_size() {
960 return PerRegionTable::fl_mem_size() + PosParPRT::fl_mem_size();
961 }
963 void OtherRegionsTable::clear_fcc() {
964 for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
965 _from_card_cache[i][hr()->hrs_index()] = -1;
966 }
967 }
969 void OtherRegionsTable::clear() {
970 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
971 for (size_t i = 0; i < _max_fine_entries; i++) {
972 PosParPRT* cur = _fine_grain_regions[i];
973 while (cur != NULL) {
974 PosParPRT* nxt = cur->next();
975 PosParPRT::free(cur);
976 cur = nxt;
977 }
978 _fine_grain_regions[i] = NULL;
979 }
980 _sparse_table.clear();
981 _coarse_map.clear();
982 _n_fine_entries = 0;
983 _n_coarse_entries = 0;
985 clear_fcc();
986 }
// Remove remembered-set entries recording references coming from
// "from_hr" (called when that region is freed): drop its fine-grain
// table if present, else clear its coarse bit, and invalidate any
// stale from-card cache entries.
void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
  MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  size_t hrs_ind = (size_t)from_hr->hrs_index();
  size_t ind = hrs_ind & _mod_max_fine_entries_mask;
  if (del_single_region_table(ind, from_hr)) {
    assert(!_coarse_map.at(hrs_ind), "Inv");
  } else {
    _coarse_map.par_at_put(hrs_ind, 0);
  }
  // Check to see if any of the fcc entries come from here.
  int hr_ind = hr()->hrs_index();
  for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
    int fcc_ent = _from_card_cache[tid][hr_ind];
    if (fcc_ent != -1) {
      // Reconstruct the address of the cached from-card.
      HeapWord* card_addr = (HeapWord*)
        (uintptr_t(fcc_ent) << CardTableModRefBS::card_shift);
      // NOTE(review): this tests the cached card against hr() (the
      // owning region), yet the cached from-card normally lies in a
      // "from" region — presumably this was meant to be
      // from_hr->is_in_reserved(card_addr); confirm before changing.
      if (hr()->is_in_reserved(card_addr)) {
        // Clear the from card cache.
        _from_card_cache[tid][hr_ind] = -1;
      }
    }
  }
}
1012 bool OtherRegionsTable::del_single_region_table(size_t ind,
1013 HeapRegion* hr) {
1014 assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
1015 PosParPRT** prev_addr = &_fine_grain_regions[ind];
1016 PosParPRT* prt = *prev_addr;
1017 while (prt != NULL && prt->hr() != hr) {
1018 prev_addr = prt->next_addr();
1019 prt = prt->next();
1020 }
1021 if (prt != NULL) {
1022 assert(prt->hr() == hr, "Loop postcondition.");
1023 *prev_addr = prt->next();
1024 PosParPRT::free(prt);
1025 _n_fine_entries--;
1026 return true;
1027 } else {
1028 return false;
1029 }
1030 }
1032 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
1033 // Cast away const in this case.
1034 MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
1035 return contains_reference_locked(from);
1036 }
// Test whether a reference from "from" is recorded; the caller must
// hold _m (or be otherwise serialized). Checks the coarse map, then
// the fine-grain table for from's region, then the sparse table.
bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing_raw(from);
  if (hr == NULL) return false;
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PosParPRT* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                     hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    // No fine-grain table: compute the card index within "hr" and ask
    // the sparse table.
    uintptr_t from_card =
      (uintptr_t(from) >> CardTableModRefBS::card_shift);
    uintptr_t hr_bot_card_index =
      uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
    assert(from_card >= hr_bot_card_index, "Inv");
    CardIdx_t card_index = from_card - hr_bot_card_index;
    assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
           "Must be in range.");
    return _sparse_table.contains_card(hr_ind, card_index);
  }

}
// Determines how many threads can add records to an rset in parallel.
// This can be done by either mutator threads together with the
// concurrent refinement threads or GC threads.
int HeapRegionRemSet::num_par_rem_sets() {
  // Mutator-side worker ids plus the refinement threads, bounded below by
  // the number of parallel GC threads.
  return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
}
// Constructs an empty, unclaimed remembered set for region "hr"; "bosa"
// is used to map card indices to addresses during iteration.
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                   HeapRegion* hr)
  : _bosa(bosa), _other_regions(hr), _iter_state(Unclaimed) { }
// Derives default sparse and fine-grain table sizes from the region size,
// unless the corresponding flags were set explicitly on the command line.
void HeapRegionRemSet::setup_remset_size() {
  // Setup sparse and fine-grain tables sizes.
  // table_size = base * (log(region_size / 1M) + 1)
  int region_size_log_mb = MAX2((int)HeapRegion::LogOfHRGrainBytes - (int)LOG_M, 0);
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * (region_size_log_mb + 1);
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0 , "Sanity");
}
// Resets the claim state so that a new parallel iteration may claim
// this remembered set.
void HeapRegionRemSet::init_for_par_iteration() {
  _iter_state = Unclaimed;
}
// Attempts to claim this rset for iteration.  Returns true for exactly
// one of the (possibly many) threads racing on the claim.
bool HeapRegionRemSet::claim_iter() {
  // Cheap non-atomic test first; only the CAS decides the winner.
  if (_iter_state != Unclaimed) return false;
  jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_state), Unclaimed);
  return (res == Unclaimed);
}
// Marks the iteration over this rset as finished.
void HeapRegionRemSet::set_iter_complete() {
  _iter_state = Complete;
}
// Returns true iff set_iter_complete() has been called since the last
// init_for_par_iteration().
bool HeapRegionRemSet::iter_is_complete() {
  return _iter_state == Complete;
}
// Binds "iter" to this remembered set, resetting its iteration state.
void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
  iter->initialize(this);
}
#ifndef PRODUCT
// Debug printing: lists the start address of every card in the rset and
// cross-checks the iterator's yield counts against occupied().
void HeapRegionRemSet::print() const {
  HeapRegionRemSetIterator iter;
  init_iterator(&iter);
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr(" Card " PTR_FORMAT ".", card_start);
  }
  // XXX
  if (iter.n_yielded() != occupied()) {
    gclog_or_tty->print_cr("Yielded disagrees with occupied:");
    gclog_or_tty->print_cr(" %6d yielded (%6d coarse, %6d fine).",
                  iter.n_yielded(),
                  iter.n_yielded_coarse(), iter.n_yielded_fine());
    gclog_or_tty->print_cr(" %6d occ (%6d coarse, %6d fine).",
                  occupied(), occ_coarse(), occ_fine());
  }
  guarantee(iter.n_yielded() == occupied(),
            "We should have yielded all the represented cards.");
}
#endif
// Static cleanup hook: delegates to SparsePRT to clean up all sparse
// tables.
void HeapRegionRemSet::cleanup() {
  SparsePRT::cleanup_all();
}
// Static parallel cleanup hook: delegates to PosParPRT to contract all
// fine-grain tables.
void HeapRegionRemSet::par_cleanup() {
  PosParPRT::par_contract_all();
}
// Empties the remembered set entirely.
void HeapRegionRemSet::clear() {
  _other_regions.clear();
  assert(occupied() == 0, "Should be clear.");
}
// Delegates scrubbing to the underlying OtherRegionsTable; the bitmaps
// presumably identify the live regions/cards to retain -- see
// OtherRegionsTable::scrub for the exact semantics.
void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
                             BitMap* region_bm, BitMap* card_bm) {
  _other_regions.scrub(ctbs, region_bm, card_bm);
}
1155 //-------------------- Iteration --------------------
// Constructs an unbound iterator; initialize() must be called before use.
// The sparse sub-iterator is seeded with the card index of the start of
// the reserved heap.
HeapRegionRemSetIterator::
HeapRegionRemSetIterator() :
  _hrrs(NULL),
  _g1h(G1CollectedHeap::heap()),
  _bosa(NULL),
  _sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
               >> CardTableModRefBS::card_shift)
{}
1166 void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
1167 _hrrs = hrrs;
1168 _coarse_map = &_hrrs->_other_regions._coarse_map;
1169 _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
1170 _bosa = _hrrs->bosa();
1172 _is = Sparse;
1173 // Set these values so that we increment to the first region.
1174 _coarse_cur_region_index = -1;
1175 _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);;
1177 _cur_region_cur_card = 0;
1179 _fine_array_index = -1;
1180 _fine_cur_prt = NULL;
1182 _n_yielded_coarse = 0;
1183 _n_yielded_fine = 0;
1184 _n_yielded_sparse = 0;
1186 _sparse_iter.init(&hrrs->_other_regions._sparse_table);
1187 }
// Yields (via "card_index") the next card of the coarse-map regions,
// advancing to the next coarsened region when the current one is
// exhausted.  Returns false when all coarse regions have been traversed.
bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region.  This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      // Found another coarsened region; record its base card offset.
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at(_coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bosa->index_for(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}
1214 void HeapRegionRemSetIterator::fine_find_next_non_null_prt() {
1215 // Otherwise, find the next bucket list in the array.
1216 _fine_array_index++;
1217 while (_fine_array_index < (int) OtherRegionsTable::_max_fine_entries) {
1218 _fine_cur_prt = _fine_grain_regions[_fine_array_index];
1219 if (_fine_cur_prt != NULL) return;
1220 else _fine_array_index++;
1221 }
1222 assert(_fine_cur_prt == NULL, "Loop post");
1223 }
// Yields (via "card_index") the next card recorded in the fine-grain
// tables: advances within the current table's bitmap, then to the next
// table in the current bucket, then to the next non-empty bucket.
// Returns false when the fine-grain tables are exhausted.
bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    // Advance past the previously-yielded card.
    _cur_region_cur_card =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
  }
  while (!fine_has_next()) {
    if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
      // Current table exhausted: step to the next table in the bucket.
      _cur_region_cur_card = 0;
      _fine_cur_prt = _fine_cur_prt->next();
    }
    if (_fine_cur_prt == NULL) {
      fine_find_next_non_null_prt();
      if (_fine_cur_prt == NULL) return false;
    }
    assert(_fine_cur_prt != NULL && _cur_region_cur_card == 0,
           "inv.");
    // Compute the new table's base card offset and find its first set bit.
    HeapWord* r_bot =
      _fine_cur_prt->hr()->bottom();
    _cur_region_card_offset = _bosa->index_for(r_bot);
    _cur_region_cur_card = _fine_cur_prt->_bm.get_next_one_offset(0);
  }
  assert(fine_has_next(), "Or else we exited the loop via the return.");
  card_index = _cur_region_card_offset + _cur_region_cur_card;
  return true;
}
1251 bool HeapRegionRemSetIterator::fine_has_next() {
1252 return
1253 _fine_cur_prt != NULL &&
1254 _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
1255 }
// Yields (via "card_index") the next card in the rset, traversing the
// sparse, fine, and coarse representations in that order.  Returns false
// once all cards have been yielded.  The switch deliberately falls
// through so one call can advance across representation boundaries.
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse:
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  // In the sequential case, an exhausted iterator must have yielded
  // exactly the number of occupied cards.
  assert(ParallelGCThreads > 1 ||
         n_yielded() == _hrrs->occupied(),
         "Should have yielded all the cards in the rem set "
         "(in the non-par case).");
  return false;
}
// Debugging state for record()/record_event(): lazily-allocated parallel
// arrays logging each recorded (ref, card, region) triple ...
OopOrNarrowOopStar* HeapRegionRemSet::_recorded_oops = NULL;
HeapWord** HeapRegionRemSet::_recorded_cards = NULL;
HeapRegion** HeapRegionRemSet::_recorded_regions = NULL;
int HeapRegionRemSet::_n_recorded = 0;
// ... and each recorded event, with the card-log position at which it
// occurred.
HeapRegionRemSet::Event* HeapRegionRemSet::_recorded_events = NULL;
int* HeapRegionRemSet::_recorded_event_index = NULL;
int HeapRegionRemSet::_n_recorded_events = 0;
// Debugging aid: logs that reference location "f" was added to "hr"'s
// rset, for later printing by print_recorded().  The log arrays are
// allocated lazily on first use and are never freed.
void HeapRegionRemSet::record(HeapRegion* hr, OopOrNarrowOopStar f) {
  if (_recorded_oops == NULL) {
    assert(_n_recorded == 0
           && _recorded_cards == NULL
           && _recorded_regions == NULL,
           "Inv");
    _recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded);
    _recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded);
    _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded);
  }
  if (_n_recorded == MaxRecorded) {
    // Log full: drop further recordings (diagnostics only).
    gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded);
  } else {
    // Round the ref address down to its enclosing card boundary.
    _recorded_cards[_n_recorded] =
      (HeapWord*)align_size_down(uintptr_t(f),
                                 CardTableModRefBS::card_size);
    _recorded_oops[_n_recorded] = f;
    _recorded_regions[_n_recorded] = hr;
    _n_recorded++;
  }
}
// Debugging aid: logs that event "evnt" occurred, tagged with the current
// position in the card log so print_recorded() can interleave them.
// No-op unless G1RecordHRRSEvents is enabled.
void HeapRegionRemSet::record_event(Event evnt) {
  if (!G1RecordHRRSEvents) return;
  if (_recorded_events == NULL) {
    assert(_n_recorded_events == 0
           && _recorded_event_index == NULL,
           "Inv");
    _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents);
    _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents);
  }
  if (_n_recorded_events == MaxRecordedEvents) {
    // Log full: drop further events (diagnostics only).
    gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
  } else {
    _recorded_events[_n_recorded_events] = evnt;
    // Associate the event with the current card-log position.
    _recorded_event_index[_n_recorded_events] = _n_recorded;
    _n_recorded_events++;
  }
}
1340 void HeapRegionRemSet::print_event(outputStream* str, Event evnt) {
1341 switch (evnt) {
1342 case Event_EvacStart:
1343 str->print("Evac Start");
1344 break;
1345 case Event_EvacEnd:
1346 str->print("Evac End");
1347 break;
1348 case Event_RSUpdateEnd:
1349 str->print("RS Update End");
1350 break;
1351 }
1352 }
1354 void HeapRegionRemSet::print_recorded() {
1355 int cur_evnt = 0;
1356 Event cur_evnt_kind;
1357 int cur_evnt_ind = 0;
1358 if (_n_recorded_events > 0) {
1359 cur_evnt_kind = _recorded_events[cur_evnt];
1360 cur_evnt_ind = _recorded_event_index[cur_evnt];
1361 }
1363 for (int i = 0; i < _n_recorded; i++) {
1364 while (cur_evnt < _n_recorded_events && i == cur_evnt_ind) {
1365 gclog_or_tty->print("Event: ");
1366 print_event(gclog_or_tty, cur_evnt_kind);
1367 gclog_or_tty->print_cr("");
1368 cur_evnt++;
1369 if (cur_evnt < MaxRecordedEvents) {
1370 cur_evnt_kind = _recorded_events[cur_evnt];
1371 cur_evnt_ind = _recorded_event_index[cur_evnt];
1372 }
1373 }
1374 gclog_or_tty->print("Added card " PTR_FORMAT " to region [" PTR_FORMAT "...]"
1375 " for ref " PTR_FORMAT ".\n",
1376 _recorded_cards[i], _recorded_regions[i]->bottom(),
1377 _recorded_oops[i]);
1378 }
1379 }
#ifndef PRODUCT
// Self-test (non-product builds): populates region 0's rset with
// references from several regions, forces a coarsening, and checks that
// iteration yields exactly the expected number of cards.
void HeapRegionRemSet::test() {
  // Give the VM a moment to settle before poking at the heap directly.
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);
  // First, middle, and last addresses of three "from" regions.
  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;
  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;
  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;
  HeapRegionRemSet* hrrs = hr0->rem_set();
  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);
  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter;
  hrrs->init_iterator(&iter);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start =
      G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
    gclog_or_tty->print_cr("  Card " PTR_FORMAT ".", card_start);
    sum++;
  }
  // Per the guarantee: 11 distinct cards were added, and the 3 in the
  // coarsened region are instead reported as that region's full 2048
  // cards (assumes 2048 cards per region -- TODO confirm CardsPerRegion).
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif