Tue, 20 Sep 2011 09:59:59 -0400
7059019: G1: add G1 support to the SA
Summary: Extend the SA to recognize the G1CollectedHeap and implement any code that's needed by our serviceability tools (jmap, jinfo, jstack, etc.) that depend on the SA.
Reviewed-by: never, poonam, johnc
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/intHisto.hpp"

#define CARD_REPEAT_HISTO 0
#if CARD_REPEAT_HISTO
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}
void ct_freq_note_card(size_t index) {
  // 'index' is unsigned, so only the upper bound needs checking.
  assert(index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}
static IntHistogram card_repeat_count(10, 10);

void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }
}
#endif
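
// Note: the histogram above is compiled out by default. To gather
// card-repeat statistics, change CARD_REPEAT_HISTO to 1 above and
// rebuild; the results are printed from print_summary_info() below.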
G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : _g1(g1), _conc_refine_cards(0),
    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _cset_rs_update_cl(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
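  // One "into-cset" closure slot per worker. A worker caches its closure
  // here at the start of oops_into_collection_set_do() and clears it at
  // the end, so outside a pause every slot should be NULL (see the
  // assert in the destructor).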
  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _cset_rs_update_cl[i] = NULL;
  }
}

G1RemSet::~G1RemSet() {
  delete _seq_task;
  for (uint i = 0; i < n_workers(); i++) {
    assert(_cset_rs_update_cl[i] == NULL, "it should be");
  }
  FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
}

void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
  if (_g1->is_in_g1_reserved(mr.start())) {
    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
    if (_start_first == NULL) _start_first = mr.start();
  }
}
class ScanRSClosure : public HeapRegionClosure {
  size_t _cards_done, _cards;
  G1CollectedHeap* _g1h;
  OopsInHeapRegionClosure* _oc;
  G1BlockOffsetSharedArray* _bot_shared;
  CardTableModRefBS *_ct_bs;
  int _worker_i;
  int _block_size;
  bool _try_claimed;
public:
  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
    _oc(oc),
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i),
    _try_claimed(false)
  {
    _g1h = G1CollectedHeap::heap();
    _bot_shared = _g1h->bot_shared();
    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
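    // Cards are claimed in blocks of G1RSetScanBlockSize (at least 1)
    // to reduce claim contention; see the claiming loop in doHeapRegion().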
    _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
  }

  void set_try_claimed() { _try_claimed = true; }

  void scanCard(size_t index, HeapRegion *r) {
    DirtyCardToOopClosure* cl =
      r->new_dcto_closure(_oc,
                          CardTableModRefBS::Precise,
                          HeapRegionDCTOC::IntoCSFilterKind);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    HeapWord* card_start = _bot_shared->address_for_index(index);
    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
    Space *sp = SharedHeap::heap()->space_containing(card_start);
    MemRegion sm_region = sp->used_region_at_save_marks();
    MemRegion mr = sm_region.intersection(MemRegion(card_start, card_end));
    if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
      // We mark the card "claimed" lazily (so races are possible
      // but they're benign), which reduces the number of duplicate
      // scans (the rsets of the regions in the cset can intersect).
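      // A lost race costs at most a duplicate scan of the card,
      // never a missed one.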
      _ct_bs->set_card_claimed(index);
      _cards_done++;
      cl->do_MemRegion(mr);
    }
  }
  void printCard(HeapRegion* card_region, size_t card_index,
                 HeapWord* card_start) {
    // Note: card_index is a size_t, so use SIZE_FORMAT rather than %p.
    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                           "RS names card " SIZE_FORMAT ": "
                           "[" PTR_FORMAT ", " PTR_FORMAT ")",
                           _worker_i,
                           card_region->bottom(), card_region->end(),
                           card_index,
                           card_start, card_start + G1BlockOffsetSharedArray::N_words);
  }
  bool doHeapRegion(HeapRegion* r) {
    assert(r->in_collection_set(), "should only be called on elements of CS.");
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs->iter_is_complete()) return false; // All done.
    if (!_try_claimed && !hrrs->claim_iter()) return false;
    // If we ever free the collection set concurrently, we should also
    // clear the card table concurrently; then we would not need to
    // add collection set regions to the dirty cards region list.
    _g1h->push_dirty_cards_region(r);
    // If we didn't return above, then
    //   _try_claimed || r->claim_iter()
    // is true: either we're supposed to work on claimed-but-not-complete
    // regions, or we successfully claimed the region.
    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
    hrrs->init_iterator(iter);
    size_t card_index;
    // We claim cards in blocks so as to reduce contention. The block
    // size is determined by the G1RSetScanBlockSize parameter.
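    // For example, with _block_size == 8, a claim returning 16 means
    // this worker handles cards 16..23 of the iteration and then
    // claims another block.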
    size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
      if (current_card >= jump_to_card + _block_size) {
        jump_to_card = hrrs->iter_claimed_next(_block_size);
      }
      if (current_card < jump_to_card) continue;
      HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
#if 0
      gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
                          card_start, card_start + CardTableModRefBS::card_size_in_words);
#endif
      HeapRegion* card_region = _g1h->heap_region_containing(card_start);
      assert(card_region != NULL, "Yielding cards not in the heap?");
      _cards++;

      if (!card_region->is_on_dirty_cards_region_list()) {
        _g1h->push_dirty_cards_region(card_region);
      }

      // If the card is dirty, then we will scan it during updateRS.
      if (!card_region->in_collection_set() &&
          !_ct_bs->is_card_dirty(card_index)) {
        scanCard(card_index, card_region);
      }
    }
    if (!_try_claimed) {
      hrrs->set_iter_complete();
    }
    return false;
  }
  size_t cards_done() { return _cards_done; }
  size_t cards_looked_up() { return _cards; }
};
// We want the parallel threads to start their scanning at
// different collection set regions to avoid contention.
// If we have:
//   n collection set regions
//   p threads
// then thread t will start at region t * floor(n/p).
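// For example, with n = 10 regions and p = 4 threads, floor(n/p) = 2,
// so threads 0..3 start at regions 0, 2, 4 and 6 respectively.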
HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
  HeapRegion* result = _g1p->collection_set();
  if (ParallelGCThreads > 0) {
    size_t cs_size = _g1p->collection_set_size();
    int n_workers = _g1->workers()->total_workers();
    size_t cs_spans = cs_size / n_workers;
    size_t ind      = cs_spans * worker_i;
    for (size_t i = 0; i < ind; i++)
      result = result->next_in_collection_set();
  }
  return result;
}
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion *startRegion = calculateStartRegion(worker_i);

  ScanRSClosure scanRScl(oc, worker_i);
  _g1->collection_set_iterate_from(startRegion, &scanRScl);
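  // A second, "try claimed" pass helps finish regions that another
  // worker claimed but has not yet completed.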
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;

  assert(_cards_scanned != NULL, "invariant");
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
}
// Closure used for updating RSets and recording references that
// point into the collection set. Only called during an
// evacuation pause.

class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  DirtyCardQueue* _into_cset_dcq;
public:
  RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
                                              DirtyCardQueue* into_cset_dcq) :
    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");

    if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (G1CollectedHeap::into_cset_dirty_card_queue_set())
      // that's used for that purpose.
      //
      // Enqueue the card
      _into_cset_dcq->enqueue(card_ptr);
    }
    return true;
  }
};
void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
  double start = os::elapsedTime();
  // Apply the given closure to all remaining log entries.
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);

  // Now there should be no dirty cards.
  if (G1RSLogCheckCardTable) {
    CountNonCleanMemRegionClosure cl(_g1);
    _ct_bs->mod_card_iterate(&cl);
    // XXX This isn't true any more: keeping cards of young regions
    // marked dirty broke it. Need some reasonable fix.
    guarantee(cl.n() == 0, "Card table should be clean.");
  }

  _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}
class CountRSSizeClosure: public HeapRegionClosure {
  size_t _n;
  size_t _tot;
  size_t _max;
  HeapRegion* _max_r;
  enum {
    N = 20,
    MIN = 6
  };
  int _histo[N];
public:
  CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
    for (int i = 0; i < N; i++) _histo[i] = 0;
  }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      size_t occ = r->rem_set()->occupied();
      _n++;
      _tot += occ;
      if (occ > _max) {
        _max = occ;
        _max_r = r;
      }
      // Fit it into a histo bin.
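      // Bin 0 covers occupancies <= 2^MIN (64); bin i > 0 covers
      // (2^(MIN+i-1), 2^(MIN+i)]; e.g. occ == 100 lands in bin 1.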
      int s = 1 << MIN;
      int i = 0;
      while (occ > (size_t) s && i < (N-1)) {
        s = s << 1;
        i++;
      }
      _histo[i]++;
    }
    return false;
  }
  size_t n() { return _n; }
  size_t tot() { return _tot; }
  size_t mx() { return _max; }
  HeapRegion* mxr() { return _max_r; }
  void print_histo() {
    int mx = N;
    // Find the highest non-empty bin. Stop at 0 so we never read
    // _histo[-1] when every bin is empty.
    while (mx > 0) {
      if (_histo[mx-1] > 0) break;
      mx--;
    }
    gclog_or_tty->print_cr("Number of regions with given RS sizes:");
    gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]);
    for (int i = 1; i < mx-1; i++) {
      gclog_or_tty->print_cr(" %8d - %8d %8d",
                             (1 << (MIN + i - 1)) + 1,
                             1 << (MIN + i),
                             _histo[i]);
    }
    gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
  }
};
void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                           int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  if (worker_i == 0) {
    _cg1r->clear_and_record_card_counts();
  }
  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
                           "max region is " PTR_FORMAT,
                           count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                           count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }
  // We cache the 'oc' closure in the slot of _cset_rs_update_cl that
  // belongs to this worker.
  assert(worker_i < (int)n_workers(), "sanity");
  _cset_rs_update_cl[worker_i] = oc;
  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects). In the event of an evacuation
  // failure the cards/buffers in this queue set are:
  //   * passed to the DirtyCardQueueSet that is used to manage deferred
  //     RSet updates, or
  //   * scanned for references that point into the collection set
  //     and the RSet of the corresponding region in the collection set
  //     is updated immediately.
  DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());

  assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
  // The two flags below were introduced temporarily to serialize
  // the updating and scanning of remembered sets. There are some
  // race conditions when these two operations are done in parallel
  // and they are causing failures. When we resolve said race
  // conditions, we'll revert to parallel remembered set
  // updating and scanning. See CRs 6677707 and 6677708.
  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
    updateRS(&into_cset_dcq, worker_i);
  } else {
    _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
    _g1p->record_update_rs_time(worker_i, 0.0);
  }
  if (G1UseParallelRSetScanning || (worker_i == 0)) {
    scanRS(oc, worker_i);
  } else {
    _g1p->record_scan_rs_time(worker_i, 0.0);
  }
  // We now clear the cached closure for this worker.
  _cset_rs_update_cl[worker_i] = NULL;
}
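
// Called once at the start of an evacuation pause, before the per-worker
// calls to oops_into_collection_set_do() and the eventual
// cleanup_after_oops_into_collection_set_do().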
void G1RemSet::prepare_for_oops_into_collection_set_do() {
  cleanupHRRS();
  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  if (ParallelGCThreads > 0) {
    _seq_task->set_n_threads((int)n_workers());
  }
  guarantee(_cards_scanned == NULL, "invariant");
  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
  for (uint i = 0; i < n_workers(); ++i) {
    _cards_scanned[i] = 0;
  }
  _total_cards_scanned = 0;
}
// This closure, applied to a DirtyCardQueueSet, is used to immediately
// update the RSets for the regions in the CSet. For each card it iterates
// through the oops which coincide with that card. It scans the reference
// fields in each oop; when it finds an oop that points into the collection
// set, the RSet for the region containing the referenced object is updated.
class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;
public:
  UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
                                          CardTableModRefBS* bs):
    _g1(g1), _ct_bs(bs)
  { }

  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // Construct the region representing the card.
    HeapWord* start = _ct_bs->addr_for(card_ptr);
    // And find the region containing it.
    HeapRegion* r = _g1->heap_region_containing(start);
    assert(r != NULL, "unexpected null");

    // Scan oops in the card looking for references into the collection set
    HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
    MemRegion scanRegion(start, end);

    UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
    FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
    FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);

    // We can pass false as the "filter_young" parameter here as:
    //   * we should be in a STW pause,
    //   * the DCQS to which this closure is applied is used to hold
    //     references that point into the collection set from the prior
    //     RSet updating,
    //   * the post-write barrier shouldn't be logging updates to young
    //     regions (but there is a situation where this can happen - see
    //     the comment in G1RemSet::concurrentRefineOneCard below -
    //     that should not be applicable here), and
    //   * during actual RSet updating, the filtering of cards in young
    //     regions in HeapRegion::oops_on_card_seq_iterate_careful is
    //     employed.
    // As a result, when this closure is applied to "refs into cset"
    // DCQS, we shouldn't see any cards in young regions.
    update_rs_cl.set_region(r);
    HeapWord* stop_point =
      r->oops_on_card_seq_iterate_careful(scanRegion,
                                          &filter_then_update_rs_cset_oop_cl,
                                          false /* filter_young */,
                                          NULL  /* card_ptr */);
    // Since this is performed in the event of an evacuation failure,
    // we shouldn't see a non-null stop point.
    assert(stop_point == NULL, "saw an unallocated region");
    return true;
  }
};
void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee(_cards_scanned != NULL, "invariant");
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i) {
    _total_cards_scanned += _cards_scanned[i];
  }
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
  _g1->set_refine_cte_cl_concurrency(true);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into the collection set.
    if (G1DeferredRSUpdate) {
      // If deferred RS updates are enabled then we just need to transfer
      // the completed buffers from (a) the DirtyCardQueueSet used to hold
      // cards that contain references that point into the collection set
      // to (b) the DCQS used to hold the deferred RS updates.
      _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    } else {
      CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
      UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);

      int n_completed_buffers = 0;
      while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
                                                              0, 0, true)) {
        n_completed_buffers++;
      }
      assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
    }
  }
  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection set.
  _g1->into_cset_dirty_card_queue_set().clear();
  assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
         "all buffers should be freed");
  _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
}
class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(NULL)
  {
    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ctbs = (CardTableModRefBS*)bs;
  }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};

void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_iterate(&scrub_cl);
}

void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                         int worker_num, int claim_val) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
}

static IntHistogram out_of_histo(50, 50);
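
// The three small closures below are composed in
// concurrentRefineOneCard_impl() to detect whether a card has a
// reference into the collection set: TriggerClosure latches to "true"
// when it is applied to a reference, InvokeIfNotTriggeredClosure stops
// applying a wrapped closure once the trigger has fired, and
// Mux2Closure applies two closures in sequence.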
class TriggerClosure : public OopClosure {
  bool _trigger;
public:
  TriggerClosure() : _trigger(false) { }
  bool value() const { return _trigger; }
  template <class T> void do_oop_nv(T* p) { _trigger = true; }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class InvokeIfNotTriggeredClosure: public OopClosure {
  TriggerClosure* _t;
  OopClosure* _oc;
public:
  InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
    _t(t), _oc(oc) { }
  template <class T> void do_oop_nv(T* p) {
    if (!_t->value()) _oc->do_oop(p);
  }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

class Mux2Closure : public OopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
  template <class T> void do_oop_nv(T* p) {
    _c1->do_oop(p); _c2->do_oop(p);
  }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                            bool check_for_refs_into_cset) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->max_capacity());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL, "sanity");
  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                               _g1->g1_rem_set(),
                                               _cset_rs_update_cl[worker_i],
                                               check_for_refs_into_cset,
                                               worker_i);
  update_rs_oop_cl.set_from(r);

  TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));
  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering only after it has determined that there has been an
  // actual allocation in this region, making it safe to check the
  // young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young,
                                        card_ptr);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }

  return trigger_cl.value();
}
bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset) {
  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  if (r == NULL) {
    guarantee(_g1->is_in_permanent(start), "Or else where?");
    // Again no need to return that this card contains refs that
    // point into the collection set.
    return false; // Not in the G1 heap (might be in perm, for example.)
  }
  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely
  // and doesn't happen often, but it can happen. So, the extra
  // check below filters out those cards.
  if (r->is_young()) {
    return false;
  }
  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }
  // Should we defer processing the card?
  //
  // Previously the result from the insert_cache call would be
  // either card_ptr (implying that card_ptr was currently "cold"),
  // null (meaning we had inserted the card ptr into the "hot"
  // cache, which had some headroom), or a "hot" card ptr
  // extracted from the "hot" cache.
  //
  // Now that the _card_counts cache in the ConcurrentG1Refine
  // instance is an evicting hash table, the result we get back
  // could be from evicting the card ptr in an already occupied
  // bucket (in which case we have replaced the card ptr in the
  // bucket with card_ptr and "defer" is set to false). To avoid
  // having a data structure (updates to which would need a lock)
  // to hold these unprocessed dirty cards, we need to immediately
  // process card_ptr. The actions to be taken on return from
  // cache_insert are summarized in the following table:
  //
  // res      defer   action
  // --------------------------------------------------------------
  // null     false   card evicted from _card_counts & replaced with
  //                  card_ptr; evicted ptr added to hot cache.
  //                  No need to process res; immediately process card_ptr
  //
  // null     true    card not evicted from _card_counts; card_ptr added
  //                  to hot cache.
  //                  Nothing to do.
  //
  // non-null false   card evicted from _card_counts & replaced with
  //                  card_ptr; evicted ptr is currently "cold" or
  //                  caused an eviction from the hot cache.
  //                  Immediately process res; process card_ptr.
  //
  // non-null true    card not evicted from _card_counts; card_ptr is
  //                  currently cold, or caused an eviction from hot
  //                  cache.
  //                  Immediately process res; no need to process card_ptr.
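  //
  // When the cache is in use, the table collapses to (sketch):
  //   if (res != NULL && (res != card_ptr || defer)) process(res);
  //   if (!defer)                                    process(card_ptr);
  // which is what the code below implements via
  // concurrentRefineOneCard_impl().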
  jbyte* res = card_ptr;
  bool defer = false;

  // This gets set to true if the card being refined has references
  // that point into the collection set.
  bool oops_into_cset = false;
  if (_cg1r->use_cache()) {
    // Assign to the outer 'res' rather than re-declaring it, so the
    // variable is not shadowed.
    res = _cg1r->cache_insert(card_ptr, &defer);
    if (res != NULL && (res != card_ptr || defer)) {
      start = _ct_bs->addr_for(res);
      r = _g1->heap_region_containing(start);
      if (r == NULL) {
        assert(_g1->is_in_permanent(start), "Or else where?");
      } else {
        // Checking whether the region we got back from the cache
        // is young here is inappropriate. The region could have been
        // freed, reallocated and tagged as young while in the cache.
        // Hence we could see its young type change at any time.
        //
        // Process card pointer we get back from the hot card cache. This
        // will check whether the region containing the card is young
        // _after_ checking that the region has been allocated from.
        oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
                                                      false /* check_for_refs_into_cset */);
        // The above call to concurrentRefineOneCard_impl is only
        // performed if the hot card cache is enabled. This cache is
        // disabled during an evacuation pause - which is the only
        // time when we need to know if the card contains references
        // that point into the collection set. Also when the hot card
        // cache is enabled, this code is executed by the concurrent
        // refine threads - rather than the GC worker threads - and
        // concurrentRefineOneCard_impl will return false.
        assert(!oops_into_cset, "should not see true here");
      }
    }
  }
  if (!defer) {
    oops_into_cset =
      concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
    // We should only be detecting that the card contains references
    // that point into the collection set if the current thread is
    // a GC worker thread.
    assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
           "invalid result at non safepoint");
  }
  return oops_into_cset;
}
class HRRSStatsIter: public HeapRegionClosure {
  size_t _occupied;
  size_t _total_mem_sz;
  size_t _max_mem_sz;
  HeapRegion* _max_mem_sz_region;
public:
  HRRSStatsIter() :
    _occupied(0),
    _total_mem_sz(0),
    _max_mem_sz(0),
    _max_mem_sz_region(NULL)
  {}

  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    size_t mem_sz = r->rem_set()->mem_size();
    if (mem_sz > _max_mem_sz) {
      _max_mem_sz = mem_sz;
      _max_mem_sz_region = r;
    }
    _total_mem_sz += mem_sz;
    size_t occ = r->rem_set()->occupied();
    _occupied += occ;
    return false;
  }
  size_t total_mem_sz() { return _total_mem_sz; }
  size_t max_mem_sz() { return _max_mem_sz; }
  size_t occupied() { return _occupied; }
  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
};
class PrintRSThreadVTimeClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread *t) {
    ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
    gclog_or_tty->print(" %5.2f", crt->vtime_accum());
  }
};

void G1RemSet::print_summary_info() {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
    gclog_or_tty->print_cr(" # of CS ptrs --> # of cards with that number.");
    out_of_histo.print_on(gclog_or_tty);
  }
  // _conc_refine_cards is a size_t, so use SIZE_FORMAT rather than %d.
  gclog_or_tty->print_cr("\n Concurrent RS processed " SIZE_FORMAT " cards",
                         _conc_refine_cards);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  jint tot_processed_buffers =
    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
  gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
  gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
                         dcqs.processed_buffers_rs_thread(),
                         100.0*(float)dcqs.processed_buffers_rs_thread()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
                         dcqs.processed_buffers_mut(),
                         100.0*(float)dcqs.processed_buffers_mut()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr(" Conc RS threads times(s)");
  PrintRSThreadVTimeClosure p;
  gclog_or_tty->print(" ");
  g1->concurrent_g1_refine()->threads_do(&p);
  gclog_or_tty->print_cr("");

  HRRSStatsIter blk;
  g1->heap_region_iterate(&blk);
  gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
                         " Max = " SIZE_FORMAT "K.",
                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
  gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K,"
                         " free_lists = " SIZE_FORMAT "K.",
                         HeapRegionRemSet::static_mem_size()/K,
                         HeapRegionRemSet::fl_mem_size()/K);
  // blk.occupied() is a size_t, so use SIZE_FORMAT rather than %d.
  gclog_or_tty->print_cr(" " SIZE_FORMAT " occupied cards represented.",
                         blk.occupied());
  gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
  gclog_or_tty->print_cr(" Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
}
void G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC) &&
      !_g1->full_collection()) {
    cleanupHRRS();
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }
    bool cg1r_use_cache = _cg1r->use_cache();
    _cg1r->set_use_cache(false);
    DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
    updateRS(&into_cset_dcq, 0);
    _g1->into_cset_dirty_card_queue_set().clear();
    _cg1r->set_use_cache(cg1r_use_cache);

    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}