Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for HotSpot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/intHisto.hpp"

#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}

void ct_freq_note_card(size_t index) {
  assert(0 <= index && index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}

static IntHistogram card_repeat_count(10, 10);

void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }
}
#endif
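
// Wraps an OopsInHeapRegionClosure and applies it only to locations
// whose referent lies in the collection set.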
class IntoCSOopClosure: public OopsInHeapRegionClosure {
  OopsInHeapRegionClosure* _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(blk) {}
  void set_region(HeapRegion* from) {
    _blk->set_region(from);
  }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
  }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool idempotent() { return true; }
};
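
// Applies the wrapped OopsInHeapRegionClosure to every object in a region
// outside the collection set; a humongous object is scanned once, via its
// starting region.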
class IntoCSRegionClosure: public HeapRegionClosure {
  IntoCSOopClosure _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(g1, blk) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set()) {
      _blk.set_region(r);
      if (r->isHumongous()) {
        if (r->startsHumongous()) {
          oop obj = oop(r->bottom());
          obj->oop_iterate(&_blk);
        }
      } else {
        r->oop_before_save_marks_iterate(&_blk);
      }
    }
    return false;
  }
};
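
// Verification closure: checks that no scanned reference points into
// the collection set, i.e. that no remembered set entry was missed.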
class VerifyRSCleanCardOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");
  }
};

G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : _g1(g1), _conc_refine_cards(0),
    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _cset_rs_update_cl(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _cset_rs_update_cl[i] = NULL;
  }
}

G1RemSet::~G1RemSet() {
  delete _seq_task;
  for (uint i = 0; i < n_workers(); i++) {
    assert(_cset_rs_update_cl[i] == NULL, "it should be");
  }
  FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
}

void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
  if (_g1->is_in_g1_reserved(mr.start())) {
    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
    if (_start_first == NULL) _start_first = mr.start();
  }
}
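
// Scans the cards named by the remembered set of a single collection-set
// region, applying the given OopsInHeapRegionClosure to each card's
// contents. Cards are claimed in blocks to reduce contention between
// parallel workers.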
class ScanRSClosure : public HeapRegionClosure {
  size_t _cards_done, _cards;
  G1CollectedHeap* _g1h;
  OopsInHeapRegionClosure* _oc;
  G1BlockOffsetSharedArray* _bot_shared;
  CardTableModRefBS *_ct_bs;
  int _worker_i;
  int _block_size;
  bool _try_claimed;
public:
  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
    _oc(oc),
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i),
    _try_claimed(false)
  {
    _g1h = G1CollectedHeap::heap();
    _bot_shared = _g1h->bot_shared();
    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
    _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
  }

  void set_try_claimed() { _try_claimed = true; }

  void scanCard(size_t index, HeapRegion *r) {
    _cards_done++;
    DirtyCardToOopClosure* cl =
      r->new_dcto_closure(_oc,
                          CardTableModRefBS::Precise,
                          HeapRegionDCTOC::IntoCSFilterKind);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    HeapWord* card_start = _bot_shared->address_for_index(index);
    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
    Space *sp = SharedHeap::heap()->space_containing(card_start);
    MemRegion sm_region;
    if (ParallelGCThreads > 0) {
      // first find the used area
      sm_region = sp->used_region_at_save_marks();
    } else {
      // The closure is not idempotent.  We shouldn't look at objects
      // allocated during the GC.
      sm_region = sp->used_region_at_save_marks();
    }
    MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
    if (!mr.is_empty()) {
      cl->do_MemRegion(mr);
    }
  }

  void printCard(HeapRegion* card_region, size_t card_index,
                 HeapWord* card_start) {
    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                           "RS names card %p: "
                           "[" PTR_FORMAT ", " PTR_FORMAT ")",
                           _worker_i,
                           card_region->bottom(), card_region->end(),
                           card_index,
                           card_start, card_start + G1BlockOffsetSharedArray::N_words);
  }

  bool doHeapRegion(HeapRegion* r) {
    assert(r->in_collection_set(), "should only be called on elements of CS.");
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs->iter_is_complete()) return false; // All done.
    if (!_try_claimed && !hrrs->claim_iter()) return false;
    _g1h->push_dirty_cards_region(r);
    // If we didn't return above, then
    //   _try_claimed || r->claim_iter()
    // is true: either we're supposed to work on claimed-but-not-complete
    // regions, or we successfully claimed the region.
    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
    hrrs->init_iterator(iter);
    size_t card_index;

    // We claim cards in blocks so as to reduce contention. The block size
    // is determined by the G1RSetScanBlockSize parameter.
    size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
      if (current_card >= jump_to_card + _block_size) {
        jump_to_card = hrrs->iter_claimed_next(_block_size);
      }
      if (current_card < jump_to_card) continue;
      HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
#if 0
      gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
                          card_start, card_start + CardTableModRefBS::card_size_in_words);
#endif

      HeapRegion* card_region = _g1h->heap_region_containing(card_start);
      assert(card_region != NULL, "Yielding cards not in the heap?");
      _cards++;

      if (!card_region->is_on_dirty_cards_region_list()) {
        _g1h->push_dirty_cards_region(card_region);
      }

      // If the card is dirty, then we will scan it during updateRS.
      if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
        // We mark the card as "claimed" lazily (so races are possible but they're benign),
        // which reduces the number of duplicate scans (the rsets of the regions in the cset
        // can intersect).
        if (!_ct_bs->is_card_claimed(card_index)) {
          _ct_bs->set_card_claimed(card_index);
          scanCard(card_index, card_region);
        }
      }
    }
    if (!_try_claimed) {
      hrrs->set_iter_complete();
    }
    return false;
  }
  // Set all cards back to clean.
  void cleanup() {_g1h->cleanUpCardTable();}
  size_t cards_done() { return _cards_done;}
  size_t cards_looked_up() { return _cards;}
};

// We want the parallel threads to start their scanning at
// different collection set regions to avoid contention.
// If we have:
//          n collection set regions
//          p threads
// Then thread t will start at region t * floor (n/p)

HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
  HeapRegion* result = _g1p->collection_set();
  if (ParallelGCThreads > 0) {
    size_t cs_size = _g1p->collection_set_size();
    int n_workers = _g1->workers()->total_workers();
    size_t cs_spans = cs_size / n_workers;
    size_t ind      = cs_spans * worker_i;
    for (size_t i = 0; i < ind; i++)
      result = result->next_in_collection_set();
  }
  return result;
}

void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion *startRegion = calculateStartRegion(worker_i);

  ScanRSClosure scanRScl(oc, worker_i);
  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;

  assert( _cards_scanned != NULL, "invariant" );
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
}

// Closure used for updating RSets and recording references that
// point into the collection set. Only called during an
// evacuation pause.

class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
  G1RemSet* _g1rs;
  DirtyCardQueue* _into_cset_dcq;
public:
  RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
                                              DirtyCardQueue* into_cset_dcq) :
    _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
  {}
  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // The only time we care about recording cards that
    // contain references that point into the collection set
    // is during RSet updating within an evacuation pause.
    // In this case worker_i should be the id of a GC worker thread.
    assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
    assert(worker_i < (int) DirtyCardQueueSet::num_par_ids(), "should be a GC worker");

    if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
      // 'card_ptr' contains references that point into the collection
      // set. We need to record the card in the DCQS
      // (G1CollectedHeap::into_cset_dirty_card_queue_set())
      // that's used for that purpose.
      //
      // Enqueue the card
      _into_cset_dcq->enqueue(card_ptr);
    }
    return true;
  }
};
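
// Applies the refinement closure to all remaining entries in the dirty
// card queues, recording (via the given DCQ) any cards found to hold
// references into the collection set.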
void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
  double start = os::elapsedTime();
  // Apply the given closure to all remaining log entries.
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);

  // Now there should be no dirty cards.
  if (G1RSLogCheckCardTable) {
    CountNonCleanMemRegionClosure cl(_g1);
    _ct_bs->mod_card_iterate(&cl);
    // XXX This isn't true any more: keeping cards of young regions
    // marked dirty broke it.  Need some reasonable fix.
    guarantee(cl.n() == 0, "Card table should be clean.");
  }

  _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}

#ifndef PRODUCT
class PrintRSClosure : public HeapRegionClosure {
  int _count;
public:
  PrintRSClosure() : _count(0) {}
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    _count += (int) hrrs->occupied();
    if (hrrs->occupied() == 0) {
      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                          "has no remset entries\n",
                          r->bottom(), r->end());
    } else {
      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
                          r->bottom(), r->end());
      r->print();
      hrrs->print();
      gclog_or_tty->print("\nDone printing rem set\n");
    }
    return false;
  }
  int occupied() {return _count;}
};
#endif
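
// Debug aid: tallies remembered-set occupancy across all regions and
// bins the per-region sizes into a power-of-two histogram.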
class CountRSSizeClosure: public HeapRegionClosure {
  size_t _n;
  size_t _tot;
  size_t _max;
  HeapRegion* _max_r;
  enum {
    N = 20,
    MIN = 6
  };
  int _histo[N];
public:
  CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
    for (int i = 0; i < N; i++) _histo[i] = 0;
  }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      size_t occ = r->rem_set()->occupied();
      _n++;
      _tot += occ;
      if (occ > _max) {
        _max = occ;
        _max_r = r;
      }
      // Fit it into a histo bin.
      int s = 1 << MIN;
      int i = 0;
      while (occ > (size_t) s && i < (N-1)) {
        s = s << 1;
        i++;
      }
      _histo[i]++;
    }
    return false;
  }
  size_t n() { return _n; }
  size_t tot() { return _tot; }
  size_t mx() { return _max; }
  HeapRegion* mxr() { return _max_r; }
  void print_histo() {
    int mx = N;
    while (mx > 0) {  // stop at 0 so _histo[mx-1] stays in bounds
      if (_histo[mx-1] > 0) break;
      mx--;
    }
    gclog_or_tty->print_cr("Number of regions with given RS sizes:");
    gclog_or_tty->print_cr("           <= %8d   %8d", 1 << MIN, _histo[0]);
    for (int i = 1; i < mx-1; i++) {
      gclog_or_tty->print_cr("  %8d  - %8d   %8d",
                             (1 << (MIN + i - 1)) + 1,
                             1 << (MIN + i),
                             _histo[i]);
    }
    gclog_or_tty->print_cr("            > %8d   %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
  }
};

void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                           int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  if (worker_i == 0) {
    _cg1r->clear_and_record_card_counts();
  }

  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
                           "max region is " PTR_FORMAT,
                           count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                           count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }

  // We cache the value of 'oc' closure into the appropriate slot in the
  // _cset_rs_update_cl for this worker
  assert(worker_i < (int)n_workers(), "sanity");
  _cset_rs_update_cl[worker_i] = oc;

  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects). In the event of an evacuation
  // failure the cards/buffers in this queue set are:
  // * passed to the DirtyCardQueueSet that is used to manage deferred
  //   RSet updates, or
  // * scanned for references that point into the collection set
  //   and the RSet of the corresponding region in the collection set
  //   is updated immediately.
  DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());

  assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");

  // The two flags below were introduced temporarily to serialize
  // the updating and scanning of remembered sets. There are some
  // race conditions when these two operations are done in parallel
  // and they are causing failures. When we resolve said race
  // conditions, we'll revert back to parallel remembered set
  // updating and scanning. See CRs 6677707 and 6677708.
  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
    updateRS(&into_cset_dcq, worker_i);
  } else {
    _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
    _g1p->record_update_rs_time(worker_i, 0.0);
  }
  if (G1UseParallelRSetScanning || (worker_i == 0)) {
    scanRS(oc, worker_i);
  } else {
    _g1p->record_scan_rs_time(worker_i, 0.0);
  }

  // We now clear the cached values of _cset_rs_update_cl for this worker
  _cset_rs_update_cl[worker_i] = NULL;
}

void G1RemSet::prepare_for_oops_into_collection_set_do() {
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->collection_set_iterate(&cl);
#endif
  cleanupHRRS();
  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  if (ParallelGCThreads > 0) {
    _seq_task->set_n_threads((int)n_workers());
  }
  guarantee( _cards_scanned == NULL, "invariant" );
  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
  for (uint i = 0; i < n_workers(); ++i) {
    _cards_scanned[i] = 0;
  }
  _total_cards_scanned = 0;
}
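
// Resets the remembered-set iterator of each collection-set region so
// that the remembered sets can be claimed and iterated again in a
// later pause.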
class cleanUpIteratorsClosure : public HeapRegionClosure {
  bool doHeapRegion(HeapRegion *r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    hrrs->init_for_par_iteration();
    return false;
  }
};

// This closure, applied to a DirtyCardQueueSet, is used to immediately
// update the RSets for the regions in the CSet. For each card it iterates
// through the oops which coincide with that card. It scans the reference
// fields in each oop; when it finds an oop that points into the collection
// set, the RSet for the region containing the referenced object is updated.
class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;
public:
  UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
                                          CardTableModRefBS* bs):
    _g1(g1), _ct_bs(bs)
  { }

  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // Construct the region representing the card.
    HeapWord* start = _ct_bs->addr_for(card_ptr);
    // And find the region containing it.
    HeapRegion* r = _g1->heap_region_containing(start);
    assert(r != NULL, "unexpected null");

    // Scan oops in the card looking for references into the collection set
    HeapWord* end   = _ct_bs->addr_for(card_ptr + 1);
    MemRegion scanRegion(start, end);

    UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
    FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
    FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);

    // We can pass false as the "filter_young" parameter here as:
    // * we should be in a STW pause,
    // * the DCQS to which this closure is applied is used to hold
    //   references that point into the collection set from the prior
    //   RSet updating,
    // * the post-write barrier shouldn't be logging updates to young
    //   regions (but there is a situation where this can happen - see
    //   the comment in G1RemSet::concurrentRefineOneCard below -
    //   that should not be applicable here), and
    // * during actual RSet updating, the filtering of cards in young
    //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
    //   employed.
    // As a result, when this closure is applied to "refs into cset"
    // DCQS, we shouldn't see any cards in young regions.
    update_rs_cl.set_region(r);
    HeapWord* stop_point =
      r->oops_on_card_seq_iterate_careful(scanRegion,
                                          &filter_then_update_rs_cset_oop_cl,
                                          false /* filter_young */);

    // Since this is performed in the event of an evacuation failure,
    // we shouldn't see a non-null stop point
    assert(stop_point == NULL, "saw an unallocated region");
    return true;
  }
};

void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee( _cards_scanned != NULL, "invariant" );
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into the collection set.

    if (G1DeferredRSUpdate) {
      // If deferred RS updates are enabled then we just need to transfer
      // the completed buffers from (a) the DirtyCardQueueSet used to hold
      // cards that contain references that point into the collection set
      // to (b) the DCQS used to hold the deferred RS updates
      _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    } else {

      CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
      UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);

      int n_completed_buffers = 0;
      while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
                                                              0, 0, true)) {
        n_completed_buffers++;
      }
      assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
    }
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection set.
  _g1->into_cset_dirty_card_queue_set().clear();
  assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
         "all buffers should be freed");
  _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
}

class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(NULL)
  {
    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ctbs = (CardTableModRefBS*)bs;
  }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};

void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_iterate(&scrub_cl);
}

void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                         int worker_num, int claim_val) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
}

static IntHistogram out_of_histo(50, 50);
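
// Records whether it was ever invoked; lets the caller detect that at
// least one reference into the collection set was encountered.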
class TriggerClosure : public OopClosure {
  bool _trigger;
public:
  TriggerClosure() : _trigger(false) { }
  bool value() const { return _trigger; }
  template <class T> void do_oop_nv(T* p) { _trigger = true; }
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};
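
// Applies the wrapped closure only as long as the associated
// TriggerClosure has not fired.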
class InvokeIfNotTriggeredClosure: public OopClosure {
  TriggerClosure* _t;
  OopClosure* _oc;
public:
  InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
    _t(t), _oc(oc) { }
  template <class T> void do_oop_nv(T* p) {
    if (!_t->value()) _oc->do_oop(p);
  }
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};
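
// Applies two closures, in order, to every location visited.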
class Mux2Closure : public OopClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
  template <class T> void do_oop_nv(T* p) {
    _c1->do_oop(p); _c2->do_oop(p);
  }
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};
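
// Refines a single card: cleans it, then scans the objects it covers,
// updating remembered sets. Returns true if (when requested) a reference
// into the collection set was found.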
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                            bool check_for_refs_into_cset) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end   = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL, "sanity");
  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                               _g1->g1_rem_set(),
                                               _cset_rs_update_cl[worker_i],
                                               check_for_refs_into_cset,
                                               worker_i);
  update_rs_oop_cl.set_from(r);

  TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region, making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in.  Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked.  (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }

  return trigger_cl.value();
}

bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset) {
  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
    // No need to return that this card contains refs that point
    // into the collection set.
    return false;
  }

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  if (r == NULL) {
    guarantee(_g1->is_in_permanent(start), "Or else where?");
    // Again no need to return that this card contains refs that
    // point into the collection set.
    return false; // Not in the G1 heap (might be in perm, for example.)
  }
  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // and it doesn't happen often, but it can happen. So, the extra
  // check below filters out those cards.
  if (r->is_young()) {
    return false;
  }
  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return false;
  }

  // Should we defer processing the card?
  //
  // Previously the result from the insert_cache call would be
  // either card_ptr (implying that card_ptr was currently "cold"),
  // null (meaning we had inserted the card ptr into the "hot"
  // cache, which had some headroom), or a "hot" card ptr
  // extracted from the "hot" cache.
  //
  // Now that the _card_counts cache in the ConcurrentG1Refine
  // instance is an evicting hash table, the result we get back
  // could be from evicting the card ptr in an already occupied
  // bucket (in which case we have replaced the card ptr in the
  // bucket with card_ptr and "defer" is set to false). To avoid
  // having a data structure (updates to which would need a lock)
  // to hold these unprocessed dirty cards, we need to immediately
  // process card_ptr. The actions needed to be taken on return
  // from cache_insert are summarized in the following table:
  //
  // res      defer   action
  // --------------------------------------------------------------
  // null     false   card evicted from _card_counts & replaced with
  //                  card_ptr; evicted ptr added to hot cache.
  //                  No need to process res; immediately process card_ptr
  //
  // null     true    card not evicted from _card_counts; card_ptr added
  //                  to hot cache.
  //                  Nothing to do.
  //
  // non-null false   card evicted from _card_counts & replaced with
  //                  card_ptr; evicted ptr is currently "cold" or
  //                  caused an eviction from the hot cache.
  //                  Immediately process res; process card_ptr.
  //
  // non-null true    card not evicted from _card_counts; card_ptr is
  //                  currently cold, or caused an eviction from hot
  //                  cache.
  //                  Immediately process res; no need to process card_ptr.

  jbyte* res = card_ptr;
  bool defer = false;

  // This gets set to true if the card being refined has references
  // that point into the collection set.
  bool oops_into_cset = false;

  if (_cg1r->use_cache()) {
    // Assign to the outer 'res' rather than re-declaring it, so the
    // result of cache_insert is not shadowed.
    res = _cg1r->cache_insert(card_ptr, &defer);
    if (res != NULL && (res != card_ptr || defer)) {
      start = _ct_bs->addr_for(res);
      r = _g1->heap_region_containing(start);
      if (r == NULL) {
        assert(_g1->is_in_permanent(start), "Or else where?");
      } else {
        // Checking whether the region we got back from the cache
        // is young here is inappropriate. The region could have been
        // freed, reallocated and tagged as young while in the cache.
        // Hence we could see its young type change at any time.
        //
        // Process card pointer we get back from the hot card cache. This
        // will check whether the region containing the card is young
        // _after_ checking that the region has been allocated from.
        oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
                                                      false /* check_for_refs_into_cset */);
        // The above call to concurrentRefineOneCard_impl is only
        // performed if the hot card cache is enabled. This cache is
        // disabled during an evacuation pause - which is the only
        // time when we need to know if the card contains references
        // that point into the collection set. Also when the hot card
        // cache is enabled, this code is executed by the concurrent
        // refine threads - rather than the GC worker threads - and
        // concurrentRefineOneCard_impl will return false.
        assert(!oops_into_cset, "should not see true here");
      }
    }
  }

  if (!defer) {
    oops_into_cset =
      concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
    // We should only be detecting that the card contains references
    // that point into the collection set if the current thread is
    // a GC worker thread.
    assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
           "invalid result at non safepoint");
  }
  return oops_into_cset;
}
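
// Accumulates remembered-set footprint statistics (total and maximum
// memory size, total occupancy) across regions for print_summary_info().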
class HRRSStatsIter: public HeapRegionClosure {
  size_t _occupied;
  size_t _total_mem_sz;
  size_t _max_mem_sz;
  HeapRegion* _max_mem_sz_region;
public:
  HRRSStatsIter() :
    _occupied(0),
    _total_mem_sz(0),
    _max_mem_sz(0),
    _max_mem_sz_region(NULL)
  {}

  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    size_t mem_sz = r->rem_set()->mem_size();
    if (mem_sz > _max_mem_sz) {
      _max_mem_sz = mem_sz;
      _max_mem_sz_region = r;
    }
    _total_mem_sz += mem_sz;
    size_t occ = r->rem_set()->occupied();
    _occupied += occ;
    return false;
  }
  size_t total_mem_sz() { return _total_mem_sz; }
  size_t max_mem_sz() { return _max_mem_sz; }
  size_t occupied() { return _occupied; }
  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
};
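
// Prints the accumulated virtual time of each concurrent refinement thread.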
class PrintRSThreadVTimeClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread *t) {
    ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
    gclog_or_tty->print("    %5.2f", crt->vtime_accum());
  }
};

void G1RemSet::print_summary_info() {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr("  # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
    gclog_or_tty->print_cr("  # of CS ptrs --> # of cards with that number.");
    out_of_histo.print_on(gclog_or_tty);
  }
  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
                         _conc_refine_cards);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  jint tot_processed_buffers =
    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
  gclog_or_tty->print_cr("  Of %d completed buffers:", tot_processed_buffers);
  gclog_or_tty->print_cr("     %8d (%5.1f%%) by conc RS threads.",
                         dcqs.processed_buffers_rs_thread(),
                         100.0*(float)dcqs.processed_buffers_rs_thread()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr("     %8d (%5.1f%%) by mutator threads.",
                         dcqs.processed_buffers_mut(),
                         100.0*(float)dcqs.processed_buffers_mut()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr("  Conc RS threads times(s)");
  PrintRSThreadVTimeClosure p;
  gclog_or_tty->print("     ");
  g1->concurrent_g1_refine()->threads_do(&p);
  gclog_or_tty->print_cr("");

  HRRSStatsIter blk;
  g1->heap_region_iterate(&blk);
  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
                         "  Max = " SIZE_FORMAT "K.",
                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
  gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
                         " free_lists = " SIZE_FORMAT "K.",
                         HeapRegionRemSet::static_mem_size()/K,
                         HeapRegionRemSet::fl_mem_size()/K);
  gclog_or_tty->print_cr("    %d occupied cards represented.",
                         blk.occupied());
  gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
}

void G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC)
      &&  !_g1->full_collection()) {
    cleanupHRRS();
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }
    bool cg1r_use_cache = _cg1r->use_cache();
    _cg1r->set_use_cache(false);
    DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
    updateRS(&into_cset_dcq, 0);
    _g1->into_cset_dirty_card_queue_set().clear();
    _cg1r->set_use_cache(cg1r_use_cache);

    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}
1057 }