Wed, 25 Mar 2009 13:10:54 -0700
6543938: G1: remove the concept of popularity
Reviewed-by: iveresov, tonyp
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_g1RemSet.cpp.incl"

#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}

void ct_freq_note_card(size_t index) {
  assert(0 <= index && index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}

static IntHistogram card_repeat_count(10, 10);

void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }
}
#endif
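
// NOTE: The CARD_REPEAT_HISTO machinery above keeps a saturating (capped at
// 100) per-card redirty count and periodically folds those counts into a
// histogram. The standalone sketch below, kept out of the build like the
// #if 0 block later in this file, illustrates the same count-then-bin
// pattern in plain C++; names such as NUM_CARDS are illustrative only, not
// HotSpot types.
#if 0
#include <cstddef>

static const size_t NUM_CARDS = 1024;   // stand-in for heap_sz / card_size
static unsigned char freq[NUM_CARDS];   // saturating per-card counters (zero-initialized)

void note_card(size_t index) {
  if (freq[index] < 100) freq[index]++; // saturate, as ct_freq_note_card does
}

void update_histo_and_reset(size_t* histo, size_t buckets, size_t width) {
  for (size_t j = 0; j < NUM_CARDS; j++) {
    size_t bin = freq[j] / width;       // IntHistogram(10, 10): 10 buckets of width 10
    if (bin >= buckets) bin = buckets - 1;
    histo[bin]++;
    freq[j] = 0;                        // reset for the next interval
  }
}
#endif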
class IntoCSOopClosure: public OopsInHeapRegionClosure {
  OopsInHeapRegionClosure* _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(blk) {}
  void set_region(HeapRegion* from) {
    _blk->set_region(from);
  }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
  }
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool idempotent() { return true; }
};

class IntoCSRegionClosure: public HeapRegionClosure {
  IntoCSOopClosure _blk;
  G1CollectedHeap* _g1;
public:
  IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(g1, blk) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set()) {
      _blk.set_region(r);
      if (r->isHumongous()) {
        if (r->startsHumongous()) {
          oop obj = oop(r->bottom());
          obj->oop_iterate(&_blk);
        }
      } else {
        r->oop_before_save_marks_iterate(&_blk);
      }
    }
    return false;
  }
};

void
StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                            int worker_i) {
  IntoCSRegionClosure rc(_g1, oc);
  _g1->heap_region_iterate(&rc);
}

class UpdateRSOutOfRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mr_bs;
  UpdateRSOopClosure _cl;
  int _worker_i;
public:
  UpdateRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
    _cl(g1->g1_rem_set()->as_HRInto_G1RemSet(), worker_i),
    _mr_bs(g1->mr_bs()),
    _worker_i(worker_i),
    _g1h(g1)
  {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set() && !r->continuesHumongous()) {
      _cl.set_from(r);
      r->set_next_filter_kind(HeapRegionDCTOC::OutOfRegionFilterKind);
      _mr_bs->mod_oop_in_space_iterate(r, &_cl, true, true);
    }
    return false;
  }
};

class VerifyRSCleanCardOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");
  }
};

HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _par_traversal_in_progress(false), _new_refs(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<oop*>*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<oop*>(8192, true);
  }
}

HRInto_G1RemSet::~HRInto_G1RemSet() {
  delete _seq_task;
  for (uint i = 0; i < n_workers(); i++) {
    delete _new_refs[i];
  }
  FREE_C_HEAP_ARRAY(GrowableArray<oop*>*, _new_refs);
}

void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
  if (_g1->is_in_g1_reserved(mr.start())) {
    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
    if (_start_first == NULL) _start_first = mr.start();
  }
}

class ScanRSClosure : public HeapRegionClosure {
  size_t _cards_done, _cards;
  G1CollectedHeap* _g1h;
  OopsInHeapRegionClosure* _oc;
  G1BlockOffsetSharedArray* _bot_shared;
  CardTableModRefBS* _ct_bs;
  int _worker_i;
  bool _try_claimed;
public:
  ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
    _oc(oc),
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i),
    _try_claimed(false)
  {
    _g1h = G1CollectedHeap::heap();
    _bot_shared = _g1h->bot_shared();
    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
  }

  void set_try_claimed() { _try_claimed = true; }

  void scanCard(size_t index, HeapRegion* r) {
    _cards_done++;
    DirtyCardToOopClosure* cl =
      r->new_dcto_closure(_oc,
                          CardTableModRefBS::Precise,
                          HeapRegionDCTOC::IntoCSFilterKind);

    // Set the "from" region in the closure.
    _oc->set_region(r);
    HeapWord* card_start = _bot_shared->address_for_index(index);
    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
    Space* sp = SharedHeap::heap()->space_containing(card_start);
    MemRegion sm_region;
    if (ParallelGCThreads > 0) {
      // first find the used area
      sm_region = sp->used_region_at_save_marks();
    } else {
      // The closure is not idempotent. We shouldn't look at objects
      // allocated during the GC.
      sm_region = sp->used_region_at_save_marks();
    }
    MemRegion mr = sm_region.intersection(MemRegion(card_start, card_end));
    if (!mr.is_empty()) {
      cl->do_MemRegion(mr);
    }
  }
  void printCard(HeapRegion* card_region, size_t card_index,
                 HeapWord* card_start) {
    // card_index is a size_t, so print it with SIZE_FORMAT rather than %p.
    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                           "RS names card " SIZE_FORMAT ": "
                           "[" PTR_FORMAT ", " PTR_FORMAT ")",
                           _worker_i,
                           card_region->bottom(), card_region->end(),
                           card_index,
                           card_start,
                           card_start + G1BlockOffsetSharedArray::N_words);
  }
  bool doHeapRegion(HeapRegion* r) {
    assert(r->in_collection_set(), "should only be called on elements of CS.");
    HeapRegionRemSet* hrrs = r->rem_set();
    if (hrrs->iter_is_complete()) return false; // All done.
    if (!_try_claimed && !hrrs->claim_iter()) return false;
    // If we didn't return above, then
    //   _try_claimed || r->claim_iter()
    // is true: either we're supposed to work on claimed-but-not-complete
    // regions, or we successfully claimed the region.
    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
    hrrs->init_iterator(iter);
    size_t card_index;
    while (iter->has_next(card_index)) {
      HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);

#if 0
      gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
                          card_start, card_start + CardTableModRefBS::card_size_in_words);
#endif

      HeapRegion* card_region = _g1h->heap_region_containing(card_start);
      assert(card_region != NULL, "Yielding cards not in the heap?");
      _cards++;

      if (!card_region->in_collection_set()) {
        // If the card is dirty, then we will scan it during updateRS.
        if (!_ct_bs->is_card_claimed(card_index) &&
            !_ct_bs->is_card_dirty(card_index)) {
          assert(_ct_bs->is_card_clean(card_index) ||
                 _ct_bs->is_card_claimed(card_index) ||
                 _ct_bs->is_card_deferred(card_index),
                 "Card is either clean, claimed or deferred");
          if (_ct_bs->claim_card(card_index))
            scanCard(card_index, card_region);
        }
      }
    }
    hrrs->set_iter_complete();
    return false;
  }
  // Set all cards back to clean.
  void cleanup() { _g1h->cleanUpCardTable(); }
  size_t cards_done() { return _cards_done; }
  size_t cards_looked_up() { return _cards; }
};
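
// NOTE: As scanRS below shows, ScanRSClosure is applied to the collection
// set twice: first with _try_claimed false, where a worker only scans
// regions whose remembered-set iterator it claims atomically, then with
// _try_claimed true, where workers help finish regions that were claimed
// but not completed. A minimal standalone sketch of such a claim-once
// protocol follows; std::atomic stands in for HotSpot's Atomic::cmpxchg,
// and all names here are illustrative, not HotSpot APIs.
#if 0
#include <atomic>

struct RegionIterState {
  std::atomic<int> state;              // 0 = unclaimed, 1 = claimed, 2 = complete
  RegionIterState() : state(0) {}

  bool claim_iter() {                  // at most one worker wins the claim
    int expected = 0;
    return state.compare_exchange_strong(expected, 1);
  }
  bool iter_is_complete() { return state.load() == 2; }
  void set_iter_complete() { state.store(2); }
};

// First pass: scan only if claim_iter() succeeds.
// Second "try claimed" pass: scan any region that is claimed but not yet
// complete, relying on per-card claiming to avoid duplicated work.
#endif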
// We want the parallel threads to start their scanning at
// different collection set regions to avoid contention.
// If we have:
//   n collection set regions
//   p threads
// then thread t will start at region t * floor(n/p).

HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
  HeapRegion* result = _g1p->collection_set();
  if (ParallelGCThreads > 0) {
    size_t cs_size = _g1p->collection_set_size();
    int n_workers = _g1->workers()->total_workers();
    size_t cs_spans = cs_size / n_workers;
    size_t ind = cs_spans * worker_i;
    for (size_t i = 0; i < ind; i++)
      result = result->next_in_collection_set();
  }
  return result;
}
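
// NOTE: A worked example of the distribution above, as a standalone sketch
// (not HotSpot code): with n = 10 collection set regions and p = 4 threads,
// floor(n/p) = 2, so workers 0..3 start at regions 0, 2, 4 and 6. The tail
// regions 8 and 9 are still covered, because each worker iterates from its
// start region to the end of the collection set, claiming regions as it goes.
#if 0
#include <cstdio>

int main() {
  const size_t n = 10;                 // collection set regions
  const int    p = 4;                  // parallel GC workers
  size_t span = n / p;                 // floor(n/p) == 2
  for (int t = 0; t < p; t++) {
    printf("worker %d starts at region %zu\n", t, span * t);
  }
  return 0;                            // prints regions 0, 2, 4, 6
}
#endif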
void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion* startRegion = calculateStartRegion(worker_i);

  BufferingOopsInHeapRegionClosure boc(oc);
  ScanRSClosure scanRScl(&boc, worker_i);
  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  boc.done();
  double closure_app_time_sec = boc.closure_app_seconds();
  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
                            closure_app_time_sec;
  double closure_app_time_ms = closure_app_time_sec * 1000.0;

  assert(_cards_scanned != NULL, "invariant");
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
  _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);

  double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
  if (scan_new_refs_time_ms > 0.0) {
    closure_app_time_ms += scan_new_refs_time_ms;
  }

  _g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
}
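
// NOTE: The timing arithmetic above splits one wall-clock interval into
// "RS scanning" and "closure application" (object copying):
//   scan_rs = (now - rs_time_start) - closure_app
// so the two recorded phases sum back to the measured interval. A standalone
// sketch of the same bookkeeping, with illustrative numbers:
#if 0
double elapsed_sec     = 0.250;  // total time spent in scanRS
double closure_app_sec = 0.180;  // time the buffering closure spent applying oops
double scan_rs_sec     = elapsed_sec - closure_app_sec;  // 0.070 attributed to RS scan
// record_scan_rs_time(worker, scan_rs_sec * 1000.0);    // the two phases
// record_obj_copy_time(worker, closure_app_sec * 1000.0); // sum to elapsed_sec
#endif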
void HRInto_G1RemSet::updateRS(int worker_i) {
  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();

  double start = os::elapsedTime();
  _g1p->record_update_rs_start_time(worker_i, start * 1000.0);

  if (G1RSBarrierUseQueue && !cg1r->do_traversal()) {
    // Apply the appropriate closure to all remaining log entries.
    _g1->iterate_dirty_card_closure(false, worker_i);
    // Now there should be no dirty cards.
    if (G1RSLogCheckCardTable) {
      CountNonCleanMemRegionClosure cl(_g1);
      _ct_bs->mod_card_iterate(&cl);
      // XXX This isn't true any more: keeping cards of young regions
      // marked dirty broke it. Need some reasonable fix.
      guarantee(cl.n() == 0, "Card table should be clean.");
    }
  } else {
    UpdateRSOutOfRegionClosure update_rs(_g1, worker_i);
    _g1->heap_region_iterate(&update_rs);
    // We did a traversal; no further one is necessary.
    if (G1RSBarrierUseQueue) {
      assert(cg1r->do_traversal(), "Or we shouldn't have gotten here.");
      cg1r->set_pya_cancel();
    }
    if (_cg1r->use_cache()) {
      _cg1r->clear_and_record_card_counts();
      _cg1r->clear_hot_cache();
    }
  }
  _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}
#ifndef PRODUCT
class PrintRSClosure : public HeapRegionClosure {
  int _count;
public:
  PrintRSClosure() : _count(0) {}
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    _count += (int) hrrs->occupied();
    if (hrrs->occupied() == 0) {
      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                          "has no remset entries\n",
                          r->bottom(), r->end());
    } else {
      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
                          r->bottom(), r->end());
      r->print();
      hrrs->print();
      gclog_or_tty->print("\nDone printing rem set\n");
    }
    return false;
  }
  int occupied() { return _count; }
};
#endif

class CountRSSizeClosure: public HeapRegionClosure {
  size_t _n;
  size_t _tot;
  size_t _max;
  HeapRegion* _max_r;
  enum {
    N = 20,
    MIN = 6
  };
  int _histo[N];
public:
  CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
    for (int i = 0; i < N; i++) _histo[i] = 0;
  }
  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      size_t occ = r->rem_set()->occupied();
      _n++;
      _tot += occ;
      if (occ > _max) {
        _max = occ;
        _max_r = r;
      }
      // Fit it into a histo bin.
      int s = 1 << MIN;
      int i = 0;
      while (occ > (size_t) s && i < (N-1)) {
        s = s << 1;
        i++;
      }
      _histo[i]++;
    }
    return false;
  }
  size_t n() { return _n; }
  size_t tot() { return _tot; }
  size_t mx() { return _max; }
  HeapRegion* mxr() { return _max_r; }
  void print_histo() {
    int mx = N;
    // Stop at zero so we never read _histo[-1] when all bins are empty.
    while (mx > 0) {
      if (_histo[mx-1] > 0) break;
      mx--;
    }
    gclog_or_tty->print_cr("Number of regions with given RS sizes:");
    gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]);
    for (int i = 1; i < mx-1; i++) {
      gclog_or_tty->print_cr(" %8d - %8d %8d",
                             (1 << (MIN + i - 1)) + 1,
                             1 << (MIN + i),
                             _histo[i]);
    }
    gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
  }
};
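
// NOTE: CountRSSizeClosure bins remembered-set occupancies into
// exponentially sized buckets: bucket 0 holds occupancies up to 2^MIN (64),
// and each later bucket doubles the upper bound. A standalone sketch of the
// binning step (illustrative, not HotSpot code):
#if 0
#include <cstddef>

// Returns the histogram bucket for an occupancy value, with N_buckets
// buckets and the first bucket covering (0, 2^MIN_log2].
int histo_bin(size_t occ, int N_buckets, int MIN_log2) {
  int s = 1 << MIN_log2;
  int i = 0;
  while (occ > (size_t) s && i < (N_buckets - 1)) {
    s <<= 1;               // bucket upper bounds: 64, 128, 256, ...
    i++;
  }
  return i;                // e.g. histo_bin(200, 20, 6) == 2 (129..256)
}
#endif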
void
HRInto_G1RemSet::scanNewRefsRS(OopsInHeapRegionClosure* oc,
                               int worker_i) {
  double scan_new_refs_start_sec = os::elapsedTime();
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
  for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
    oop* p = _new_refs[worker_i]->at(i);
    oop obj = *p;
    // *p was in the collection set when p was pushed on "_new_refs", but
    // another thread may have processed this location from an RS, so it
    // might not point into the CS any longer. If so, it's obviously been
    // processed, and we don't need to do anything further.
    if (g1h->obj_in_cs(obj)) {
      HeapRegion* r = g1h->heap_region_containing(p);

      DEBUG_ONLY(HeapRegion* to = g1h->heap_region_containing(obj));
      oc->set_region(r);
      // If "p" has already been processed concurrently, this is
      // idempotent.
      oc->do_oop(p);
    }
  }
  _g1p->record_scan_new_refs_time(worker_i,
                                  (os::elapsedTime() - scan_new_refs_start_sec)
                                  * 1000.0);
}

void HRInto_G1RemSet::set_par_traversal(bool b) {
  _par_traversal_in_progress = b;
  HeapRegionRemSet::set_par_traversal(b);
}

void HRInto_G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}

void
HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                             int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  if (worker_i == 0) {
    _cg1r->clear_and_record_card_counts();
  }

  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
                           "max region is " PTR_FORMAT,
                           count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                           count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }

  if (ParallelGCThreads > 0) {
    // The two flags below were introduced temporarily to serialize
    // the updating and scanning of remembered sets. There are some
    // race conditions when these two operations are done in parallel
    // and they are causing failures. When we resolve said race
    // conditions, we'll revert back to parallel remembered set
    // updating and scanning. See CRs 6677707 and 6677708.
    if (G1EnableParallelRSetUpdating || (worker_i == 0)) {
      updateRS(worker_i);
      scanNewRefsRS(oc, worker_i);
    } else {
      _g1p->record_update_rs_start_time(worker_i, os::elapsedTime());
      _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
      _g1p->record_update_rs_time(worker_i, 0.0);
      _g1p->record_scan_new_refs_time(worker_i, 0.0);
    }
    if (G1EnableParallelRSetScanning || (worker_i == 0)) {
      scanRS(oc, worker_i);
    } else {
      _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime());
      _g1p->record_scan_rs_time(worker_i, 0.0);
    }
  } else {
    assert(worker_i == 0, "invariant");
    updateRS(0);
    scanNewRefsRS(oc, 0);
    scanRS(oc, 0);
  }
}

void HRInto_G1RemSet::
prepare_for_oops_into_collection_set_do() {
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->collection_set_iterate(&cl);
#endif
  cleanupHRRS();
  ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  assert(!_par_traversal_in_progress, "Invariant between iterations.");
  if (ParallelGCThreads > 0) {
    set_par_traversal(true);
    _seq_task->set_par_threads((int)n_workers());
    if (cg1r->do_traversal()) {
      updateRS(0);
      // Have to do this again after updaters
      cleanupHRRS();
    }
  }
  guarantee(_cards_scanned == NULL, "invariant");
  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
  for (uint i = 0; i < n_workers(); ++i) {
    _cards_scanned[i] = 0;
  }
  _total_cards_scanned = 0;
}

class cleanUpIteratorsClosure : public HeapRegionClosure {
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    hrrs->init_for_par_iteration();
    return false;
  }
};

class UpdateRSetOopsIntoCSImmediate : public OopClosure {
  G1CollectedHeap* _g1;
public:
  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    HeapRegion* to = _g1->heap_region_containing(*p);
    if (to->in_collection_set()) {
      to->rem_set()->add_reference(p, 0);
    }
  }
};

class UpdateRSetOopsIntoCSDeferred : public OopClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;
  DirtyCardQueue* _dcq;
public:
  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
  virtual void do_oop(narrowOop* p) {
    guarantee(false, "NYI");
  }
  virtual void do_oop(oop* p) {
    oop obj = *p;
    if (_g1->obj_in_cs(obj)) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};
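
// NOTE: The deferred variant above does not touch the remembered set
// directly; it marks the card containing the updated location "deferred"
// (exactly once, since mark_card_deferred's transition is atomic) and
// enqueues it on a dirty card queue to be refined later. A minimal
// standalone sketch of that mark-once-then-enqueue pattern, with
// illustrative names rather than HotSpot APIs:
#if 0
#include <atomic>
#include <cstddef>
#include <queue>

enum { CLEAN = 0, DEFERRED = 1 };

// Static storage, so all cards start CLEAN (zero-initialized).
static std::atomic<int> cards[1024];

// Returns true for exactly one caller per clean->deferred transition,
// so each card is enqueued at most once.
bool mark_card_deferred(size_t index) {
  int expected = CLEAN;
  return cards[index].compare_exchange_strong(expected, DEFERRED);
}

void record_update(std::queue<size_t>& dcq, size_t card_index) {
  if (mark_card_deferred(card_index)) {
    dcq.push(card_index);   // refine this card after evacuation completes
  }
}
#endif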
void HRInto_G1RemSet::new_refs_iterate(OopClosure* cl) {
  for (size_t i = 0; i < n_workers(); i++) {
    for (int j = 0; j < _new_refs[i]->length(); j++) {
      oop* p = _new_refs[i]->at(j);
      cl->do_oop(p);
    }
  }
}

void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee(_cards_scanned != NULL, "invariant");
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();
  if (ParallelGCThreads > 0) {
    ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
    if (cg1r->do_traversal()) {
      cg1r->cg1rThread()->set_do_traversal(false);
    }
    set_par_traversal(false);
  }

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into
    // the collection set.
    if (G1DeferredRSUpdate) {
      DirtyCardQueue dcq(&_g1->dirty_card_queue_set());
      UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq);
      new_refs_iterate(&deferred_update);
    } else {
      UpdateRSetOopsIntoCSImmediate immediate_update(_g1);
      new_refs_iterate(&immediate_update);
    }
  }
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i]->clear();
  }

  assert(!_par_traversal_in_progress, "Invariant between iterations.");
}

class UpdateRSObjectClosure: public ObjectClosure {
  UpdateRSOopClosure* _update_rs_oop_cl;
public:
  UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) :
    _update_rs_oop_cl(update_rs_oop_cl) {}
  void do_object(oop obj) {
    obj->oop_iterate(_update_rs_oop_cl);
  }
};

class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(NULL)
  {
    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ctbs = (CardTableModRefBS*)bs;
  }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};

void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_iterate(&scrub_cl);
}

void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
                                int worker_num, int claim_val) {
  ScrubRSClosure scrub_cl(region_bm, card_bm);
  _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
}

class ConcRefineRegionClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  CardTableModRefBS* _ctbs;
  ConcurrentGCThread* _cgc_thrd;
  ConcurrentG1Refine* _cg1r;
  unsigned _cards_processed;
  UpdateRSOopClosure _update_rs_oop_cl;
public:
  ConcRefineRegionClosure(CardTableModRefBS* ctbs,
                          ConcurrentG1Refine* cg1r,
                          HRInto_G1RemSet* g1rs) :
    _ctbs(ctbs), _cg1r(cg1r), _cgc_thrd(cg1r->cg1rThread()),
    _update_rs_oop_cl(g1rs), _cards_processed(0),
    _g1h(G1CollectedHeap::heap())
  {}

  bool doHeapRegion(HeapRegion* r) {
    if (!r->in_collection_set() &&
        !r->continuesHumongous() &&
        !r->is_young()) {
      _update_rs_oop_cl.set_from(r);
      UpdateRSObjectClosure update_rs_obj_cl(&_update_rs_oop_cl);

      // For each run of dirty cards in the region:
      //   1) Clear the cards.
      //   2) Process the range corresponding to the run, adding any
      //      necessary RS entries.
      // 1 must precede 2, so that a concurrent modification redirties the
      // card. If a processing attempt does not succeed, because it runs
      // into an unparseable region, we will do binary search to find the
      // beginning of the next parseable region.
      HeapWord* startAddr = r->bottom();
      HeapWord* endAddr = r->used_region().end();
      HeapWord* lastAddr;
      HeapWord* nextAddr;

      for (nextAddr = lastAddr = startAddr;
           nextAddr < endAddr;
           nextAddr = lastAddr) {
        MemRegion dirtyRegion;

        // Get and clear dirty region from card table
        MemRegion next_mr(nextAddr, endAddr);
        dirtyRegion =
          _ctbs->dirty_card_range_after_reset(
            next_mr,
            true, CardTableModRefBS::clean_card_val());
        assert(dirtyRegion.start() >= nextAddr,
               "returned region inconsistent?");

        if (!dirtyRegion.is_empty()) {
          HeapWord* stop_point =
            r->object_iterate_mem_careful(dirtyRegion,
                                          &update_rs_obj_cl);
          if (stop_point == NULL) {
            lastAddr = dirtyRegion.end();
            _cards_processed +=
              (int) (dirtyRegion.word_size() / CardTableModRefBS::card_size_in_words);
          } else {
            // We're going to skip one or more cards that we can't parse.
            HeapWord* next_parseable_card =
              r->next_block_start_careful(stop_point);
            // Round this up to a card boundary.
            next_parseable_card =
              _ctbs->addr_for(_ctbs->byte_after_const(next_parseable_card));
            // Now we invalidate the intervening cards so we'll see them
            // again.
            MemRegion remaining_dirty =
              MemRegion(stop_point, dirtyRegion.end());
            MemRegion skipped =
              MemRegion(stop_point, next_parseable_card);
            _ctbs->invalidate(skipped.intersection(remaining_dirty));

            // Now start up again where we can parse.
            lastAddr = next_parseable_card;

            // Count how many we did completely.
            _cards_processed +=
              (stop_point - dirtyRegion.start()) /
              CardTableModRefBS::card_size_in_words;
          }
          // Allow interruption at regular intervals.
          // (Might need to make them more regular, if we get big
          // dirty regions.)
          if (_cgc_thrd != NULL) {
            if (_cgc_thrd->should_yield()) {
              _cgc_thrd->yield();
              switch (_cg1r->get_pya()) {
              case PYA_continue:
                // This may have changed: re-read.
                endAddr = r->used_region().end();
                continue;
              case PYA_restart: case PYA_cancel:
                return true;
              }
            }
          }
        } else {
          break;
        }
      }
    }
    // A good yield opportunity.
    if (_cgc_thrd != NULL) {
      if (_cgc_thrd->should_yield()) {
        _cgc_thrd->yield();
        switch (_cg1r->get_pya()) {
        case PYA_restart: case PYA_cancel:
          return true;
        default:
          break;
        }
      }
    }
    return false;
  }

  unsigned cards_processed() { return _cards_processed; }
};
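
// NOTE: When the careful iteration above stops at an unparseable address,
// the code rounds the restart point up to a card boundary and re-invalidates
// the skipped-but-still-dirty cards so they are revisited later. The
// rounding step is plain address arithmetic; a standalone sketch, using an
// illustrative 512-byte card size rather than HotSpot's constants:
#if 0
#include <cstdint>

const uintptr_t CARD_SIZE = 512;   // bytes per card (illustrative)

// Round an address up to a card boundary.
uintptr_t round_up_to_card(uintptr_t addr) {
  return (addr + CARD_SIZE - 1) & ~(CARD_SIZE - 1);
  // e.g. round_up_to_card(0x1234) == 0x1400 for 512-byte cards
}
#endif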
void HRInto_G1RemSet::concurrentRefinementPass(ConcurrentG1Refine* cg1r) {
  ConcRefineRegionClosure cr_cl(ct_bs(), cg1r, this);
  _g1->heap_region_iterate(&cr_cl);
  _conc_refine_traversals++;
  _conc_refine_cards += cr_cl.cards_processed();
}

static IntHistogram out_of_histo(50, 50);

void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
  // If the card is no longer dirty, nothing to do.
  if (*card_ptr != CardTableModRefBS::dirty_card_val()) return;

  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  if (r == NULL) {
    guarantee(_g1->is_in_permanent(start), "Or else where?");
    return;  // Not in the G1 heap (might be in perm, for example.)
  }
  // Why do we have to check here whether a card is on a young region,
  // given that we dirty young regions and, as a result, the
  // post-barrier is supposed to filter them out and never to enqueue
  // them? When we allocate a new region as the "allocation region" we
  // actually dirty its cards after we release the lock, since card
  // dirtying while holding the lock was a performance bottleneck. So,
  // as a result, it is possible for other threads to actually
  // allocate objects in the region (after they acquire the lock)
  // before all the cards on the region are dirtied. This is unlikely,
  // and it doesn't happen often, but it can happen. So, the extra
  // check below filters out those cards.
  if (r->is_young()) {
    return;
  }
  // While we are processing RSet buffers during the collection, we
  // actually don't want to scan any cards on the collection set,
  // since we don't want to update remembered sets with entries that
  // point into the collection set, given that live objects from the
  // collection set are about to move and such entries will be stale
  // very soon. This change also deals with a reliability issue which
  // involves scanning a card in the collection set and coming across
  // an array that was being chunked and looking malformed. Note,
  // however, that if evacuation fails, we have to scan any objects
  // that were not moved and create any missing entries.
  if (r->in_collection_set()) {
    return;
  }

  // Should we defer it?
  if (_cg1r->use_cache()) {
    card_ptr = _cg1r->cache_insert(card_ptr);
    // If it was not an eviction, nothing to do.
    if (card_ptr == NULL) return;

    // OK, we have to reset the card start, region, etc.
    start = _ct_bs->addr_for(card_ptr);
    r = _g1->heap_region_containing(start);
    if (r == NULL) {
      guarantee(_g1->is_in_permanent(start), "Or else where?");
      return;  // Not in the G1 heap (might be in perm, for example.)
    }
    guarantee(!r->is_young(), "It was evicted in the current minor cycle.");
  }

  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
  update_rs_oop_cl.set_from(r);
  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, &update_rs_oop_cl);

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.
  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl);
  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }
}
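
// NOTE: The clean-then-StoreLoad-then-scan sequence above is the per-card
// version of the "clear before process" rule: the card must be observably
// clean before the refiner reads the heap words it covers, or a concurrent
// mutator store could be scanned-and-lost without redirtying the card. A
// standalone sketch of the fence placement, using C++11 atomics as a
// stand-in for HotSpot's OrderAccess::storeload(); the card values are
// assumed for illustration (see CardTableModRefBS for the real ones):
#if 0
#include <atomic>

const signed char DIRTY = 0;    // assumed dirty_card_val() for this sketch
const signed char CLEAN = -1;   // assumed clean_card_val() for this sketch

static std::atomic<signed char> card_byte(DIRTY);

void refine_one_card(/* heap range covered by the card */) {
  if (card_byte.load(std::memory_order_relaxed) != DIRTY) return;
  card_byte.store(CLEAN, std::memory_order_relaxed);    // undirty the card
  std::atomic_thread_fence(std::memory_order_seq_cst);  // the StoreLoad barrier
  // ... only now read the words the card covers: a racing mutator store
  // either happens before our reads or redirties the card, so the update
  // is never silently lost.
}
#endif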
class HRRSStatsIter: public HeapRegionClosure {
  size_t _occupied;
  size_t _total_mem_sz;
  size_t _max_mem_sz;
  HeapRegion* _max_mem_sz_region;
public:
  HRRSStatsIter() :
    _occupied(0),
    _total_mem_sz(0),
    _max_mem_sz(0),
    _max_mem_sz_region(NULL)
  {}

  bool doHeapRegion(HeapRegion* r) {
    if (r->continuesHumongous()) return false;
    size_t mem_sz = r->rem_set()->mem_size();
    if (mem_sz > _max_mem_sz) {
      _max_mem_sz = mem_sz;
      _max_mem_sz_region = r;
    }
    _total_mem_sz += mem_sz;
    size_t occ = r->rem_set()->occupied();
    _occupied += occ;
    return false;
  }
  size_t total_mem_sz() { return _total_mem_sz; }
  size_t max_mem_sz() { return _max_mem_sz; }
  size_t occupied() { return _occupied; }
  HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
};

void HRInto_G1RemSet::print_summary_info() {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
  ConcurrentG1RefineThread* cg1r_thrd =
    g1->concurrent_g1_refine()->cg1rThread();

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
    gclog_or_tty->print_cr(" # of CS ptrs --> # of cards with that number.");
    out_of_histo.print_on(gclog_or_tty);
  }
  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards in "
                         "%5.2fs.",
                         _conc_refine_cards, cg1r_thrd->vtime_accum());

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  jint tot_processed_buffers =
    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
  gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
  gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS thread.",
                         dcqs.processed_buffers_rs_thread(),
                         100.0*(float)dcqs.processed_buffers_rs_thread()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
                         dcqs.processed_buffers_mut(),
                         100.0*(float)dcqs.processed_buffers_mut()/
                         (float)tot_processed_buffers);
  gclog_or_tty->print_cr(" Did %d concurrent refinement traversals.",
                         _conc_refine_traversals);
  if (!G1RSBarrierUseQueue) {
    gclog_or_tty->print_cr(" Scanned %8.2f cards/traversal.",
                           _conc_refine_traversals > 0 ?
                           (float)_conc_refine_cards/(float)_conc_refine_traversals :
                           0);
  }
  gclog_or_tty->print_cr("");
  if (G1UseHRIntoRS) {
    HRRSStatsIter blk;
    g1->heap_region_iterate(&blk);
    gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
                           " Max = " SIZE_FORMAT "K.",
                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
    gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K,"
                           " free_lists = " SIZE_FORMAT "K.",
                           HeapRegionRemSet::static_mem_size()/K,
                           HeapRegionRemSet::fl_mem_size()/K);
    gclog_or_tty->print_cr(" %d occupied cards represented.",
                           blk.occupied());
    gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
                           ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
                           blk.max_mem_sz_region()->bottom(),
                           blk.max_mem_sz_region()->end(),
                           (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
                           (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
    gclog_or_tty->print_cr(" Did %d coarsenings.",
                           HeapRegionRemSet::n_coarsenings());
  }
}

void HRInto_G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC) &&
      !_g1->full_collection()) {
    cleanupHRRS();
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }
    bool cg1r_use_cache = _cg1r->use_cache();
    _cg1r->set_use_cache(false);
    updateRS(0);
    _cg1r->set_use_cache(cg1r_use_cache);

    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0,
           "All should be consumed");
  }
}
1051 }