src/share/vm/gc_implementation/g1/g1RemSet.cpp

Mon, 03 Aug 2009 12:59:30 -0700

author
johnc
date
Mon, 03 Aug 2009 12:59:30 -0700
changeset 1324
15c5903cf9e1
parent 1280
df6caf649ff7
child 1325
6cb8e9df7174
permissions
-rw-r--r--

6865703: G1: Parallelize hot card cache cleanup
Summary: Have the GC worker threads clear the hot card cache in parallel by having each worker thread claim a chunk of the card cache and process the cards in that chunk. The size of the chunks that each thread will claim is determined at VM initialization from the size of the card cache and the number of worker threads.
Reviewed-by: jmasa, tonyp

ysr@777 1 /*
xdono@1014 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
ysr@777 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
ysr@777 20 * CA 95054 USA or visit www.sun.com if you need additional information or
ysr@777 21 * have any questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_g1RemSet.cpp.incl"
ysr@777 27
// When non-zero, maintain a per-card count of how many times each card
// is seen between histogram resets (debug instrumentation; off by default).
#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
// Number of entries in the card-frequency table (one counter per card).
static size_t ct_freq_sz;
// Per-card repeat counters; each saturates at 100.
static jbyte* ct_freq = NULL;

// Lazily allocate and zero the card-frequency table, sized at one
// counter per card for the given heap size.  Safe to call repeatedly.
void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}

// Record one more occurrence of the card at "index", saturating at 100.
// NOTE(review): "0 <= index" is a tautology for an unsigned size_t.
void ct_freq_note_card(size_t index) {
  assert(0 <= index && index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}

// Histogram of per-card repeat counts: buckets of width 10, starting with 10.
static IntHistogram card_repeat_count(10, 10);

// Fold the current per-card counters into the histogram, then zero the
// counters for the next measurement interval.
void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }

}
#endif
ysr@777 57
ysr@777 58
ysr@777 59 class IntoCSOopClosure: public OopsInHeapRegionClosure {
ysr@777 60 OopsInHeapRegionClosure* _blk;
ysr@777 61 G1CollectedHeap* _g1;
ysr@777 62 public:
ysr@777 63 IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
ysr@777 64 _g1(g1), _blk(blk) {}
ysr@777 65 void set_region(HeapRegion* from) {
ysr@777 66 _blk->set_region(from);
ysr@777 67 }
ysr@1280 68 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 69 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 70 template <class T> void do_oop_work(T* p) {
ysr@1280 71 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 72 if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
ysr@777 73 }
ysr@777 74 bool apply_to_weak_ref_discovered_field() { return true; }
ysr@777 75 bool idempotent() { return true; }
ysr@777 76 };
ysr@777 77
ysr@777 78 class IntoCSRegionClosure: public HeapRegionClosure {
ysr@777 79 IntoCSOopClosure _blk;
ysr@777 80 G1CollectedHeap* _g1;
ysr@777 81 public:
ysr@777 82 IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
ysr@777 83 _g1(g1), _blk(g1, blk) {}
ysr@777 84 bool doHeapRegion(HeapRegion* r) {
ysr@777 85 if (!r->in_collection_set()) {
ysr@777 86 _blk.set_region(r);
ysr@777 87 if (r->isHumongous()) {
ysr@777 88 if (r->startsHumongous()) {
ysr@777 89 oop obj = oop(r->bottom());
ysr@777 90 obj->oop_iterate(&_blk);
ysr@777 91 }
ysr@777 92 } else {
ysr@777 93 r->oop_before_save_marks_iterate(&_blk);
ysr@777 94 }
ysr@777 95 }
ysr@777 96 return false;
ysr@777 97 }
ysr@777 98 };
ysr@777 99
ysr@777 100 void
ysr@777 101 StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
ysr@777 102 int worker_i) {
ysr@777 103 IntoCSRegionClosure rc(_g1, oc);
ysr@777 104 _g1->heap_region_iterate(&rc);
ysr@777 105 }
ysr@777 106
// Verification closure: after remembered-set processing, no reference
// reachable from a clean card should still point into the collection set.
class VerifyRSCleanCardOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    // to == NULL covers referents outside the G1 heap (e.g. NULL oops).
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");
  }
};
ysr@777 121
// Constructor: set up the sequential task set and one C-heap-backed
// growable array of "new refs" per GC worker, so workers can record
// discovered locations without synchronization.
HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _par_traversal_in_progress(false), _new_refs(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    // 8192 initial entries, C-heap allocated (the "true" argument).
    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<OopOrNarrowOopStar>(8192,true);
  }
}
ysr@777 135
// Destructor: release the per-worker new-refs arrays, their holder
// array, and the sequential task set allocated in the constructor.
HRInto_G1RemSet::~HRInto_G1RemSet() {
  delete _seq_task;
  for (uint i = 0; i < n_workers(); i++) {
    delete _new_refs[i];
  }
  FREE_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, _new_refs);
}
ysr@777 143
ysr@777 144 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
ysr@777 145 if (_g1->is_in_g1_reserved(mr.start())) {
ysr@777 146 _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
ysr@777 147 if (_start_first == NULL) _start_first = mr.start();
ysr@777 148 }
ysr@777 149 }
ysr@777 150
ysr@777 151 class ScanRSClosure : public HeapRegionClosure {
ysr@777 152 size_t _cards_done, _cards;
ysr@777 153 G1CollectedHeap* _g1h;
ysr@777 154 OopsInHeapRegionClosure* _oc;
ysr@777 155 G1BlockOffsetSharedArray* _bot_shared;
ysr@777 156 CardTableModRefBS *_ct_bs;
ysr@777 157 int _worker_i;
ysr@777 158 bool _try_claimed;
iveresov@1182 159 size_t _min_skip_distance, _max_skip_distance;
ysr@777 160 public:
ysr@777 161 ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
ysr@777 162 _oc(oc),
ysr@777 163 _cards(0),
ysr@777 164 _cards_done(0),
ysr@777 165 _worker_i(worker_i),
ysr@777 166 _try_claimed(false)
ysr@777 167 {
ysr@777 168 _g1h = G1CollectedHeap::heap();
ysr@777 169 _bot_shared = _g1h->bot_shared();
ysr@777 170 _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
iveresov@1182 171 _min_skip_distance = 16;
iveresov@1182 172 _max_skip_distance = 2 * _g1h->n_par_threads() * _min_skip_distance;
ysr@777 173 }
ysr@777 174
ysr@777 175 void set_try_claimed() { _try_claimed = true; }
ysr@777 176
ysr@777 177 void scanCard(size_t index, HeapRegion *r) {
ysr@777 178 _cards_done++;
ysr@777 179 DirtyCardToOopClosure* cl =
ysr@777 180 r->new_dcto_closure(_oc,
ysr@777 181 CardTableModRefBS::Precise,
ysr@777 182 HeapRegionDCTOC::IntoCSFilterKind);
ysr@777 183
ysr@777 184 // Set the "from" region in the closure.
ysr@777 185 _oc->set_region(r);
ysr@777 186 HeapWord* card_start = _bot_shared->address_for_index(index);
ysr@777 187 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
ysr@777 188 Space *sp = SharedHeap::heap()->space_containing(card_start);
ysr@777 189 MemRegion sm_region;
ysr@777 190 if (ParallelGCThreads > 0) {
ysr@777 191 // first find the used area
ysr@777 192 sm_region = sp->used_region_at_save_marks();
ysr@777 193 } else {
ysr@777 194 // The closure is not idempotent. We shouldn't look at objects
ysr@777 195 // allocated during the GC.
ysr@777 196 sm_region = sp->used_region_at_save_marks();
ysr@777 197 }
ysr@777 198 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
ysr@777 199 if (!mr.is_empty()) {
ysr@777 200 cl->do_MemRegion(mr);
ysr@777 201 }
ysr@777 202 }
ysr@777 203
ysr@777 204 void printCard(HeapRegion* card_region, size_t card_index,
ysr@777 205 HeapWord* card_start) {
ysr@777 206 gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
ysr@777 207 "RS names card %p: "
ysr@777 208 "[" PTR_FORMAT ", " PTR_FORMAT ")",
ysr@777 209 _worker_i,
ysr@777 210 card_region->bottom(), card_region->end(),
ysr@777 211 card_index,
ysr@777 212 card_start, card_start + G1BlockOffsetSharedArray::N_words);
ysr@777 213 }
ysr@777 214
ysr@777 215 bool doHeapRegion(HeapRegion* r) {
ysr@777 216 assert(r->in_collection_set(), "should only be called on elements of CS.");
ysr@777 217 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 218 if (hrrs->iter_is_complete()) return false; // All done.
ysr@777 219 if (!_try_claimed && !hrrs->claim_iter()) return false;
apetrusenko@1231 220 _g1h->push_dirty_cards_region(r);
ysr@777 221 // If we didn't return above, then
ysr@777 222 // _try_claimed || r->claim_iter()
ysr@777 223 // is true: either we're supposed to work on claimed-but-not-complete
ysr@777 224 // regions, or we successfully claimed the region.
ysr@777 225 HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
ysr@777 226 hrrs->init_iterator(iter);
ysr@777 227 size_t card_index;
iveresov@1182 228 size_t skip_distance = 0, current_card = 0, jump_to_card = 0;
ysr@777 229 while (iter->has_next(card_index)) {
iveresov@1182 230 if (current_card < jump_to_card) {
iveresov@1182 231 ++current_card;
iveresov@1182 232 continue;
iveresov@1182 233 }
ysr@777 234 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
ysr@777 235 #if 0
ysr@777 236 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
ysr@777 237 card_start, card_start + CardTableModRefBS::card_size_in_words);
ysr@777 238 #endif
ysr@777 239
ysr@777 240 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
ysr@777 241 assert(card_region != NULL, "Yielding cards not in the heap?");
ysr@777 242 _cards++;
ysr@777 243
apetrusenko@1231 244 if (!card_region->is_on_dirty_cards_region_list()) {
apetrusenko@1231 245 _g1h->push_dirty_cards_region(card_region);
apetrusenko@1231 246 }
apetrusenko@1231 247
iveresov@1182 248 // If the card is dirty, then we will scan it during updateRS.
iveresov@1182 249 if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
iveresov@1182 250 if (!_ct_bs->is_card_claimed(card_index) && _ct_bs->claim_card(card_index)) {
ysr@777 251 scanCard(card_index, card_region);
iveresov@1182 252 } else if (_try_claimed) {
iveresov@1182 253 if (jump_to_card == 0 || jump_to_card != current_card) {
iveresov@1182 254 // We did some useful work in the previous iteration.
iveresov@1182 255 // Decrease the distance.
iveresov@1182 256 skip_distance = MAX2(skip_distance >> 1, _min_skip_distance);
iveresov@1182 257 } else {
iveresov@1182 258 // Previous iteration resulted in a claim failure.
iveresov@1182 259 // Increase the distance.
iveresov@1182 260 skip_distance = MIN2(skip_distance << 1, _max_skip_distance);
iveresov@1182 261 }
iveresov@1182 262 jump_to_card = current_card + skip_distance;
iveresov@1182 263 }
ysr@777 264 }
iveresov@1182 265 ++current_card;
ysr@777 266 }
iveresov@1182 267 if (!_try_claimed) {
iveresov@1182 268 hrrs->set_iter_complete();
iveresov@1182 269 }
ysr@777 270 return false;
ysr@777 271 }
ysr@777 272 // Set all cards back to clean.
ysr@777 273 void cleanup() {_g1h->cleanUpCardTable();}
ysr@777 274 size_t cards_done() { return _cards_done;}
ysr@777 275 size_t cards_looked_up() { return _cards;}
ysr@777 276 };
ysr@777 277
ysr@777 278 // We want the parallel threads to start their scanning at
ysr@777 279 // different collection set regions to avoid contention.
ysr@777 280 // If we have:
ysr@777 281 // n collection set regions
ysr@777 282 // p threads
ysr@777 283 // Then thread t will start at region t * floor (n/p)
ysr@777 284
ysr@777 285 HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
ysr@777 286 HeapRegion* result = _g1p->collection_set();
ysr@777 287 if (ParallelGCThreads > 0) {
ysr@777 288 size_t cs_size = _g1p->collection_set_size();
ysr@777 289 int n_workers = _g1->workers()->total_workers();
ysr@777 290 size_t cs_spans = cs_size / n_workers;
ysr@777 291 size_t ind = cs_spans * worker_i;
ysr@777 292 for (size_t i = 0; i < ind; i++)
ysr@777 293 result = result->next_in_collection_set();
ysr@777 294 }
ysr@777 295 return result;
ysr@777 296 }
ysr@777 297
// Scan the remembered sets of the collection-set regions, applying "oc"
// to references into the CS.  Two passes over the CS from a per-worker
// start region: the first claims unclaimed regions, the second ("try
// claimed") helps finish regions others claimed but haven't completed.
// Time is split between RS scanning and closure application and reported
// to the policy under "worker_i".
void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion *startRegion = calculateStartRegion(worker_i);

  // Buffer the oops so closure application can be timed separately.
  BufferingOopsInHeapRegionClosure boc(oc);
  ScanRSClosure scanRScl(&boc, worker_i);
  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  boc.done();
  double closure_app_time_sec = boc.closure_app_seconds();
  // Closure-application time is charged to object copy, not RS scan.
  double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
    closure_app_time_sec;
  double closure_app_time_ms = closure_app_time_sec * 1000.0;

  assert( _cards_scanned != NULL, "invariant" );
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0);
  _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);

  // Fold any time spent scanning new refs into object-copy time as well,
  // since that scan applied the same copying closure.
  double scan_new_refs_time_ms = _g1p->get_scan_new_refs_time(worker_i);
  if (scan_new_refs_time_ms > 0.0) {
    closure_app_time_ms += scan_new_refs_time_ms;
  }

  _g1p->record_obj_copy_time(worker_i, closure_app_time_ms);
}
ysr@777 327
ysr@777 328 void HRInto_G1RemSet::updateRS(int worker_i) {
ysr@777 329 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
ysr@777 330
ysr@777 331 double start = os::elapsedTime();
ysr@777 332 _g1p->record_update_rs_start_time(worker_i, start * 1000.0);
ysr@777 333
iveresov@1229 334 // Apply the appropriate closure to all remaining log entries.
iveresov@1229 335 _g1->iterate_dirty_card_closure(false, worker_i);
iveresov@1229 336 // Now there should be no dirty cards.
iveresov@1229 337 if (G1RSLogCheckCardTable) {
iveresov@1229 338 CountNonCleanMemRegionClosure cl(_g1);
iveresov@1229 339 _ct_bs->mod_card_iterate(&cl);
iveresov@1229 340 // XXX This isn't true any more: keeping cards of young regions
iveresov@1229 341 // marked dirty broke it. Need some reasonable fix.
iveresov@1229 342 guarantee(cl.n() == 0, "Card table should be clean.");
ysr@777 343 }
iveresov@1229 344
ysr@777 345 _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
ysr@777 346 }
ysr@777 347
#ifndef PRODUCT
// Debug-only closure: print the remembered set of each region visited
// and accumulate the total RS occupancy.
class PrintRSClosure : public HeapRegionClosure {
  int _count;
public:
  PrintRSClosure() : _count(0) {}
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    _count += (int) hrrs->occupied();
    if (hrrs->occupied() == 0) {
      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                          "has no remset entries\n",
                          r->bottom(), r->end());
    } else {
      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
                          r->bottom(), r->end());
      r->print();
      hrrs->print();
      gclog_or_tty->print("\nDone printing rem set\n");
    }
    return false;  // continue iteration over all regions
  }
  // Total number of RS entries across all regions visited.
  int occupied() {return _count;}
};
#endif
ysr@777 372
ysr@777 373 class CountRSSizeClosure: public HeapRegionClosure {
ysr@777 374 size_t _n;
ysr@777 375 size_t _tot;
ysr@777 376 size_t _max;
ysr@777 377 HeapRegion* _max_r;
ysr@777 378 enum {
ysr@777 379 N = 20,
ysr@777 380 MIN = 6
ysr@777 381 };
ysr@777 382 int _histo[N];
ysr@777 383 public:
ysr@777 384 CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
ysr@777 385 for (int i = 0; i < N; i++) _histo[i] = 0;
ysr@777 386 }
ysr@777 387 bool doHeapRegion(HeapRegion* r) {
ysr@777 388 if (!r->continuesHumongous()) {
ysr@777 389 size_t occ = r->rem_set()->occupied();
ysr@777 390 _n++;
ysr@777 391 _tot += occ;
ysr@777 392 if (occ > _max) {
ysr@777 393 _max = occ;
ysr@777 394 _max_r = r;
ysr@777 395 }
ysr@777 396 // Fit it into a histo bin.
ysr@777 397 int s = 1 << MIN;
ysr@777 398 int i = 0;
ysr@777 399 while (occ > (size_t) s && i < (N-1)) {
ysr@777 400 s = s << 1;
ysr@777 401 i++;
ysr@777 402 }
ysr@777 403 _histo[i]++;
ysr@777 404 }
ysr@777 405 return false;
ysr@777 406 }
ysr@777 407 size_t n() { return _n; }
ysr@777 408 size_t tot() { return _tot; }
ysr@777 409 size_t mx() { return _max; }
ysr@777 410 HeapRegion* mxr() { return _max_r; }
ysr@777 411 void print_histo() {
ysr@777 412 int mx = N;
ysr@777 413 while (mx >= 0) {
ysr@777 414 if (_histo[mx-1] > 0) break;
ysr@777 415 mx--;
ysr@777 416 }
ysr@777 417 gclog_or_tty->print_cr("Number of regions with given RS sizes:");
ysr@777 418 gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]);
ysr@777 419 for (int i = 1; i < mx-1; i++) {
ysr@777 420 gclog_or_tty->print_cr(" %8d - %8d %8d",
ysr@777 421 (1 << (MIN + i - 1)) + 1,
ysr@777 422 1 << (MIN + i),
ysr@777 423 _histo[i]);
ysr@777 424 }
ysr@777 425 gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
ysr@777 426 }
ysr@777 427 };
ysr@777 428
// Re-examine the locations recorded in worker "worker_i"'s new-refs
// array; T is oop* or narrowOop* depending on compressed-oops mode.
// Locations still pointing into the collection set get "oc" applied;
// the rest were already handled by another thread.
// NOTE(review): locals "ct_bs" and (in debug builds) "to" are computed
// but never used below -- presumably left over from removed checks.
template <class T> void
HRInto_G1RemSet::scanNewRefsRS_work(OopsInHeapRegionClosure* oc,
                                    int worker_i) {
  double scan_new_refs_start_sec = os::elapsedTime();
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
  for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
    T* p = (T*) _new_refs[worker_i]->at(i);
    oop obj = oopDesc::load_decode_heap_oop(p);
    // *p was in the collection set when p was pushed on "_new_refs", but
    // another thread may have processed this location from an RS, so it
    // might not point into the CS any longer. If so, it's obviously been
    // processed, and we don't need to do anything further.
    if (g1h->obj_in_cs(obj)) {
      HeapRegion* r = g1h->heap_region_containing(p);

      DEBUG_ONLY(HeapRegion* to = g1h->heap_region_containing(obj));
      oc->set_region(r);
      // If "p" has already been processed concurrently, this is
      // idempotent.
      oc->do_oop(p);
    }
  }
  // Report elapsed time (ms) to the policy for this worker.
  _g1p->record_scan_new_refs_time(worker_i,
                                  (os::elapsedTime() - scan_new_refs_start_sec)
                                  * 1000.0);
}
ysr@777 456
// Static cleanup hook for the heap-region remembered-set implementation,
// invoked before a collection's RS processing begins.
void HRInto_G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}
ysr@777 460
// Per-worker top-level RS processing for a collection pause: update the
// remembered sets from the dirty-card logs, rescan recorded new refs,
// then scan the RSes of the collection-set regions with "oc".
void
HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                             int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  // Only one worker resets the hot-card-cache counts.
  if (worker_i == 0) {
    _cg1r->clear_and_record_card_counts();
  }

  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
                  "max region is " PTR_FORMAT,
                  count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                  count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }

  if (ParallelGCThreads > 0) {
    // The two flags below were introduced temporarily to serialize
    // the updating and scanning of remembered sets. There are some
    // race conditions when these two operations are done in parallel
    // and they are causing failures. When we resolve said race
    // conditions, we'll revert back to parallel remembered set
    // updating and scanning. See CRs 6677707 and 6677708.
    if (G1ParallelRSetUpdatingEnabled || (worker_i == 0)) {
      updateRS(worker_i);
      scanNewRefsRS(oc, worker_i);
    } else {
      // This worker skips the serialized phase: record zero times so
      // the per-worker statistics stay consistent.
      _g1p->record_update_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
      _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
      _g1p->record_update_rs_time(worker_i, 0.0);
      _g1p->record_scan_new_refs_time(worker_i, 0.0);
    }
    if (G1ParallelRSetScanningEnabled || (worker_i == 0)) {
      scanRS(oc, worker_i);
    } else {
      _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
      _g1p->record_scan_rs_time(worker_i, 0.0);
    }
  } else {
    // Serial GC: worker 0 does everything.
    assert(worker_i == 0, "invariant");
    updateRS(0);
    scanNewRefsRS(oc, 0);
    scanRS(oc, 0);
  }
}
ysr@777 511
ysr@777 512 void HRInto_G1RemSet::
ysr@777 513 prepare_for_oops_into_collection_set_do() {
ysr@777 514 #if G1_REM_SET_LOGGING
ysr@777 515 PrintRSClosure cl;
ysr@777 516 _g1->collection_set_iterate(&cl);
ysr@777 517 #endif
ysr@777 518 cleanupHRRS();
ysr@777 519 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
ysr@777 520 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 521 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 522 dcqs.concatenate_logs();
ysr@777 523
ysr@777 524 assert(!_par_traversal_in_progress, "Invariant between iterations.");
ysr@777 525 if (ParallelGCThreads > 0) {
ysr@777 526 set_par_traversal(true);
iveresov@1051 527 _seq_task->set_par_threads((int)n_workers());
ysr@777 528 }
ysr@777 529 guarantee( _cards_scanned == NULL, "invariant" );
ysr@777 530 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
apetrusenko@980 531 for (uint i = 0; i < n_workers(); ++i) {
apetrusenko@980 532 _cards_scanned[i] = 0;
apetrusenko@980 533 }
ysr@777 534 _total_cards_scanned = 0;
ysr@777 535 }
ysr@777 536
ysr@777 537
ysr@777 538 class cleanUpIteratorsClosure : public HeapRegionClosure {
ysr@777 539 bool doHeapRegion(HeapRegion *r) {
ysr@777 540 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 541 hrrs->init_for_par_iteration();
ysr@777 542 return false;
ysr@777 543 }
ysr@777 544 };
ysr@777 545
// Used after evacuation failure: immediately re-add to a collection-set
// region's remembered set any reference that still points into the CS
// (such objects stayed in place).
class UpdateRSetOopsIntoCSImmediate : public OopClosure {
  G1CollectedHeap* _g1;
public:
  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    // NOTE(review): "to" is dereferenced without a NULL check, implying
    // the referent is assumed non-NULL and inside the G1 heap -- confirm
    // against the evacuation-failure callers.
    HeapRegion* to = _g1->heap_region_containing(oopDesc::load_decode_heap_oop(p));
    if (to->in_collection_set()) {
      to->rem_set()->add_reference(p, 0);  // attributed to worker 0
    }
  }
};
iveresov@1051 559
// Used after evacuation failure with G1DeferredRSUpdate: instead of
// updating remembered sets directly, mark the card containing "p" as
// deferred and enqueue it for later processing.
class UpdateRSetOopsIntoCSDeferred : public OopClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;
  DirtyCardQueue* _dcq;
public:
  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) {
      size_t card_index = _ct_bs->index_for(p);
      // Enqueue each card at most once: mark_card_deferred returns true
      // only for the first marker.
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};
iveresov@1051 579
ysr@1280 580 template <class T> void HRInto_G1RemSet::new_refs_iterate_work(OopClosure* cl) {
iveresov@1051 581 for (size_t i = 0; i < n_workers(); i++) {
iveresov@1051 582 for (int j = 0; j < _new_refs[i]->length(); j++) {
ysr@1280 583 T* p = (T*) _new_refs[i]->at(j);
iveresov@1051 584 cl->do_oop(p);
iveresov@1051 585 }
iveresov@1051 586 }
iveresov@1051 587 }
iveresov@1051 588
// Teardown after all workers finish oops_into_collection_set_do():
// total the per-worker card counts, re-enable concurrent refinement,
// reset region RS iterators, clean the card table, and -- on evacuation
// failure -- restore RS entries for references into the collection set.
void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee( _cards_scanned != NULL, "invariant" );
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  if (ParallelGCThreads > 0) {
    set_par_traversal(false);
  }

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into
    // the collection set.
    if (G1DeferredRSUpdate) {
      // Deferred mode: route the restorations through a private dirty
      // card queue instead of touching the RSes directly.
      DirtyCardQueue dcq(&_g1->dirty_card_queue_set());
      UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq);
      new_refs_iterate(&deferred_update);
    } else {
      UpdateRSetOopsIntoCSImmediate immediate_update(_g1);
      new_refs_iterate(&immediate_update);
    }
  }
  // Empty the per-worker new-refs arrays for the next collection.
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i]->clear();
  }

  assert(!_par_traversal_in_progress, "Invariant between iterations.");
}
ysr@777 629
ysr@777 630 class UpdateRSObjectClosure: public ObjectClosure {
ysr@777 631 UpdateRSOopClosure* _update_rs_oop_cl;
ysr@777 632 public:
ysr@777 633 UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) :
ysr@777 634 _update_rs_oop_cl(update_rs_oop_cl) {}
ysr@777 635 void do_object(oop obj) {
ysr@777 636 obj->oop_iterate(_update_rs_oop_cl);
ysr@777 637 }
ysr@777 638
ysr@777 639 };
ysr@777 640
// Scrubs each region's remembered set against the liveness bitmaps,
// dropping entries that refer to dead regions/cards.
class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;
  BitMap* _card_bm;
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(NULL)
  {
    // The barrier set is required to be card-table based.
    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ctbs = (CardTableModRefBS*)bs;
  }

  bool doHeapRegion(HeapRegion* r) {
    // Continuation regions of humongous objects have no RS of their own.
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};
ysr@777 664
ysr@777 665 void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
ysr@777 666 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 667 _g1->heap_region_iterate(&scrub_cl);
ysr@777 668 }
ysr@777 669
ysr@777 670 void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
ysr@777 671 int worker_num, int claim_val) {
ysr@777 672 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 673 _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
ysr@777 674 }
ysr@777 675
ysr@777 676
// Histogram of out-of-region reference counts (buckets of width 50,
// starting at 50).
static IntHistogram out_of_histo(50, 50);
ysr@777 678
ysr@777 679 void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
ysr@777 680 // If the card is no longer dirty, nothing to do.
ysr@777 681 if (*card_ptr != CardTableModRefBS::dirty_card_val()) return;
ysr@777 682
ysr@777 683 // Construct the region representing the card.
ysr@777 684 HeapWord* start = _ct_bs->addr_for(card_ptr);
ysr@777 685 // And find the region containing it.
ysr@777 686 HeapRegion* r = _g1->heap_region_containing(start);
ysr@777 687 if (r == NULL) {
ysr@777 688 guarantee(_g1->is_in_permanent(start), "Or else where?");
ysr@777 689 return; // Not in the G1 heap (might be in perm, for example.)
ysr@777 690 }
ysr@777 691 // Why do we have to check here whether a card is on a young region,
ysr@777 692 // given that we dirty young regions and, as a result, the
ysr@777 693 // post-barrier is supposed to filter them out and never to enqueue
ysr@777 694 // them? When we allocate a new region as the "allocation region" we
ysr@777 695 // actually dirty its cards after we release the lock, since card
ysr@777 696 // dirtying while holding the lock was a performance bottleneck. So,
ysr@777 697 // as a result, it is possible for other threads to actually
ysr@777 698 // allocate objects in the region (after the acquire the lock)
ysr@777 699 // before all the cards on the region are dirtied. This is unlikely,
ysr@777 700 // and it doesn't happen often, but it can happen. So, the extra
ysr@777 701 // check below filters out those cards.
iveresov@1072 702 if (r->is_young()) {
ysr@777 703 return;
ysr@777 704 }
ysr@777 705 // While we are processing RSet buffers during the collection, we
ysr@777 706 // actually don't want to scan any cards on the collection set,
ysr@777 707 // since we don't want to update remebered sets with entries that
ysr@777 708 // point into the collection set, given that live objects from the
ysr@777 709 // collection set are about to move and such entries will be stale
ysr@777 710 // very soon. This change also deals with a reliability issue which
ysr@777 711 // involves scanning a card in the collection set and coming across
ysr@777 712 // an array that was being chunked and looking malformed. Note,
ysr@777 713 // however, that if evacuation fails, we have to scan any objects
ysr@777 714 // that were not moved and create any missing entries.
ysr@777 715 if (r->in_collection_set()) {
ysr@777 716 return;
ysr@777 717 }
ysr@777 718
ysr@777 719 // Should we defer it?
ysr@777 720 if (_cg1r->use_cache()) {
ysr@777 721 card_ptr = _cg1r->cache_insert(card_ptr);
ysr@777 722 // If it was not an eviction, nothing to do.
ysr@777 723 if (card_ptr == NULL) return;
ysr@777 724
ysr@777 725 // OK, we have to reset the card start, region, etc.
ysr@777 726 start = _ct_bs->addr_for(card_ptr);
ysr@777 727 r = _g1->heap_region_containing(start);
ysr@777 728 if (r == NULL) {
ysr@777 729 guarantee(_g1->is_in_permanent(start), "Or else where?");
ysr@777 730 return; // Not in the G1 heap (might be in perm, for example.)
ysr@777 731 }
ysr@777 732 guarantee(!r->is_young(), "It was evicted in the current minor cycle.");
ysr@777 733 }
ysr@777 734
ysr@777 735 HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
ysr@777 736 MemRegion dirtyRegion(start, end);
ysr@777 737
ysr@777 738 #if CARD_REPEAT_HISTO
ysr@777 739 init_ct_freq_table(_g1->g1_reserved_obj_bytes());
ysr@777 740 ct_freq_note_card(_ct_bs->index_for(start));
ysr@777 741 #endif
ysr@777 742
ysr@777 743 UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
ysr@777 744 update_rs_oop_cl.set_from(r);
ysr@777 745 FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, &update_rs_oop_cl);
ysr@777 746
ysr@777 747 // Undirty the card.
ysr@777 748 *card_ptr = CardTableModRefBS::clean_card_val();
ysr@777 749 // We must complete this write before we do any of the reads below.
ysr@777 750 OrderAccess::storeload();
ysr@777 751 // And process it, being careful of unallocated portions of TLAB's.
ysr@777 752 HeapWord* stop_point =
ysr@777 753 r->oops_on_card_seq_iterate_careful(dirtyRegion,
ysr@777 754 &filter_then_update_rs_oop_cl);
ysr@777 755 // If stop_point is non-null, then we encountered an unallocated region
ysr@777 756 // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
ysr@777 757 // card and re-enqueue: if we put off the card until a GC pause, then the
ysr@777 758 // unallocated portion will be filled in. Alternatively, we might try
ysr@777 759 // the full complexity of the technique used in "regular" precleaning.
ysr@777 760 if (stop_point != NULL) {
ysr@777 761 // The card might have gotten re-dirtied and re-enqueued while we
ysr@777 762 // worked. (In fact, it's pretty likely.)
ysr@777 763 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
ysr@777 764 *card_ptr = CardTableModRefBS::dirty_card_val();
ysr@777 765 MutexLockerEx x(Shared_DirtyCardQ_lock,
ysr@777 766 Mutex::_no_safepoint_check_flag);
ysr@777 767 DirtyCardQueue* sdcq =
ysr@777 768 JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
ysr@777 769 sdcq->enqueue(card_ptr);
ysr@777 770 }
ysr@777 771 } else {
ysr@777 772 out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
ysr@777 773 _conc_refine_cards++;
ysr@777 774 }
ysr@777 775 }
ysr@777 776
ysr@777 777 class HRRSStatsIter: public HeapRegionClosure {
ysr@777 778 size_t _occupied;
ysr@777 779 size_t _total_mem_sz;
ysr@777 780 size_t _max_mem_sz;
ysr@777 781 HeapRegion* _max_mem_sz_region;
ysr@777 782 public:
ysr@777 783 HRRSStatsIter() :
ysr@777 784 _occupied(0),
ysr@777 785 _total_mem_sz(0),
ysr@777 786 _max_mem_sz(0),
ysr@777 787 _max_mem_sz_region(NULL)
ysr@777 788 {}
ysr@777 789
ysr@777 790 bool doHeapRegion(HeapRegion* r) {
ysr@777 791 if (r->continuesHumongous()) return false;
ysr@777 792 size_t mem_sz = r->rem_set()->mem_size();
ysr@777 793 if (mem_sz > _max_mem_sz) {
ysr@777 794 _max_mem_sz = mem_sz;
ysr@777 795 _max_mem_sz_region = r;
ysr@777 796 }
ysr@777 797 _total_mem_sz += mem_sz;
ysr@777 798 size_t occ = r->rem_set()->occupied();
ysr@777 799 _occupied += occ;
ysr@777 800 return false;
ysr@777 801 }
ysr@777 802 size_t total_mem_sz() { return _total_mem_sz; }
ysr@777 803 size_t max_mem_sz() { return _max_mem_sz; }
ysr@777 804 size_t occupied() { return _occupied; }
ysr@777 805 HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
ysr@777 806 };
ysr@777 807
iveresov@1229 808 class PrintRSThreadVTimeClosure : public ThreadClosure {
iveresov@1229 809 public:
iveresov@1229 810 virtual void do_thread(Thread *t) {
iveresov@1229 811 ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
iveresov@1229 812 gclog_or_tty->print(" %5.2f", crt->vtime_accum());
iveresov@1229 813 }
iveresov@1229 814 };
iveresov@1229 815
ysr@777 816 void HRInto_G1RemSet::print_summary_info() {
ysr@777 817 G1CollectedHeap* g1 = G1CollectedHeap::heap();
ysr@777 818
ysr@777 819 #if CARD_REPEAT_HISTO
ysr@777 820 gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
ysr@777 821 gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
ysr@777 822 card_repeat_count.print_on(gclog_or_tty);
ysr@777 823 #endif
ysr@777 824
ysr@777 825 if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
ysr@777 826 gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
ysr@777 827 gclog_or_tty->print_cr(" # of CS ptrs --> # of cards with that number.");
ysr@777 828 out_of_histo.print_on(gclog_or_tty);
ysr@777 829 }
iveresov@1229 830 gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
iveresov@1229 831 _conc_refine_cards);
ysr@777 832 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 833 jint tot_processed_buffers =
ysr@777 834 dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
ysr@777 835 gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
iveresov@1229 836 gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
ysr@777 837 dcqs.processed_buffers_rs_thread(),
ysr@777 838 100.0*(float)dcqs.processed_buffers_rs_thread()/
ysr@777 839 (float)tot_processed_buffers);
ysr@777 840 gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
ysr@777 841 dcqs.processed_buffers_mut(),
ysr@777 842 100.0*(float)dcqs.processed_buffers_mut()/
ysr@777 843 (float)tot_processed_buffers);
iveresov@1229 844 gclog_or_tty->print_cr(" Conc RS threads times(s)");
iveresov@1229 845 PrintRSThreadVTimeClosure p;
iveresov@1229 846 gclog_or_tty->print(" ");
iveresov@1229 847 g1->concurrent_g1_refine()->threads_do(&p);
ysr@777 848 gclog_or_tty->print_cr("");
iveresov@1229 849
ysr@777 850 if (G1UseHRIntoRS) {
ysr@777 851 HRRSStatsIter blk;
ysr@777 852 g1->heap_region_iterate(&blk);
ysr@777 853 gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
ysr@777 854 " Max = " SIZE_FORMAT "K.",
ysr@777 855 blk.total_mem_sz()/K, blk.max_mem_sz()/K);
ysr@777 856 gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K,"
ysr@777 857 " free_lists = " SIZE_FORMAT "K.",
ysr@777 858 HeapRegionRemSet::static_mem_size()/K,
ysr@777 859 HeapRegionRemSet::fl_mem_size()/K);
ysr@777 860 gclog_or_tty->print_cr(" %d occupied cards represented.",
ysr@777 861 blk.occupied());
ysr@777 862 gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
apetrusenko@1112 863 ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
ysr@777 864 blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
ysr@777 865 (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
ysr@777 866 (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
ysr@777 867 gclog_or_tty->print_cr(" Did %d coarsenings.",
ysr@777 868 HeapRegionRemSet::n_coarsenings());
ysr@777 869
ysr@777 870 }
ysr@777 871 }
ysr@777 872 void HRInto_G1RemSet::prepare_for_verify() {
iveresov@1072 873 if (G1HRRSFlushLogBuffersOnVerify &&
iveresov@1072 874 (VerifyBeforeGC || VerifyAfterGC)
iveresov@1072 875 && !_g1->full_collection()) {
ysr@777 876 cleanupHRRS();
ysr@777 877 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 878 if (SafepointSynchronize::is_at_safepoint()) {
ysr@777 879 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 880 dcqs.concatenate_logs();
ysr@777 881 }
ysr@777 882 bool cg1r_use_cache = _cg1r->use_cache();
ysr@777 883 _cg1r->set_use_cache(false);
ysr@777 884 updateRS(0);
ysr@777 885 _cg1r->set_use_cache(cg1r_use_cache);
iveresov@1072 886
iveresov@1072 887 assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
ysr@777 888 }
ysr@777 889 }

mercurial