src/share/vm/gc_implementation/g1/g1RemSet.cpp

Mon, 19 Jul 2010 11:06:34 -0700

author
johnc
date
Mon, 19 Jul 2010 11:06:34 -0700
changeset 2021
5cbac8938c4c
parent 1966
215576b54709
child 2060
2d160770d2e5
permissions
-rw-r--r--

6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
Summary: During concurrent refinement, filter cards in young regions only after it has been determined that the region has been allocated from and the young type of the region has been set.
Reviewed-by: iveresov, tonyp, jcoomes

ysr@777 1 /*
johnc@2021 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_g1RemSet.cpp.incl"
ysr@777 27
ysr@777 28 #define CARD_REPEAT_HISTO 0
ysr@777 29
ysr@777 30 #if CARD_REPEAT_HISTO
ysr@777 31 static size_t ct_freq_sz;
ysr@777 32 static jbyte* ct_freq = NULL;
ysr@777 33
ysr@777 34 void init_ct_freq_table(size_t heap_sz_bytes) {
ysr@777 35 if (ct_freq == NULL) {
ysr@777 36 ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
ysr@777 37 ct_freq = new jbyte[ct_freq_sz];
ysr@777 38 for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
ysr@777 39 }
ysr@777 40 }
ysr@777 41
ysr@777 42 void ct_freq_note_card(size_t index) {
ysr@777 43 assert(0 <= index && index < ct_freq_sz, "Bounds error.");
ysr@777 44 if (ct_freq[index] < 100) { ct_freq[index]++; }
ysr@777 45 }
ysr@777 46
ysr@777 47 static IntHistogram card_repeat_count(10, 10);
ysr@777 48
ysr@777 49 void ct_freq_update_histo_and_reset() {
ysr@777 50 for (size_t j = 0; j < ct_freq_sz; j++) {
ysr@777 51 card_repeat_count.add_entry(ct_freq[j]);
ysr@777 52 ct_freq[j] = 0;
ysr@777 53 }
ysr@777 54
ysr@777 55 }
ysr@777 56 #endif
ysr@777 57
ysr@777 58
// Filtering oop closure: forwards to the wrapped closure "_blk" only
// those references whose target object lies in the collection set.
class IntoCSOopClosure: public OopsInHeapRegionClosure {
  OopsInHeapRegionClosure* _blk;  // closure applied to into-cset references
  G1CollectedHeap* _g1;
public:
  IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
    _g1(g1), _blk(blk) {}
  // Propagate the "from" region to the wrapped closure.
  void set_region(HeapRegion* from) {
    _blk->set_region(from);
  }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    // Only references pointing into the collection set are interesting.
    if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
  }
  // Safe on a weak reference's discovered field, and (by contract of
  // these accessors) applying the closure twice equals applying it once.
  bool apply_to_weak_ref_discovered_field() { return true; }
  bool idempotent() { return true; }
};
ysr@777 77
ysr@777 78 class IntoCSRegionClosure: public HeapRegionClosure {
ysr@777 79 IntoCSOopClosure _blk;
ysr@777 80 G1CollectedHeap* _g1;
ysr@777 81 public:
ysr@777 82 IntoCSRegionClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
ysr@777 83 _g1(g1), _blk(g1, blk) {}
ysr@777 84 bool doHeapRegion(HeapRegion* r) {
ysr@777 85 if (!r->in_collection_set()) {
ysr@777 86 _blk.set_region(r);
ysr@777 87 if (r->isHumongous()) {
ysr@777 88 if (r->startsHumongous()) {
ysr@777 89 oop obj = oop(r->bottom());
ysr@777 90 obj->oop_iterate(&_blk);
ysr@777 91 }
ysr@777 92 } else {
ysr@777 93 r->oop_before_save_marks_iterate(&_blk);
ysr@777 94 }
ysr@777 95 }
ysr@777 96 return false;
ysr@777 97 }
ysr@777 98 };
ysr@777 99
ysr@777 100 void
ysr@777 101 StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
ysr@777 102 int worker_i) {
ysr@777 103 IntoCSRegionClosure rc(_g1, oc);
ysr@777 104 _g1->heap_region_iterate(&rc);
ysr@777 105 }
ysr@777 106
// Verification closure: asserts that no reference reachable from a
// supposedly clean card points into the collection set (such a
// reference should have been captured by a remembered set).
class VerifyRSCleanCardOopClosure: public OopClosure {
  G1CollectedHeap* _g1;
public:
  VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    // NULL region (object outside the G1 heap) is acceptable.
    HeapRegion* to = _g1->heap_region_containing(obj);
    guarantee(to == NULL || !to->in_collection_set(),
              "Missed a rem set member.");
  }
};
ysr@777 121
// Constructor: caches the card table, policy and concurrent-refinement
// objects, creates the sequential task manager, and allocates one
// C-heap growable buffer of evacuation-discovered references
// ("new refs") per GC worker thread.
HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _par_traversal_in_progress(false), _new_refs(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  // Per-worker buffers so workers can record references without locking.
  _new_refs = NEW_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i] = new (ResourceObj::C_HEAP) GrowableArray<OopOrNarrowOopStar>(8192,true);
  }
}
ysr@777 135
// Destructor: releases the task manager and every per-worker
// "new refs" buffer allocated by the constructor, then frees the
// buffer-pointer array itself.
HRInto_G1RemSet::~HRInto_G1RemSet() {
  delete _seq_task;
  for (uint i = 0; i < n_workers(); i++) {
    delete _new_refs[i];
  }
  FREE_C_HEAP_ARRAY(GrowableArray<OopOrNarrowOopStar>*, _new_refs);
}
ysr@777 143
ysr@777 144 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
ysr@777 145 if (_g1->is_in_g1_reserved(mr.start())) {
ysr@777 146 _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
ysr@777 147 if (_start_first == NULL) _start_first = mr.start();
ysr@777 148 }
ysr@777 149 }
ysr@777 150
ysr@777 151 class ScanRSClosure : public HeapRegionClosure {
ysr@777 152 size_t _cards_done, _cards;
ysr@777 153 G1CollectedHeap* _g1h;
ysr@777 154 OopsInHeapRegionClosure* _oc;
ysr@777 155 G1BlockOffsetSharedArray* _bot_shared;
ysr@777 156 CardTableModRefBS *_ct_bs;
ysr@777 157 int _worker_i;
iveresov@1696 158 int _block_size;
ysr@777 159 bool _try_claimed;
ysr@777 160 public:
ysr@777 161 ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
ysr@777 162 _oc(oc),
ysr@777 163 _cards(0),
ysr@777 164 _cards_done(0),
ysr@777 165 _worker_i(worker_i),
ysr@777 166 _try_claimed(false)
ysr@777 167 {
ysr@777 168 _g1h = G1CollectedHeap::heap();
ysr@777 169 _bot_shared = _g1h->bot_shared();
ysr@777 170 _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
iveresov@1696 171 _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
ysr@777 172 }
ysr@777 173
ysr@777 174 void set_try_claimed() { _try_claimed = true; }
ysr@777 175
ysr@777 176 void scanCard(size_t index, HeapRegion *r) {
ysr@777 177 _cards_done++;
ysr@777 178 DirtyCardToOopClosure* cl =
ysr@777 179 r->new_dcto_closure(_oc,
ysr@777 180 CardTableModRefBS::Precise,
ysr@777 181 HeapRegionDCTOC::IntoCSFilterKind);
ysr@777 182
ysr@777 183 // Set the "from" region in the closure.
ysr@777 184 _oc->set_region(r);
ysr@777 185 HeapWord* card_start = _bot_shared->address_for_index(index);
ysr@777 186 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
ysr@777 187 Space *sp = SharedHeap::heap()->space_containing(card_start);
ysr@777 188 MemRegion sm_region;
ysr@777 189 if (ParallelGCThreads > 0) {
ysr@777 190 // first find the used area
ysr@777 191 sm_region = sp->used_region_at_save_marks();
ysr@777 192 } else {
ysr@777 193 // The closure is not idempotent. We shouldn't look at objects
ysr@777 194 // allocated during the GC.
ysr@777 195 sm_region = sp->used_region_at_save_marks();
ysr@777 196 }
ysr@777 197 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
ysr@777 198 if (!mr.is_empty()) {
ysr@777 199 cl->do_MemRegion(mr);
ysr@777 200 }
ysr@777 201 }
ysr@777 202
ysr@777 203 void printCard(HeapRegion* card_region, size_t card_index,
ysr@777 204 HeapWord* card_start) {
ysr@777 205 gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
ysr@777 206 "RS names card %p: "
ysr@777 207 "[" PTR_FORMAT ", " PTR_FORMAT ")",
ysr@777 208 _worker_i,
ysr@777 209 card_region->bottom(), card_region->end(),
ysr@777 210 card_index,
ysr@777 211 card_start, card_start + G1BlockOffsetSharedArray::N_words);
ysr@777 212 }
ysr@777 213
ysr@777 214 bool doHeapRegion(HeapRegion* r) {
ysr@777 215 assert(r->in_collection_set(), "should only be called on elements of CS.");
ysr@777 216 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 217 if (hrrs->iter_is_complete()) return false; // All done.
ysr@777 218 if (!_try_claimed && !hrrs->claim_iter()) return false;
apetrusenko@1231 219 _g1h->push_dirty_cards_region(r);
ysr@777 220 // If we didn't return above, then
ysr@777 221 // _try_claimed || r->claim_iter()
ysr@777 222 // is true: either we're supposed to work on claimed-but-not-complete
ysr@777 223 // regions, or we successfully claimed the region.
ysr@777 224 HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
ysr@777 225 hrrs->init_iterator(iter);
ysr@777 226 size_t card_index;
iveresov@1696 227
iveresov@1696 228 // We claim cards in block so as to recude the contention. The block size is determined by
iveresov@1696 229 // the G1RSetScanBlockSize parameter.
iveresov@1696 230 size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1696 231 for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
iveresov@1696 232 if (current_card >= jump_to_card + _block_size) {
iveresov@1696 233 jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1182 234 }
iveresov@1696 235 if (current_card < jump_to_card) continue;
ysr@777 236 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
ysr@777 237 #if 0
ysr@777 238 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
ysr@777 239 card_start, card_start + CardTableModRefBS::card_size_in_words);
ysr@777 240 #endif
ysr@777 241
ysr@777 242 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
ysr@777 243 assert(card_region != NULL, "Yielding cards not in the heap?");
ysr@777 244 _cards++;
ysr@777 245
apetrusenko@1231 246 if (!card_region->is_on_dirty_cards_region_list()) {
apetrusenko@1231 247 _g1h->push_dirty_cards_region(card_region);
apetrusenko@1231 248 }
apetrusenko@1231 249
iveresov@1182 250 // If the card is dirty, then we will scan it during updateRS.
iveresov@1182 251 if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
iveresov@1696 252 // We make the card as "claimed" lazily (so races are possible but they're benign),
iveresov@1696 253 // which reduces the number of duplicate scans (the rsets of the regions in the cset
iveresov@1696 254 // can intersect).
iveresov@1696 255 if (!_ct_bs->is_card_claimed(card_index)) {
iveresov@1696 256 _ct_bs->set_card_claimed(card_index);
iveresov@1696 257 scanCard(card_index, card_region);
iveresov@1696 258 }
ysr@777 259 }
ysr@777 260 }
iveresov@1182 261 if (!_try_claimed) {
iveresov@1182 262 hrrs->set_iter_complete();
iveresov@1182 263 }
ysr@777 264 return false;
ysr@777 265 }
ysr@777 266 // Set all cards back to clean.
ysr@777 267 void cleanup() {_g1h->cleanUpCardTable();}
ysr@777 268 size_t cards_done() { return _cards_done;}
ysr@777 269 size_t cards_looked_up() { return _cards;}
ysr@777 270 };
ysr@777 271
ysr@777 272 // We want the parallel threads to start their scanning at
ysr@777 273 // different collection set regions to avoid contention.
ysr@777 274 // If we have:
ysr@777 275 // n collection set regions
ysr@777 276 // p threads
ysr@777 277 // Then thread t will start at region t * floor (n/p)
ysr@777 278
// Returns the collection-set region at index worker_i * floor(n/p)
// (see the comment above). Note that with more workers than regions
// the integer division makes cs_spans zero, so all workers start at
// the head of the collection-set list. Serial callers always get the
// head of the list.
HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
  HeapRegion* result = _g1p->collection_set();
  if (ParallelGCThreads > 0) {
    size_t cs_size = _g1p->collection_set_size();
    int n_workers = _g1->workers()->total_workers();
    size_t cs_spans = cs_size / n_workers;
    size_t ind = cs_spans * worker_i;
    // Walk the singly-linked collection-set list to the start index.
    for (size_t i = 0; i < ind; i++)
      result = result->next_in_collection_set();
  }
  return result;
}
ysr@777 291
ysr@777 292 void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
ysr@777 293 double rs_time_start = os::elapsedTime();
ysr@777 294 HeapRegion *startRegion = calculateStartRegion(worker_i);
ysr@777 295
iveresov@1696 296 ScanRSClosure scanRScl(oc, worker_i);
ysr@777 297 _g1->collection_set_iterate_from(startRegion, &scanRScl);
ysr@777 298 scanRScl.set_try_claimed();
ysr@777 299 _g1->collection_set_iterate_from(startRegion, &scanRScl);
ysr@777 300
iveresov@1696 301 double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
ysr@777 302
ysr@777 303 assert( _cards_scanned != NULL, "invariant" );
ysr@777 304 _cards_scanned[worker_i] = scanRScl.cards_done();
ysr@777 305
ysr@777 306 _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
ysr@777 307 }
ysr@777 308
ysr@777 309 void HRInto_G1RemSet::updateRS(int worker_i) {
ysr@777 310 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
ysr@777 311
ysr@777 312 double start = os::elapsedTime();
iveresov@1229 313 // Apply the appropriate closure to all remaining log entries.
iveresov@1229 314 _g1->iterate_dirty_card_closure(false, worker_i);
iveresov@1229 315 // Now there should be no dirty cards.
iveresov@1229 316 if (G1RSLogCheckCardTable) {
iveresov@1229 317 CountNonCleanMemRegionClosure cl(_g1);
iveresov@1229 318 _ct_bs->mod_card_iterate(&cl);
iveresov@1229 319 // XXX This isn't true any more: keeping cards of young regions
iveresov@1229 320 // marked dirty broke it. Need some reasonable fix.
iveresov@1229 321 guarantee(cl.n() == 0, "Card table should be clean.");
ysr@777 322 }
iveresov@1229 323
ysr@777 324 _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
ysr@777 325 }
ysr@777 326
ysr@777 327 #ifndef PRODUCT
// Debug-only closure: prints every region's remembered set (or a note
// that it is empty) and accumulates the total number of rem-set
// entries, available via occupied().
class PrintRSClosure : public HeapRegionClosure {
  int _count;  // running total of rem-set entries across regions
public:
  PrintRSClosure() : _count(0) {}
  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    _count += (int) hrrs->occupied();
    if (hrrs->occupied() == 0) {
      gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                          "has no remset entries\n",
                          r->bottom(), r->end());
    } else {
      gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
                          r->bottom(), r->end());
      r->print();
      hrrs->print();
      gclog_or_tty->print("\nDone printing rem set\n");
    }
    return false;
  }
  int occupied() {return _count;}
};
ysr@777 350 #endif
ysr@777 351
ysr@777 352 class CountRSSizeClosure: public HeapRegionClosure {
ysr@777 353 size_t _n;
ysr@777 354 size_t _tot;
ysr@777 355 size_t _max;
ysr@777 356 HeapRegion* _max_r;
ysr@777 357 enum {
ysr@777 358 N = 20,
ysr@777 359 MIN = 6
ysr@777 360 };
ysr@777 361 int _histo[N];
ysr@777 362 public:
ysr@777 363 CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
ysr@777 364 for (int i = 0; i < N; i++) _histo[i] = 0;
ysr@777 365 }
ysr@777 366 bool doHeapRegion(HeapRegion* r) {
ysr@777 367 if (!r->continuesHumongous()) {
ysr@777 368 size_t occ = r->rem_set()->occupied();
ysr@777 369 _n++;
ysr@777 370 _tot += occ;
ysr@777 371 if (occ > _max) {
ysr@777 372 _max = occ;
ysr@777 373 _max_r = r;
ysr@777 374 }
ysr@777 375 // Fit it into a histo bin.
ysr@777 376 int s = 1 << MIN;
ysr@777 377 int i = 0;
ysr@777 378 while (occ > (size_t) s && i < (N-1)) {
ysr@777 379 s = s << 1;
ysr@777 380 i++;
ysr@777 381 }
ysr@777 382 _histo[i]++;
ysr@777 383 }
ysr@777 384 return false;
ysr@777 385 }
ysr@777 386 size_t n() { return _n; }
ysr@777 387 size_t tot() { return _tot; }
ysr@777 388 size_t mx() { return _max; }
ysr@777 389 HeapRegion* mxr() { return _max_r; }
ysr@777 390 void print_histo() {
ysr@777 391 int mx = N;
ysr@777 392 while (mx >= 0) {
ysr@777 393 if (_histo[mx-1] > 0) break;
ysr@777 394 mx--;
ysr@777 395 }
ysr@777 396 gclog_or_tty->print_cr("Number of regions with given RS sizes:");
ysr@777 397 gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]);
ysr@777 398 for (int i = 1; i < mx-1; i++) {
ysr@777 399 gclog_or_tty->print_cr(" %8d - %8d %8d",
ysr@777 400 (1 << (MIN + i - 1)) + 1,
ysr@777 401 1 << (MIN + i),
ysr@777 402 _histo[i]);
ysr@777 403 }
ysr@777 404 gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
ysr@777 405 }
ysr@777 406 };
ysr@777 407
ysr@1280 408 template <class T> void
ysr@1280 409 HRInto_G1RemSet::scanNewRefsRS_work(OopsInHeapRegionClosure* oc,
ysr@1280 410 int worker_i) {
ysr@777 411 double scan_new_refs_start_sec = os::elapsedTime();
ysr@777 412 G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777 413 CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
iveresov@1051 414 for (int i = 0; i < _new_refs[worker_i]->length(); i++) {
ysr@1280 415 T* p = (T*) _new_refs[worker_i]->at(i);
ysr@1280 416 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 417 // *p was in the collection set when p was pushed on "_new_refs", but
ysr@777 418 // another thread may have processed this location from an RS, so it
ysr@777 419 // might not point into the CS any longer. If so, it's obviously been
ysr@777 420 // processed, and we don't need to do anything further.
ysr@777 421 if (g1h->obj_in_cs(obj)) {
ysr@777 422 HeapRegion* r = g1h->heap_region_containing(p);
ysr@777 423
ysr@777 424 DEBUG_ONLY(HeapRegion* to = g1h->heap_region_containing(obj));
ysr@777 425 oc->set_region(r);
ysr@777 426 // If "p" has already been processed concurrently, this is
ysr@777 427 // idempotent.
ysr@777 428 oc->do_oop(p);
ysr@777 429 }
ysr@777 430 }
iveresov@1696 431 double scan_new_refs_time_ms = (os::elapsedTime() - scan_new_refs_start_sec) * 1000.0;
iveresov@1696 432 _g1p->record_scan_new_refs_time(worker_i, scan_new_refs_time_ms);
ysr@777 433 }
ysr@777 434
// Release auxiliary data structures held by the heap-region remembered
// sets (delegates to the static HeapRegionRemSet cleanup).
void HRInto_G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}
ysr@777 438
// Per-worker entry point during an evacuation pause: update remembered
// sets from the dirty-card logs (updateRS), rescan evacuation-recorded
// references (scanNewRefsRS), then scan the cset regions' remembered
// sets (scanRS). Two temporary flags can serialize the update/scan
// phases onto worker 0 (see comment below).
void
HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                             int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  if (worker_i == 0) {
    // Card-count reset is done once per pause, by the first worker.
    _cg1r->clear_and_record_card_counts();
  }

  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    // NOTE(review): count_cl.n()/mx() return size_t but are printed
    // with %d — a format/argument mismatch on LP64; confirm and
    // consider SIZE_FORMAT.
    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
                           "max region is " PTR_FORMAT,
                           count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                           count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }

  if (ParallelGCThreads > 0) {
    // The two flags below were introduced temporarily to serialize
    // the updating and scanning of remembered sets. There are some
    // race conditions when these two operations are done in parallel
    // and they are causing failures. When we resolve said race
    // conditions, we'll revert back to parallel remembered set
    // updating and scanning. See CRs 6677707 and 6677708.
    if (G1UseParallelRSetUpdating || (worker_i == 0)) {
      updateRS(worker_i);
      scanNewRefsRS(oc, worker_i);
    } else {
      // Non-participating worker: record zeros so per-worker policy
      // accounting stays consistent.
      _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
      _g1p->record_update_rs_time(worker_i, 0.0);
      _g1p->record_scan_new_refs_time(worker_i, 0.0);
    }
    if (G1UseParallelRSetScanning || (worker_i == 0)) {
      scanRS(oc, worker_i);
    } else {
      _g1p->record_scan_rs_time(worker_i, 0.0);
    }
  } else {
    // Serial collection: everything runs on worker 0.
    assert(worker_i == 0, "invariant");
    updateRS(0);
    scanNewRefsRS(oc, 0);
    scanRS(oc, 0);
  }
}
ysr@777 487
ysr@777 488 void HRInto_G1RemSet::
ysr@777 489 prepare_for_oops_into_collection_set_do() {
ysr@777 490 #if G1_REM_SET_LOGGING
ysr@777 491 PrintRSClosure cl;
ysr@777 492 _g1->collection_set_iterate(&cl);
ysr@777 493 #endif
ysr@777 494 cleanupHRRS();
ysr@777 495 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
ysr@777 496 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 497 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 498 dcqs.concatenate_logs();
ysr@777 499
ysr@777 500 assert(!_par_traversal_in_progress, "Invariant between iterations.");
ysr@777 501 if (ParallelGCThreads > 0) {
ysr@777 502 set_par_traversal(true);
iveresov@1051 503 _seq_task->set_par_threads((int)n_workers());
ysr@777 504 }
ysr@777 505 guarantee( _cards_scanned == NULL, "invariant" );
ysr@777 506 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
apetrusenko@980 507 for (uint i = 0; i < n_workers(); ++i) {
apetrusenko@980 508 _cards_scanned[i] = 0;
apetrusenko@980 509 }
ysr@777 510 _total_cards_scanned = 0;
ysr@777 511 }
ysr@777 512
ysr@777 513
ysr@777 514 class cleanUpIteratorsClosure : public HeapRegionClosure {
ysr@777 515 bool doHeapRegion(HeapRegion *r) {
ysr@777 516 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 517 hrrs->init_for_par_iteration();
ysr@777 518 return false;
ysr@777 519 }
ysr@777 520 };
ysr@777 521
// Evacuation-failure recovery (immediate form): for each recorded
// reference whose target is still in the collection set, add the
// reference directly back into the target region's remembered set.
class UpdateRSetOopsIntoCSImmediate : public OopClosure {
  G1CollectedHeap* _g1;
public:
  UpdateRSetOopsIntoCSImmediate(G1CollectedHeap* g1) : _g1(g1) { }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    HeapRegion* to = _g1->heap_region_containing(oopDesc::load_decode_heap_oop(p));
    if (to->in_collection_set()) {
      // Tid 0: this runs single-threaded during recovery.
      to->rem_set()->add_reference(p, 0);
    }
  }
};
iveresov@1051 535
// Evacuation-failure recovery (deferred form): instead of updating
// remembered sets directly, mark the card containing each into-cset
// reference as "deferred" and enqueue it on a dirty-card queue so it
// is processed later.
class UpdateRSetOopsIntoCSDeferred : public OopClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;
  DirtyCardQueue* _dcq;
public:
  UpdateRSetOopsIntoCSDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) { }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (_g1->obj_in_cs(obj)) {
      size_t card_index = _ct_bs->index_for(p);
      // mark_card_deferred returns true only for the first marker,
      // so each card is enqueued at most once.
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};
iveresov@1051 555
ysr@1280 556 template <class T> void HRInto_G1RemSet::new_refs_iterate_work(OopClosure* cl) {
iveresov@1051 557 for (size_t i = 0; i < n_workers(); i++) {
iveresov@1051 558 for (int j = 0; j < _new_refs[i]->length(); j++) {
ysr@1280 559 T* p = (T*) _new_refs[i]->at(j);
iveresov@1051 560 cl->do_oop(p);
iveresov@1051 561 }
iveresov@1051 562 }
iveresov@1051 563 }
iveresov@1051 564
// Pause teardown: total the per-worker scanned-card counters, restore
// concurrent refinement, reset rem-set iterators, clean the card
// table, and — if evacuation failed — replay the recorded "new refs"
// to rebuild remembered sets (deferred or immediate, per
// G1DeferredRSUpdate). Finally the per-worker buffers are cleared.
void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee( _cards_scanned != NULL, "invariant" );
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  if (ParallelGCThreads > 0) {
    set_par_traversal(false);
  }

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into
    // the collection set.
    if (G1DeferredRSUpdate) {
      DirtyCardQueue dcq(&_g1->dirty_card_queue_set());
      UpdateRSetOopsIntoCSDeferred deferred_update(_g1, &dcq);
      new_refs_iterate(&deferred_update);
    } else {
      UpdateRSetOopsIntoCSImmediate immediate_update(_g1);
      new_refs_iterate(&immediate_update);
    }
  }
  // Buffers are reused on the next pause; clear but keep them.
  for (uint i = 0; i < n_workers(); i++) {
    _new_refs[i]->clear();
  }

  assert(!_par_traversal_in_progress, "Invariant between iterations.");
}
ysr@777 605
// Object closure that updates remembered sets for every reference
// field of an object by delegating to an UpdateRSOopClosure.
class UpdateRSObjectClosure: public ObjectClosure {
  UpdateRSOopClosure* _update_rs_oop_cl;
public:
  UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) :
    _update_rs_oop_cl(update_rs_oop_cl) {}
  void do_object(oop obj) {
    obj->oop_iterate(_update_rs_oop_cl);
  }

};
ysr@777 616
// Region closure that scrubs each region's remembered set: entries for
// regions/cards not marked live in the supplied bitmaps are removed.
// Continues-humongous regions are skipped (their rem set lives with
// the starts-humongous region).
class ScrubRSClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  BitMap* _region_bm;  // liveness bitmap over regions
  BitMap* _card_bm;    // liveness bitmap over cards
  CardTableModRefBS* _ctbs;
public:
  ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
    _g1h(G1CollectedHeap::heap()),
    _region_bm(region_bm), _card_bm(card_bm),
    _ctbs(NULL)
  {
    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ctbs = (CardTableModRefBS*)bs;
  }

  bool doHeapRegion(HeapRegion* r) {
    if (!r->continuesHumongous()) {
      r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
    }
    return false;
  }
};
ysr@777 640
ysr@777 641 void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
ysr@777 642 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 643 _g1->heap_region_iterate(&scrub_cl);
ysr@777 644 }
ysr@777 645
ysr@777 646 void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
ysr@777 647 int worker_num, int claim_val) {
ysr@777 648 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 649 _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
ysr@777 650 }
ysr@777 651
ysr@777 652
ysr@777 653 static IntHistogram out_of_histo(50, 50);
ysr@777 654
// Refine one card: clean it, then scan its objects and update the
// remembered sets of regions referenced from the card. If part of the
// card is unallocated (e.g. an unfilled TLAB tail), the card is
// re-dirtied and re-enqueued for later processing.
void HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
  update_rs_oop_cl.set_from(r);
  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, &update_rs_oop_cl);

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region and making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      // The shared queue requires the lock (unlike per-thread queues).
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    // Fully processed: record statistics.
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }
}
johnc@1325 721
ysr@777 722 void HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {
ysr@777 723 // If the card is no longer dirty, nothing to do.
ysr@777 724 if (*card_ptr != CardTableModRefBS::dirty_card_val()) return;
ysr@777 725
ysr@777 726 // Construct the region representing the card.
ysr@777 727 HeapWord* start = _ct_bs->addr_for(card_ptr);
ysr@777 728 // And find the region containing it.
ysr@777 729 HeapRegion* r = _g1->heap_region_containing(start);
ysr@777 730 if (r == NULL) {
ysr@777 731 guarantee(_g1->is_in_permanent(start), "Or else where?");
ysr@777 732 return; // Not in the G1 heap (might be in perm, for example.)
ysr@777 733 }
ysr@777 734 // Why do we have to check here whether a card is on a young region,
ysr@777 735 // given that we dirty young regions and, as a result, the
ysr@777 736 // post-barrier is supposed to filter them out and never to enqueue
ysr@777 737 // them? When we allocate a new region as the "allocation region" we
ysr@777 738 // actually dirty its cards after we release the lock, since card
ysr@777 739 // dirtying while holding the lock was a performance bottleneck. So,
ysr@777 740 // as a result, it is possible for other threads to actually
ysr@777 741 // allocate objects in the region (after the acquire the lock)
ysr@777 742 // before all the cards on the region are dirtied. This is unlikely,
ysr@777 743 // and it doesn't happen often, but it can happen. So, the extra
ysr@777 744 // check below filters out those cards.
iveresov@1072 745 if (r->is_young()) {
ysr@777 746 return;
ysr@777 747 }
ysr@777 748 // While we are processing RSet buffers during the collection, we
ysr@777 749 // actually don't want to scan any cards on the collection set,
ysr@777 750 // since we don't want to update remebered sets with entries that
ysr@777 751 // point into the collection set, given that live objects from the
ysr@777 752 // collection set are about to move and such entries will be stale
ysr@777 753 // very soon. This change also deals with a reliability issue which
ysr@777 754 // involves scanning a card in the collection set and coming across
ysr@777 755 // an array that was being chunked and looking malformed. Note,
ysr@777 756 // however, that if evacuation fails, we have to scan any objects
ysr@777 757 // that were not moved and create any missing entries.
ysr@777 758 if (r->in_collection_set()) {
ysr@777 759 return;
ysr@777 760 }
ysr@777 761
johnc@1325 762 // Should we defer processing the card?
johnc@1325 763 //
johnc@1325 764 // Previously the result from the insert_cache call would be
johnc@1325 765 // either card_ptr (implying that card_ptr was currently "cold"),
johnc@1325 766 // null (meaning we had inserted the card ptr into the "hot"
johnc@1325 767 // cache, which had some headroom), or a "hot" card ptr
johnc@1325 768 // extracted from the "hot" cache.
johnc@1325 769 //
johnc@1325 770 // Now that the _card_counts cache in the ConcurrentG1Refine
johnc@1325 771 // instance is an evicting hash table, the result we get back
johnc@1325 772 // could be from evicting the card ptr in an already occupied
johnc@1325 773 // bucket (in which case we have replaced the card ptr in the
johnc@1325 774 // bucket with card_ptr and "defer" is set to false). To avoid
johnc@1325 775 // having a data structure (updates to which would need a lock)
johnc@1325 776 // to hold these unprocessed dirty cards, we need to immediately
johnc@1325 777 // process card_ptr. The actions needed to be taken on return
johnc@1325 778 // from cache_insert are summarized in the following table:
johnc@1325 779 //
johnc@1325 780 // res defer action
johnc@1325 781 // --------------------------------------------------------------
johnc@1325 782 // null false card evicted from _card_counts & replaced with
johnc@1325 783 // card_ptr; evicted ptr added to hot cache.
johnc@1325 784 // No need to process res; immediately process card_ptr
johnc@1325 785 //
johnc@1325 786 // null true card not evicted from _card_counts; card_ptr added
johnc@1325 787 // to hot cache.
johnc@1325 788 // Nothing to do.
johnc@1325 789 //
johnc@1325 790 // non-null false card evicted from _card_counts & replaced with
johnc@1325 791 // card_ptr; evicted ptr is currently "cold" or
johnc@1325 792 // caused an eviction from the hot cache.
johnc@1325 793 // Immediately process res; process card_ptr.
johnc@1325 794 //
johnc@1325 795 // non-null true card not evicted from _card_counts; card_ptr is
johnc@1325 796 // currently cold, or caused an eviction from hot
johnc@1325 797 // cache.
johnc@1325 798 // Immediately process res; no need to process card_ptr.
johnc@1325 799
johnc@1325 800 jbyte* res = card_ptr;
johnc@1325 801 bool defer = false;
ysr@777 802 if (_cg1r->use_cache()) {
johnc@1325 803 jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
johnc@1325 804 if (res != NULL && (res != card_ptr || defer)) {
johnc@1325 805 start = _ct_bs->addr_for(res);
johnc@1325 806 r = _g1->heap_region_containing(start);
johnc@1325 807 if (r == NULL) {
johnc@1325 808 assert(_g1->is_in_permanent(start), "Or else where?");
johnc@1325 809 } else {
johnc@2021 810 // Checking whether the region we got back from the cache
johnc@2021 811 // is young here is inappropriate. The region could have been
johnc@2021 812 // freed, reallocated and tagged as young while in the cache.
johnc@2021 813 // Hence we could see its young type change at any time.
johnc@2021 814 //
johnc@2021 815 // Process card pointer we get back from the hot card cache. This
johnc@2021 816 // will check whether the region containing the card is young
johnc@2021 817 // _after_ checking that the region has been allocated from.
johnc@1325 818 concurrentRefineOneCard_impl(res, worker_i);
johnc@1325 819 }
ysr@777 820 }
ysr@777 821 }
ysr@777 822
johnc@1325 823 if (!defer) {
johnc@1325 824 concurrentRefineOneCard_impl(card_ptr, worker_i);
ysr@777 825 }
ysr@777 826 }
ysr@777 827
ysr@777 828 class HRRSStatsIter: public HeapRegionClosure {
ysr@777 829 size_t _occupied;
ysr@777 830 size_t _total_mem_sz;
ysr@777 831 size_t _max_mem_sz;
ysr@777 832 HeapRegion* _max_mem_sz_region;
ysr@777 833 public:
ysr@777 834 HRRSStatsIter() :
ysr@777 835 _occupied(0),
ysr@777 836 _total_mem_sz(0),
ysr@777 837 _max_mem_sz(0),
ysr@777 838 _max_mem_sz_region(NULL)
ysr@777 839 {}
ysr@777 840
ysr@777 841 bool doHeapRegion(HeapRegion* r) {
ysr@777 842 if (r->continuesHumongous()) return false;
ysr@777 843 size_t mem_sz = r->rem_set()->mem_size();
ysr@777 844 if (mem_sz > _max_mem_sz) {
ysr@777 845 _max_mem_sz = mem_sz;
ysr@777 846 _max_mem_sz_region = r;
ysr@777 847 }
ysr@777 848 _total_mem_sz += mem_sz;
ysr@777 849 size_t occ = r->rem_set()->occupied();
ysr@777 850 _occupied += occ;
ysr@777 851 return false;
ysr@777 852 }
ysr@777 853 size_t total_mem_sz() { return _total_mem_sz; }
ysr@777 854 size_t max_mem_sz() { return _max_mem_sz; }
ysr@777 855 size_t occupied() { return _occupied; }
ysr@777 856 HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
ysr@777 857 };
ysr@777 858
iveresov@1229 859 class PrintRSThreadVTimeClosure : public ThreadClosure {
iveresov@1229 860 public:
iveresov@1229 861 virtual void do_thread(Thread *t) {
iveresov@1229 862 ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
iveresov@1229 863 gclog_or_tty->print(" %5.2f", crt->vtime_accum());
iveresov@1229 864 }
iveresov@1229 865 };
iveresov@1229 866
// Prints a summary of remembered-set activity to the GC log: optional
// card-repeat and out-of-region histograms, concurrent refinement card
// and buffer counts (split between RS threads and mutators), per-thread
// refinement vtimes, and per-region rem set size/occupancy statistics.
void HRInto_G1RemSet::print_summary_info() {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr("  # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
    gclog_or_tty->print_cr("  # of CS ptrs --> # of cards with that number.");
    out_of_histo.print_on(gclog_or_tty);
  }
  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
                         _conc_refine_cards);
  // Completed dirty-card buffers, broken down by who processed them:
  // the concurrent RS (refinement) threads vs. the mutator threads.
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  jint tot_processed_buffers =
    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
  gclog_or_tty->print_cr("  Of %d completed buffers:", tot_processed_buffers);
  gclog_or_tty->print_cr("     %8d (%5.1f%%) by conc RS threads.",
                dcqs.processed_buffers_rs_thread(),
                100.0*(float)dcqs.processed_buffers_rs_thread()/
                (float)tot_processed_buffers);
  gclog_or_tty->print_cr("     %8d (%5.1f%%) by mutator threads.",
                dcqs.processed_buffers_mut(),
                100.0*(float)dcqs.processed_buffers_mut()/
                (float)tot_processed_buffers);
  // One vtime entry per refinement thread, all on a single line.
  gclog_or_tty->print_cr("  Conc RS threads times(s)");
  PrintRSThreadVTimeClosure p;
  gclog_or_tty->print("     ");
  g1->concurrent_g1_refine()->threads_do(&p);
  gclog_or_tty->print_cr("");

  if (G1UseHRIntoRS) {
    // Walk all heap regions collecting rem set size/occupancy stats.
    HRRSStatsIter blk;
    g1->heap_region_iterate(&blk);
    gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
                           "  Max = " SIZE_FORMAT "K.",
                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
    gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
                           " free_lists = " SIZE_FORMAT "K.",
                           HeapRegionRemSet::static_mem_size()/K,
                           HeapRegionRemSet::fl_mem_size()/K);
    gclog_or_tty->print_cr("    %d occupied cards represented.",
                           blk.occupied());
    // Rounded up to whole K for the max region's capacity and occupancy.
    gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
                           ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
                           blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
                           (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
                           (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
    gclog_or_tty->print_cr("    Did %d coarsenings.",
                  HeapRegionRemSet::n_coarsenings());

  }
}
// Brings the remembered sets up to date before heap verification, if
// requested: flushes all pending dirty-card log buffers through
// updateRS() so verification sees complete RSets. Only done when the
// G1HRRSFlushLogBuffersOnVerify flag is set, some verification
// (before/after GC) is enabled, and this is not a full collection.
void HRInto_G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC)
      &&  !_g1->full_collection()) {
    cleanupHRRS();
    // Switch the refinement card-table-entry closure to non-concurrent
    // mode while we drain the buffers here.
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      // Gather the per-thread partial logs into the completed-buffer
      // list so updateRS() below sees every pending card.
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }
    // Temporarily disable the hot card cache so no cards are held back
    // from processing; restore the previous setting afterwards.
    bool cg1r_use_cache = _cg1r->use_cache();
    _cg1r->set_use_cache(false);
    updateRS(0);
    _cg1r->set_use_cache(cg1r_use_cache);

    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}

mercurial