src/share/vm/gc_implementation/g1/g1RemSet.cpp

Thu, 07 Apr 2011 09:53:20 -0700

author
johnc
date
Thu, 07 Apr 2011 09:53:20 -0700
changeset 2781
e1162778c1c8
parent 2646
04d1138b4cce
child 2849
063382f9b575
permissions
-rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes

ysr@777 1 /*
johnc@2504 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/bufferingOopClosure.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@2314 32 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@2314 33 #include "gc_implementation/g1/g1RemSet.inline.hpp"
stefank@2314 34 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 35 #include "memory/iterator.hpp"
stefank@2314 36 #include "oops/oop.inline.hpp"
stefank@2314 37 #include "utilities/intHisto.hpp"
ysr@777 38
ysr@777 39 #define CARD_REPEAT_HISTO 0
ysr@777 40
ysr@777 41 #if CARD_REPEAT_HISTO
ysr@777 42 static size_t ct_freq_sz;
ysr@777 43 static jbyte* ct_freq = NULL;
ysr@777 44
ysr@777 45 void init_ct_freq_table(size_t heap_sz_bytes) {
ysr@777 46 if (ct_freq == NULL) {
ysr@777 47 ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
ysr@777 48 ct_freq = new jbyte[ct_freq_sz];
ysr@777 49 for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
ysr@777 50 }
ysr@777 51 }
ysr@777 52
ysr@777 53 void ct_freq_note_card(size_t index) {
ysr@777 54 assert(0 <= index && index < ct_freq_sz, "Bounds error.");
ysr@777 55 if (ct_freq[index] < 100) { ct_freq[index]++; }
ysr@777 56 }
ysr@777 57
ysr@777 58 static IntHistogram card_repeat_count(10, 10);
ysr@777 59
ysr@777 60 void ct_freq_update_histo_and_reset() {
ysr@777 61 for (size_t j = 0; j < ct_freq_sz; j++) {
ysr@777 62 card_repeat_count.add_entry(ct_freq[j]);
ysr@777 63 ct_freq[j] = 0;
ysr@777 64 }
ysr@777 65
ysr@777 66 }
ysr@777 67 #endif
ysr@777 68
ysr@777 69
ysr@777 70 class IntoCSOopClosure: public OopsInHeapRegionClosure {
ysr@777 71 OopsInHeapRegionClosure* _blk;
ysr@777 72 G1CollectedHeap* _g1;
ysr@777 73 public:
ysr@777 74 IntoCSOopClosure(G1CollectedHeap* g1, OopsInHeapRegionClosure* blk) :
ysr@777 75 _g1(g1), _blk(blk) {}
ysr@777 76 void set_region(HeapRegion* from) {
ysr@777 77 _blk->set_region(from);
ysr@777 78 }
ysr@1280 79 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 80 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 81 template <class T> void do_oop_work(T* p) {
ysr@1280 82 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 83 if (_g1->obj_in_cs(obj)) _blk->do_oop(p);
ysr@777 84 }
ysr@777 85 bool apply_to_weak_ref_discovered_field() { return true; }
ysr@777 86 bool idempotent() { return true; }
ysr@777 87 };
ysr@777 88
ysr@777 89 class VerifyRSCleanCardOopClosure: public OopClosure {
ysr@777 90 G1CollectedHeap* _g1;
ysr@777 91 public:
ysr@777 92 VerifyRSCleanCardOopClosure(G1CollectedHeap* g1) : _g1(g1) {}
ysr@777 93
ysr@1280 94 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
ysr@1280 95 virtual void do_oop( oop* p) { do_oop_work(p); }
ysr@1280 96 template <class T> void do_oop_work(T* p) {
ysr@1280 97 oop obj = oopDesc::load_decode_heap_oop(p);
ysr@777 98 HeapRegion* to = _g1->heap_region_containing(obj);
ysr@777 99 guarantee(to == NULL || !to->in_collection_set(),
ysr@777 100 "Missed a rem set member.");
ysr@777 101 }
ysr@777 102 };
ysr@777 103
// Construct the G1 remembered-set helper. Allocates one closure slot
// per worker in _cset_rs_update_cl; during an evacuation pause each
// slot caches that worker's "into-cset" closure (see
// oops_into_collection_set_do), and is NULL outside a pause.
G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
  : _g1(g1), _conc_refine_cards(0),
    _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
    _cg1r(g1->concurrent_g1_refine()),
    _cset_rs_update_cl(NULL),
    _cards_scanned(NULL), _total_cards_scanned(0)
{
  _seq_task = new SubTasksDone(NumSeqTasks);
  guarantee(n_workers() > 0, "There should be some workers");
  // One closure slot per worker thread, all initially unset.
  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers());
  for (uint i = 0; i < n_workers(); i++) {
    _cset_rs_update_cl[i] = NULL;
  }
}
ysr@777 118
johnc@2216 119 G1RemSet::~G1RemSet() {
ysr@777 120 delete _seq_task;
iveresov@1051 121 for (uint i = 0; i < n_workers(); i++) {
johnc@2060 122 assert(_cset_rs_update_cl[i] == NULL, "it should be");
iveresov@1051 123 }
johnc@2060 124 FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
ysr@777 125 }
ysr@777 126
ysr@777 127 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
ysr@777 128 if (_g1->is_in_g1_reserved(mr.start())) {
ysr@777 129 _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
ysr@777 130 if (_start_first == NULL) _start_first = mr.start();
ysr@777 131 }
ysr@777 132 }
ysr@777 133
ysr@777 134 class ScanRSClosure : public HeapRegionClosure {
ysr@777 135 size_t _cards_done, _cards;
ysr@777 136 G1CollectedHeap* _g1h;
ysr@777 137 OopsInHeapRegionClosure* _oc;
ysr@777 138 G1BlockOffsetSharedArray* _bot_shared;
ysr@777 139 CardTableModRefBS *_ct_bs;
ysr@777 140 int _worker_i;
iveresov@1696 141 int _block_size;
ysr@777 142 bool _try_claimed;
ysr@777 143 public:
ysr@777 144 ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
ysr@777 145 _oc(oc),
ysr@777 146 _cards(0),
ysr@777 147 _cards_done(0),
ysr@777 148 _worker_i(worker_i),
ysr@777 149 _try_claimed(false)
ysr@777 150 {
ysr@777 151 _g1h = G1CollectedHeap::heap();
ysr@777 152 _bot_shared = _g1h->bot_shared();
ysr@777 153 _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
iveresov@1696 154 _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
ysr@777 155 }
ysr@777 156
ysr@777 157 void set_try_claimed() { _try_claimed = true; }
ysr@777 158
ysr@777 159 void scanCard(size_t index, HeapRegion *r) {
ysr@777 160 _cards_done++;
ysr@777 161 DirtyCardToOopClosure* cl =
ysr@777 162 r->new_dcto_closure(_oc,
ysr@777 163 CardTableModRefBS::Precise,
ysr@777 164 HeapRegionDCTOC::IntoCSFilterKind);
ysr@777 165
ysr@777 166 // Set the "from" region in the closure.
ysr@777 167 _oc->set_region(r);
ysr@777 168 HeapWord* card_start = _bot_shared->address_for_index(index);
ysr@777 169 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
ysr@777 170 Space *sp = SharedHeap::heap()->space_containing(card_start);
ysr@777 171 MemRegion sm_region;
ysr@777 172 if (ParallelGCThreads > 0) {
ysr@777 173 // first find the used area
ysr@777 174 sm_region = sp->used_region_at_save_marks();
ysr@777 175 } else {
ysr@777 176 // The closure is not idempotent. We shouldn't look at objects
ysr@777 177 // allocated during the GC.
ysr@777 178 sm_region = sp->used_region_at_save_marks();
ysr@777 179 }
ysr@777 180 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
ysr@777 181 if (!mr.is_empty()) {
ysr@777 182 cl->do_MemRegion(mr);
ysr@777 183 }
ysr@777 184 }
ysr@777 185
ysr@777 186 void printCard(HeapRegion* card_region, size_t card_index,
ysr@777 187 HeapWord* card_start) {
ysr@777 188 gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
ysr@777 189 "RS names card %p: "
ysr@777 190 "[" PTR_FORMAT ", " PTR_FORMAT ")",
ysr@777 191 _worker_i,
ysr@777 192 card_region->bottom(), card_region->end(),
ysr@777 193 card_index,
ysr@777 194 card_start, card_start + G1BlockOffsetSharedArray::N_words);
ysr@777 195 }
ysr@777 196
ysr@777 197 bool doHeapRegion(HeapRegion* r) {
ysr@777 198 assert(r->in_collection_set(), "should only be called on elements of CS.");
ysr@777 199 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 200 if (hrrs->iter_is_complete()) return false; // All done.
ysr@777 201 if (!_try_claimed && !hrrs->claim_iter()) return false;
apetrusenko@1231 202 _g1h->push_dirty_cards_region(r);
ysr@777 203 // If we didn't return above, then
ysr@777 204 // _try_claimed || r->claim_iter()
ysr@777 205 // is true: either we're supposed to work on claimed-but-not-complete
ysr@777 206 // regions, or we successfully claimed the region.
ysr@777 207 HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
ysr@777 208 hrrs->init_iterator(iter);
ysr@777 209 size_t card_index;
iveresov@1696 210
iveresov@1696 211 // We claim cards in block so as to recude the contention. The block size is determined by
iveresov@1696 212 // the G1RSetScanBlockSize parameter.
iveresov@1696 213 size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1696 214 for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
iveresov@1696 215 if (current_card >= jump_to_card + _block_size) {
iveresov@1696 216 jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1182 217 }
iveresov@1696 218 if (current_card < jump_to_card) continue;
ysr@777 219 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
ysr@777 220 #if 0
ysr@777 221 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
ysr@777 222 card_start, card_start + CardTableModRefBS::card_size_in_words);
ysr@777 223 #endif
ysr@777 224
ysr@777 225 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
ysr@777 226 assert(card_region != NULL, "Yielding cards not in the heap?");
ysr@777 227 _cards++;
ysr@777 228
apetrusenko@1231 229 if (!card_region->is_on_dirty_cards_region_list()) {
apetrusenko@1231 230 _g1h->push_dirty_cards_region(card_region);
apetrusenko@1231 231 }
apetrusenko@1231 232
iveresov@1182 233 // If the card is dirty, then we will scan it during updateRS.
iveresov@1182 234 if (!card_region->in_collection_set() && !_ct_bs->is_card_dirty(card_index)) {
iveresov@1696 235 // We make the card as "claimed" lazily (so races are possible but they're benign),
iveresov@1696 236 // which reduces the number of duplicate scans (the rsets of the regions in the cset
iveresov@1696 237 // can intersect).
iveresov@1696 238 if (!_ct_bs->is_card_claimed(card_index)) {
iveresov@1696 239 _ct_bs->set_card_claimed(card_index);
iveresov@1696 240 scanCard(card_index, card_region);
iveresov@1696 241 }
ysr@777 242 }
ysr@777 243 }
iveresov@1182 244 if (!_try_claimed) {
iveresov@1182 245 hrrs->set_iter_complete();
iveresov@1182 246 }
ysr@777 247 return false;
ysr@777 248 }
ysr@777 249 // Set all cards back to clean.
ysr@777 250 void cleanup() {_g1h->cleanUpCardTable();}
ysr@777 251 size_t cards_done() { return _cards_done;}
ysr@777 252 size_t cards_looked_up() { return _cards;}
ysr@777 253 };
ysr@777 254
ysr@777 255 // We want the parallel threads to start their scanning at
ysr@777 256 // different collection set regions to avoid contention.
ysr@777 257 // If we have:
ysr@777 258 // n collection set regions
ysr@777 259 // p threads
ysr@777 260 // Then thread t will start at region t * floor (n/p)
ysr@777 261
johnc@2216 262 HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
ysr@777 263 HeapRegion* result = _g1p->collection_set();
ysr@777 264 if (ParallelGCThreads > 0) {
ysr@777 265 size_t cs_size = _g1p->collection_set_size();
ysr@777 266 int n_workers = _g1->workers()->total_workers();
ysr@777 267 size_t cs_spans = cs_size / n_workers;
ysr@777 268 size_t ind = cs_spans * worker_i;
ysr@777 269 for (size_t i = 0; i < ind; i++)
ysr@777 270 result = result->next_in_collection_set();
ysr@777 271 }
ysr@777 272 return result;
ysr@777 273 }
ysr@777 274
// Scan the remembered sets of the collection-set regions on behalf of
// worker 'worker_i', applying 'oc' to the references found, and record
// the elapsed time and card count in the policy statistics.
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
  double rs_time_start = os::elapsedTime();
  HeapRegion *startRegion = calculateStartRegion(worker_i);

  ScanRSClosure scanRScl(oc, worker_i);
  // First pass starting at this worker's offset into the cset...
  _g1->collection_set_iterate_from(startRegion, &scanRScl);
  // ...then a second pass that also works on regions other workers have
  // claimed but not yet completed (see ScanRSClosure::set_try_claimed).
  scanRScl.set_try_claimed();
  _g1->collection_set_iterate_from(startRegion, &scanRScl);

  double scan_rs_time_sec = os::elapsedTime() - rs_time_start;

  assert( _cards_scanned != NULL, "invariant" );
  _cards_scanned[worker_i] = scanRScl.cards_done();

  _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
}
ysr@777 291
johnc@2060 292 // Closure used for updating RSets and recording references that
johnc@2060 293 // point into the collection set. Only called during an
johnc@2060 294 // evacuation pause.
ysr@777 295
johnc@2060 296 class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
johnc@2060 297 G1RemSet* _g1rs;
johnc@2060 298 DirtyCardQueue* _into_cset_dcq;
johnc@2060 299 public:
johnc@2060 300 RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
johnc@2060 301 DirtyCardQueue* into_cset_dcq) :
johnc@2060 302 _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
johnc@2060 303 {}
johnc@2060 304 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
johnc@2060 305 // The only time we care about recording cards that
johnc@2060 306 // contain references that point into the collection set
johnc@2060 307 // is during RSet updating within an evacuation pause.
johnc@2060 308 // In this case worker_i should be the id of a GC worker thread.
johnc@2060 309 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
brutisso@2646 310 assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
johnc@2060 311
johnc@2060 312 if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
johnc@2060 313 // 'card_ptr' contains references that point into the collection
johnc@2060 314 // set. We need to record the card in the DCQS
johnc@2060 315 // (G1CollectedHeap::into_cset_dirty_card_queue_set())
johnc@2060 316 // that's used for that purpose.
johnc@2060 317 //
johnc@2060 318 // Enqueue the card
johnc@2060 319 _into_cset_dcq->enqueue(card_ptr);
johnc@2060 320 }
johnc@2060 321 return true;
johnc@2060 322 }
johnc@2060 323 };
johnc@2060 324
// Drain the remaining dirty-card log entries for this worker, refining
// each card; cards found to contain references into the collection set
// are enqueued on 'into_cset_dcq' by the closure. Elapsed time is
// recorded in the policy statistics.
void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
  double start = os::elapsedTime();
  // Apply the given closure to all remaining log entries.
  RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);

  // Now there should be no dirty cards.
  if (G1RSLogCheckCardTable) {
    CountNonCleanMemRegionClosure cl(_g1);
    _ct_bs->mod_card_iterate(&cl);
    // XXX This isn't true any more: keeping cards of young regions
    // marked dirty broke it. Need some reasonable fix.
    guarantee(cl.n() == 0, "Card table should be clean.");
  }

  _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}
ysr@777 342
ysr@777 343 #ifndef PRODUCT
ysr@777 344 class PrintRSClosure : public HeapRegionClosure {
ysr@777 345 int _count;
ysr@777 346 public:
ysr@777 347 PrintRSClosure() : _count(0) {}
ysr@777 348 bool doHeapRegion(HeapRegion* r) {
ysr@777 349 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 350 _count += (int) hrrs->occupied();
ysr@777 351 if (hrrs->occupied() == 0) {
ysr@777 352 gclog_or_tty->print("Heap Region [" PTR_FORMAT ", " PTR_FORMAT ") "
ysr@777 353 "has no remset entries\n",
ysr@777 354 r->bottom(), r->end());
ysr@777 355 } else {
ysr@777 356 gclog_or_tty->print("Printing rem set for heap region [" PTR_FORMAT ", " PTR_FORMAT ")\n",
ysr@777 357 r->bottom(), r->end());
ysr@777 358 r->print();
ysr@777 359 hrrs->print();
ysr@777 360 gclog_or_tty->print("\nDone printing rem set\n");
ysr@777 361 }
ysr@777 362 return false;
ysr@777 363 }
ysr@777 364 int occupied() {return _count;}
ysr@777 365 };
ysr@777 366 #endif
ysr@777 367
ysr@777 368 class CountRSSizeClosure: public HeapRegionClosure {
ysr@777 369 size_t _n;
ysr@777 370 size_t _tot;
ysr@777 371 size_t _max;
ysr@777 372 HeapRegion* _max_r;
ysr@777 373 enum {
ysr@777 374 N = 20,
ysr@777 375 MIN = 6
ysr@777 376 };
ysr@777 377 int _histo[N];
ysr@777 378 public:
ysr@777 379 CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
ysr@777 380 for (int i = 0; i < N; i++) _histo[i] = 0;
ysr@777 381 }
ysr@777 382 bool doHeapRegion(HeapRegion* r) {
ysr@777 383 if (!r->continuesHumongous()) {
ysr@777 384 size_t occ = r->rem_set()->occupied();
ysr@777 385 _n++;
ysr@777 386 _tot += occ;
ysr@777 387 if (occ > _max) {
ysr@777 388 _max = occ;
ysr@777 389 _max_r = r;
ysr@777 390 }
ysr@777 391 // Fit it into a histo bin.
ysr@777 392 int s = 1 << MIN;
ysr@777 393 int i = 0;
ysr@777 394 while (occ > (size_t) s && i < (N-1)) {
ysr@777 395 s = s << 1;
ysr@777 396 i++;
ysr@777 397 }
ysr@777 398 _histo[i]++;
ysr@777 399 }
ysr@777 400 return false;
ysr@777 401 }
ysr@777 402 size_t n() { return _n; }
ysr@777 403 size_t tot() { return _tot; }
ysr@777 404 size_t mx() { return _max; }
ysr@777 405 HeapRegion* mxr() { return _max_r; }
ysr@777 406 void print_histo() {
ysr@777 407 int mx = N;
ysr@777 408 while (mx >= 0) {
ysr@777 409 if (_histo[mx-1] > 0) break;
ysr@777 410 mx--;
ysr@777 411 }
ysr@777 412 gclog_or_tty->print_cr("Number of regions with given RS sizes:");
ysr@777 413 gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]);
ysr@777 414 for (int i = 1; i < mx-1; i++) {
ysr@777 415 gclog_or_tty->print_cr(" %8d - %8d %8d",
ysr@777 416 (1 << (MIN + i - 1)) + 1,
ysr@777 417 1 << (MIN + i),
ysr@777 418 _histo[i]);
ysr@777 419 }
ysr@777 420 gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
ysr@777 421 }
ysr@777 422 };
ysr@777 423
// Delegates to the remembered-set implementation's static cleanup;
// invoked from prepare_for_oops_into_collection_set_do().
void G1RemSet::cleanupHRRS() {
  HeapRegionRemSet::cleanup();
}
ysr@777 427
// Per-worker entry point during an evacuation pause: update the
// remembered sets from the refinement logs (updateRS), then scan the
// RSets of the collection-set regions (scanRS), applying 'oc' to
// references that point into the cset.
void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                           int worker_i) {
#if CARD_REPEAT_HISTO
  ct_freq_update_histo_and_reset();
#endif
  if (worker_i == 0) {
    _cg1r->clear_and_record_card_counts();
  }

  // Make this into a command-line flag...
  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
    CountRSSizeClosure count_cl;
    _g1->heap_region_iterate(&count_cl);
    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
                  "max region is " PTR_FORMAT,
                  count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
                  count_cl.mx(), count_cl.mxr());
    count_cl.print_histo();
  }

  // We cache the value of 'oc' closure into the appropriate slot in the
  // _cset_rs_update_cl for this worker
  assert(worker_i < (int)n_workers(), "sanity");
  _cset_rs_update_cl[worker_i] = oc;

  // A DirtyCardQueue that is used to hold cards containing references
  // that point into the collection set. This DCQ is associated with a
  // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
  // circumstances (i.e. the pause successfully completes), these cards
  // are just discarded (there's no need to update the RSets of regions
  // that were in the collection set - after the pause these regions
  // are wholly 'free' of live objects. In the event of an evacuation
  // failure the cards/buffers in this queue set are:
  // * passed to the DirtyCardQueueSet that is used to manage deferred
  //   RSet updates, or
  // * scanned for references that point into the collection set
  //   and the RSet of the corresponding region in the collection set
  //   is updated immediately.
  DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());

  assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");

  // The two flags below were introduced temporarily to serialize
  // the updating and scanning of remembered sets. There are some
  // race conditions when these two operations are done in parallel
  // and they are causing failures. When we resolve said race
  // conditions, we'll revert back to parallel remembered set
  // updating and scanning. See CRs 6677707 and 6677708.
  if (G1UseParallelRSetUpdating || (worker_i == 0)) {
    updateRS(&into_cset_dcq, worker_i);
  } else {
    // Workers that skip the update still report zeroed timings so the
    // policy's per-worker statistics stay consistent.
    _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
    _g1p->record_update_rs_time(worker_i, 0.0);
  }
  if (G1UseParallelRSetScanning || (worker_i == 0)) {
    scanRS(oc, worker_i);
  } else {
    _g1p->record_scan_rs_time(worker_i, 0.0);
  }

  // We now clear the cached values of _cset_rs_update_cl for this worker
  _cset_rs_update_cl[worker_i] = NULL;
}
ysr@777 491
johnc@2216 492 void G1RemSet::prepare_for_oops_into_collection_set_do() {
ysr@777 493 #if G1_REM_SET_LOGGING
ysr@777 494 PrintRSClosure cl;
ysr@777 495 _g1->collection_set_iterate(&cl);
ysr@777 496 #endif
ysr@777 497 cleanupHRRS();
ysr@777 498 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
ysr@777 499 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 500 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 501 dcqs.concatenate_logs();
ysr@777 502
ysr@777 503 if (ParallelGCThreads > 0) {
jmasa@2188 504 _seq_task->set_n_threads((int)n_workers());
ysr@777 505 }
ysr@777 506 guarantee( _cards_scanned == NULL, "invariant" );
ysr@777 507 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
apetrusenko@980 508 for (uint i = 0; i < n_workers(); ++i) {
apetrusenko@980 509 _cards_scanned[i] = 0;
apetrusenko@980 510 }
ysr@777 511 _total_cards_scanned = 0;
ysr@777 512 }
ysr@777 513
ysr@777 514
ysr@777 515 class cleanUpIteratorsClosure : public HeapRegionClosure {
ysr@777 516 bool doHeapRegion(HeapRegion *r) {
ysr@777 517 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 518 hrrs->init_for_par_iteration();
ysr@777 519 return false;
ysr@777 520 }
ysr@777 521 };
ysr@777 522
// This closure, applied to a DirtyCardQueueSet, is used to immediately
// update the RSets for the regions in the CSet. For each card it iterates
// through the oops which coincide with that card. It scans the reference
// fields in each oop; when it finds an oop that points into the collection
// set, the RSet for the region containing the referenced object is updated.
class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
  G1CollectedHeap* _g1;
  CardTableModRefBS* _ct_bs;  // maps card pointers to heap addresses
public:
  UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
                                          CardTableModRefBS* bs):
    _g1(g1), _ct_bs(bs)
  { }

  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
    // Construct the region representing the card.
    HeapWord* start = _ct_bs->addr_for(card_ptr);
    // And find the region containing it.
    HeapRegion* r = _g1->heap_region_containing(start);
    assert(r != NULL, "unexpected null");

    // Scan oops in the card looking for references into the collection set
    HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
    MemRegion scanRegion(start, end);

    UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
    FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
    FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);

    // We can pass false as the "filter_young" parameter here as:
    // * we should be in a STW pause,
    // * the DCQS to which this closure is applied is used to hold
    //   references that point into the collection set from the prior
    //   RSet updating,
    // * the post-write barrier shouldn't be logging updates to young
    //   regions (but there is a situation where this can happen - see
    //   the comment in G1RemSet::concurrentRefineOneCard below -
    //   that should not be applicable here), and
    // * during actual RSet updating, the filtering of cards in young
    //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
    //   employed.
    // As a result, when this closure is applied to "refs into cset"
    // DCQS, we shouldn't see any cards in young regions.
    update_rs_cl.set_region(r);
    HeapWord* stop_point =
      r->oops_on_card_seq_iterate_careful(scanRegion,
                                          &filter_then_update_rs_cset_oop_cl,
                                          false /* filter_young */);

    // Since this is performed in the event of an evacuation failure,
    // we shouldn't see a non-null stop point
    assert(stop_point == NULL, "saw an unallocated region");
    return true;
  }
};
iveresov@1051 578
// Per-pause teardown: aggregate the per-worker scanned-card counts,
// re-enable concurrent refinement, reset the cset region iterators,
// clean the card table, and dispose of (or, on evacuation failure,
// replay) the "refs into cset" dirty-card buffers.
void G1RemSet::cleanup_after_oops_into_collection_set_do() {
  guarantee( _cards_scanned != NULL, "invariant" );
  _total_cards_scanned = 0;
  for (uint i = 0; i < n_workers(); ++i)
    _total_cards_scanned += _cards_scanned[i];
  FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  _cards_scanned = NULL;
  // Cleanup after copy
#if G1_REM_SET_LOGGING
  PrintRSClosure cl;
  _g1->heap_region_iterate(&cl);
#endif
  _g1->set_refine_cte_cl_concurrency(true);
  cleanUpIteratorsClosure iterClosure;
  _g1->collection_set_iterate(&iterClosure);
  // Set all cards back to clean.
  _g1->cleanUpCardTable();

  DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
  int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();

  if (_g1->evacuation_failed()) {
    // Restore remembered sets for the regions pointing into the collection set.

    if (G1DeferredRSUpdate) {
      // If deferred RS updates are enabled then we just need to transfer
      // the completed buffers from (a) the DirtyCardQueueSet used to hold
      // cards that contain references that point into the collection set
      // to (b) the DCQS used to hold the deferred RS updates
      _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
    } else {
      // Otherwise apply the RSet updates immediately, one completed
      // buffer at a time, verifying that every buffer was processed.
      CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
      UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);

      int n_completed_buffers = 0;
      while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
                                                              0, 0, true)) {
        n_completed_buffers++;
      }
      assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
    }
  }

  // Free any completed buffers in the DirtyCardQueueSet used to hold cards
  // which contain references that point into the collection.
  _g1->into_cset_dirty_card_queue_set().clear();
  assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
         "all buffers should be freed");
  _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
}
ysr@777 630
ysr@777 631 class ScrubRSClosure: public HeapRegionClosure {
ysr@777 632 G1CollectedHeap* _g1h;
ysr@777 633 BitMap* _region_bm;
ysr@777 634 BitMap* _card_bm;
ysr@777 635 CardTableModRefBS* _ctbs;
ysr@777 636 public:
ysr@777 637 ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
ysr@777 638 _g1h(G1CollectedHeap::heap()),
ysr@777 639 _region_bm(region_bm), _card_bm(card_bm),
ysr@777 640 _ctbs(NULL)
ysr@777 641 {
ysr@777 642 ModRefBarrierSet* bs = _g1h->mr_bs();
ysr@777 643 guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
ysr@777 644 _ctbs = (CardTableModRefBS*)bs;
ysr@777 645 }
ysr@777 646
ysr@777 647 bool doHeapRegion(HeapRegion* r) {
ysr@777 648 if (!r->continuesHumongous()) {
ysr@777 649 r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
ysr@777 650 }
ysr@777 651 return false;
ysr@777 652 }
ysr@777 653 };
ysr@777 654
johnc@2216 655 void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
ysr@777 656 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 657 _g1->heap_region_iterate(&scrub_cl);
ysr@777 658 }
ysr@777 659
johnc@2216 660 void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
ysr@777 661 int worker_num, int claim_val) {
ysr@777 662 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 663 _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
ysr@777 664 }
ysr@777 665
ysr@777 666
ysr@777 667 static IntHistogram out_of_histo(50, 50);
ysr@777 668
johnc@2060 669 class TriggerClosure : public OopClosure {
johnc@2060 670 bool _trigger;
johnc@2060 671 public:
johnc@2060 672 TriggerClosure() : _trigger(false) { }
johnc@2060 673 bool value() const { return _trigger; }
johnc@2060 674 template <class T> void do_oop_nv(T* p) { _trigger = true; }
johnc@2060 675 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 676 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 677 };
johnc@2060 678
johnc@2060 679 class InvokeIfNotTriggeredClosure: public OopClosure {
johnc@2060 680 TriggerClosure* _t;
johnc@2060 681 OopClosure* _oc;
johnc@2060 682 public:
johnc@2060 683 InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
johnc@2060 684 _t(t), _oc(oc) { }
johnc@2060 685 template <class T> void do_oop_nv(T* p) {
johnc@2060 686 if (!_t->value()) _oc->do_oop(p);
johnc@2060 687 }
johnc@2060 688 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 689 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 690 };
johnc@2060 691
johnc@2060 692 class Mux2Closure : public OopClosure {
johnc@2060 693 OopClosure* _c1;
johnc@2060 694 OopClosure* _c2;
johnc@2060 695 public:
johnc@2060 696 Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
johnc@2060 697 template <class T> void do_oop_nv(T* p) {
johnc@2060 698 _c1->do_oop(p); _c2->do_oop(p);
johnc@2060 699 }
johnc@2060 700 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 701 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 702 };
johnc@2060 703
// Actually refine one card: clean it, then scan the objects covered by it,
// updating the remembered sets of the regions referenced from the card.
// If check_for_refs_into_cset is true (which the callers only request
// during an evacuation pause), references into the collection set are
// additionally recorded via _cset_rs_update_cl[worker_i].
// Returns true iff the card was found to contain a reference that points
// into the collection set.
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                            bool check_for_refs_into_cset) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  // The card covers the heap words in [start, end).
  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->max_capacity());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  // When checking for refs into the collection set, the caller must have
  // installed a per-worker recording closure beforehand.
  assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL, "sanity");
  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                               _g1->g1_rem_set(),
                                               _cset_rs_update_cl[worker_i],
                                               check_for_refs_into_cset,
                                               worker_i);
  update_rs_oop_cl.set_from(r);

  // trigger_cl fires (via into_cs_cl) on the first reference into the
  // collection set; invoke_cl then stops applying the filter for the
  // rest of the card, since one such reference is enough to answer.
  TriggerClosure trigger_cl;
  FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
  InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
  Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);

  // If we need to detect refs into the collection set, apply both the
  // detection chain and the rem set update; otherwise only the update.
  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
                        (check_for_refs_into_cset ?
                                (OopClosure*)&mux :
                                (OopClosure*)&update_rs_oop_cl));

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.

  // The region for the current card may be a young region. The
  // current card may have been a card that was evicted from the
  // card cache. When the card was inserted into the cache, we had
  // determined that its region was non-young. While in the cache,
  // the region may have been freed during a cleanup pause, reallocated
  // and tagged as young.
  //
  // We wish to filter out cards for such a region but the current
  // thread, if we're running concurrently, may "see" the young type
  // change at any time (so an earlier "is_young" check may pass or
  // fail arbitrarily). We tell the iteration code to perform this
  // filtering when it has been determined that there has been an actual
  // allocation in this region and making it safe to check the young type.
  bool filter_young = true;

  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl,
                                        filter_young);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    // The entire card was processed; record statistics.
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }

  return trigger_cl.value();
}
johnc@1325 787
johnc@2216 788 bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
johnc@2060 789 bool check_for_refs_into_cset) {
ysr@777 790 // If the card is no longer dirty, nothing to do.
johnc@2060 791 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
johnc@2060 792 // No need to return that this card contains refs that point
johnc@2060 793 // into the collection set.
johnc@2060 794 return false;
johnc@2060 795 }
ysr@777 796
ysr@777 797 // Construct the region representing the card.
ysr@777 798 HeapWord* start = _ct_bs->addr_for(card_ptr);
ysr@777 799 // And find the region containing it.
ysr@777 800 HeapRegion* r = _g1->heap_region_containing(start);
ysr@777 801 if (r == NULL) {
ysr@777 802 guarantee(_g1->is_in_permanent(start), "Or else where?");
johnc@2060 803 // Again no need to return that this card contains refs that
johnc@2060 804 // point into the collection set.
johnc@2060 805 return false; // Not in the G1 heap (might be in perm, for example.)
ysr@777 806 }
ysr@777 807 // Why do we have to check here whether a card is on a young region,
ysr@777 808 // given that we dirty young regions and, as a result, the
ysr@777 809 // post-barrier is supposed to filter them out and never to enqueue
ysr@777 810 // them? When we allocate a new region as the "allocation region" we
ysr@777 811 // actually dirty its cards after we release the lock, since card
ysr@777 812 // dirtying while holding the lock was a performance bottleneck. So,
ysr@777 813 // as a result, it is possible for other threads to actually
ysr@777 814 // allocate objects in the region (after the acquire the lock)
ysr@777 815 // before all the cards on the region are dirtied. This is unlikely,
ysr@777 816 // and it doesn't happen often, but it can happen. So, the extra
ysr@777 817 // check below filters out those cards.
iveresov@1072 818 if (r->is_young()) {
johnc@2060 819 return false;
ysr@777 820 }
ysr@777 821 // While we are processing RSet buffers during the collection, we
ysr@777 822 // actually don't want to scan any cards on the collection set,
ysr@777 823 // since we don't want to update remebered sets with entries that
ysr@777 824 // point into the collection set, given that live objects from the
ysr@777 825 // collection set are about to move and such entries will be stale
ysr@777 826 // very soon. This change also deals with a reliability issue which
ysr@777 827 // involves scanning a card in the collection set and coming across
ysr@777 828 // an array that was being chunked and looking malformed. Note,
ysr@777 829 // however, that if evacuation fails, we have to scan any objects
ysr@777 830 // that were not moved and create any missing entries.
ysr@777 831 if (r->in_collection_set()) {
johnc@2060 832 return false;
ysr@777 833 }
ysr@777 834
johnc@1325 835 // Should we defer processing the card?
johnc@1325 836 //
johnc@1325 837 // Previously the result from the insert_cache call would be
johnc@1325 838 // either card_ptr (implying that card_ptr was currently "cold"),
johnc@1325 839 // null (meaning we had inserted the card ptr into the "hot"
johnc@1325 840 // cache, which had some headroom), or a "hot" card ptr
johnc@1325 841 // extracted from the "hot" cache.
johnc@1325 842 //
johnc@1325 843 // Now that the _card_counts cache in the ConcurrentG1Refine
johnc@1325 844 // instance is an evicting hash table, the result we get back
johnc@1325 845 // could be from evicting the card ptr in an already occupied
johnc@1325 846 // bucket (in which case we have replaced the card ptr in the
johnc@1325 847 // bucket with card_ptr and "defer" is set to false). To avoid
johnc@1325 848 // having a data structure (updates to which would need a lock)
johnc@1325 849 // to hold these unprocessed dirty cards, we need to immediately
johnc@1325 850 // process card_ptr. The actions needed to be taken on return
johnc@1325 851 // from cache_insert are summarized in the following table:
johnc@1325 852 //
johnc@1325 853 // res defer action
johnc@1325 854 // --------------------------------------------------------------
johnc@1325 855 // null false card evicted from _card_counts & replaced with
johnc@1325 856 // card_ptr; evicted ptr added to hot cache.
johnc@1325 857 // No need to process res; immediately process card_ptr
johnc@1325 858 //
johnc@1325 859 // null true card not evicted from _card_counts; card_ptr added
johnc@1325 860 // to hot cache.
johnc@1325 861 // Nothing to do.
johnc@1325 862 //
johnc@1325 863 // non-null false card evicted from _card_counts & replaced with
johnc@1325 864 // card_ptr; evicted ptr is currently "cold" or
johnc@1325 865 // caused an eviction from the hot cache.
johnc@1325 866 // Immediately process res; process card_ptr.
johnc@1325 867 //
johnc@1325 868 // non-null true card not evicted from _card_counts; card_ptr is
johnc@1325 869 // currently cold, or caused an eviction from hot
johnc@1325 870 // cache.
johnc@1325 871 // Immediately process res; no need to process card_ptr.
johnc@1325 872
johnc@2060 873
johnc@1325 874 jbyte* res = card_ptr;
johnc@1325 875 bool defer = false;
johnc@2060 876
johnc@2060 877 // This gets set to true if the card being refined has references
johnc@2060 878 // that point into the collection set.
johnc@2060 879 bool oops_into_cset = false;
johnc@2060 880
ysr@777 881 if (_cg1r->use_cache()) {
johnc@1325 882 jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
johnc@1325 883 if (res != NULL && (res != card_ptr || defer)) {
johnc@1325 884 start = _ct_bs->addr_for(res);
johnc@1325 885 r = _g1->heap_region_containing(start);
johnc@1325 886 if (r == NULL) {
johnc@1325 887 assert(_g1->is_in_permanent(start), "Or else where?");
johnc@1325 888 } else {
johnc@2021 889 // Checking whether the region we got back from the cache
johnc@2021 890 // is young here is inappropriate. The region could have been
johnc@2021 891 // freed, reallocated and tagged as young while in the cache.
johnc@2021 892 // Hence we could see its young type change at any time.
johnc@2021 893 //
johnc@2021 894 // Process card pointer we get back from the hot card cache. This
johnc@2021 895 // will check whether the region containing the card is young
johnc@2021 896 // _after_ checking that the region has been allocated from.
johnc@2060 897 oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
johnc@2060 898 false /* check_for_refs_into_cset */);
johnc@2060 899 // The above call to concurrentRefineOneCard_impl is only
johnc@2060 900 // performed if the hot card cache is enabled. This cache is
johnc@2060 901 // disabled during an evacuation pause - which is the only
johnc@2060 902 // time when we need know if the card contains references
johnc@2060 903 // that point into the collection set. Also when the hot card
johnc@2060 904 // cache is enabled, this code is executed by the concurrent
johnc@2060 905 // refine threads - rather than the GC worker threads - and
johnc@2060 906 // concurrentRefineOneCard_impl will return false.
johnc@2060 907 assert(!oops_into_cset, "should not see true here");
johnc@1325 908 }
ysr@777 909 }
ysr@777 910 }
ysr@777 911
johnc@1325 912 if (!defer) {
johnc@2060 913 oops_into_cset =
johnc@2060 914 concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
johnc@2060 915 // We should only be detecting that the card contains references
johnc@2060 916 // that point into the collection set if the current thread is
johnc@2060 917 // a GC worker thread.
johnc@2060 918 assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
johnc@2060 919 "invalid result at non safepoint");
ysr@777 920 }
johnc@2060 921 return oops_into_cset;
ysr@777 922 }
ysr@777 923
ysr@777 924 class HRRSStatsIter: public HeapRegionClosure {
ysr@777 925 size_t _occupied;
ysr@777 926 size_t _total_mem_sz;
ysr@777 927 size_t _max_mem_sz;
ysr@777 928 HeapRegion* _max_mem_sz_region;
ysr@777 929 public:
ysr@777 930 HRRSStatsIter() :
ysr@777 931 _occupied(0),
ysr@777 932 _total_mem_sz(0),
ysr@777 933 _max_mem_sz(0),
ysr@777 934 _max_mem_sz_region(NULL)
ysr@777 935 {}
ysr@777 936
ysr@777 937 bool doHeapRegion(HeapRegion* r) {
ysr@777 938 if (r->continuesHumongous()) return false;
ysr@777 939 size_t mem_sz = r->rem_set()->mem_size();
ysr@777 940 if (mem_sz > _max_mem_sz) {
ysr@777 941 _max_mem_sz = mem_sz;
ysr@777 942 _max_mem_sz_region = r;
ysr@777 943 }
ysr@777 944 _total_mem_sz += mem_sz;
ysr@777 945 size_t occ = r->rem_set()->occupied();
ysr@777 946 _occupied += occ;
ysr@777 947 return false;
ysr@777 948 }
ysr@777 949 size_t total_mem_sz() { return _total_mem_sz; }
ysr@777 950 size_t max_mem_sz() { return _max_mem_sz; }
ysr@777 951 size_t occupied() { return _occupied; }
ysr@777 952 HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
ysr@777 953 };
ysr@777 954
iveresov@1229 955 class PrintRSThreadVTimeClosure : public ThreadClosure {
iveresov@1229 956 public:
iveresov@1229 957 virtual void do_thread(Thread *t) {
iveresov@1229 958 ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
iveresov@1229 959 gclog_or_tty->print(" %5.2f", crt->vtime_accum());
iveresov@1229 960 }
iveresov@1229 961 };
iveresov@1229 962
// Print a summary of remembered-set activity and footprint to the GC log:
// optional card-repeat and out-of-region histograms, how many cards and
// buffers were processed by the concurrent refinement threads vs. the
// mutator threads, per-refinement-thread vtimes, and per-region rem set
// memory statistics gathered by HRRSStatsIter.
void G1RemSet::print_summary_info() {
  G1CollectedHeap* g1 = G1CollectedHeap::heap();

#if CARD_REPEAT_HISTO
  gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
  gclog_or_tty->print_cr("  # of repeats --> # of cards with that number.");
  card_repeat_count.print_on(gclog_or_tty);
#endif

  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
    gclog_or_tty->print_cr("  # of CS ptrs --> # of cards with that number.");
    out_of_histo.print_on(gclog_or_tty);
  }
  gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
                         _conc_refine_cards);
  // Break the completed-buffer count down by who processed them.
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  jint tot_processed_buffers =
    dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
  gclog_or_tty->print_cr("  Of %d completed buffers:", tot_processed_buffers);
  gclog_or_tty->print_cr("     %8d (%5.1f%%) by conc RS threads.",
                dcqs.processed_buffers_rs_thread(),
                100.0*(float)dcqs.processed_buffers_rs_thread()/
                (float)tot_processed_buffers);
  gclog_or_tty->print_cr("     %8d (%5.1f%%) by mutator threads.",
                dcqs.processed_buffers_mut(),
                100.0*(float)dcqs.processed_buffers_mut()/
                (float)tot_processed_buffers);
  // Per-thread virtual times for the concurrent refinement threads.
  gclog_or_tty->print_cr("  Conc RS threads times(s)");
  PrintRSThreadVTimeClosure p;
  gclog_or_tty->print("     ");
  g1->concurrent_g1_refine()->threads_do(&p);
  gclog_or_tty->print_cr("");

  // Walk all regions collecting rem set footprint statistics.
  HRRSStatsIter blk;
  g1->heap_region_iterate(&blk);
  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
                         "  Max = " SIZE_FORMAT "K.",
                         blk.total_mem_sz()/K, blk.max_mem_sz()/K);
  gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
                         " free_lists = " SIZE_FORMAT "K.",
                         HeapRegionRemSet::static_mem_size()/K,
                         HeapRegionRemSet::fl_mem_size()/K);
  gclog_or_tty->print_cr("    %d occupied cards represented.",
                         blk.occupied());
  gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
}
johnc@2060 1015
// Flush outstanding dirty-card log buffers into the remembered sets so
// that a subsequent heap verification sees up-to-date rem sets.  Only
// does anything when G1HRRSFlushLogBuffersOnVerify is set, verification
// before/after GC was requested, and this is not a full collection.
void G1RemSet::prepare_for_verify() {
  if (G1HRRSFlushLogBuffersOnVerify &&
      (VerifyBeforeGC || VerifyAfterGC)
      &&  !_g1->full_collection()) {
    cleanupHRRS();
    // Process the card queues non-concurrently for the flush below.
    _g1->set_refine_cte_cl_concurrency(false);
    if (SafepointSynchronize::is_at_safepoint()) {
      DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
      dcqs.concatenate_logs();
    }
    // Temporarily disable the hot card cache so every card is processed
    // immediately by updateRS(); restore the previous setting afterwards.
    bool cg1r_use_cache = _cg1r->use_cache();
    _cg1r->set_use_cache(false);
    DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
    updateRS(&into_cset_dcq, 0);
    // We are outside a pause, so any cards recorded as pointing into the
    // collection set are not needed; discard them.
    _g1->into_cset_dirty_card_queue_set().clear();
    _cg1r->set_use_cache(cg1r_use_cache);

    assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
  }
}

mercurial