src/share/vm/gc_implementation/g1/g1RemSet.cpp

author       tonyp
date         Tue, 21 Jun 2011 15:23:07 -0400
changeset    2974:e8b0b0392037
parent       2962:ae5b2f1dcf12
child        3175:4dfb2df418f2
permissions  -rw-r--r--

7046182: G1: remove unnecessary iterations over the collection set
Summary: Remove two unnecessary iterations over the collection set which are supposed to prepare the RSets of the CSet regions for parallel iteration (we'll make sure this is done incrementally). I'll piggyback on this CR the removal of the G1_REM_SET_LOGGING code.
Reviewed-by: brutisso, johnc

ysr@777 1 /*
johnc@2504 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/bufferingOopClosure.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@2314 32 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@2314 33 #include "gc_implementation/g1/g1RemSet.inline.hpp"
stefank@2314 34 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 35 #include "memory/iterator.hpp"
stefank@2314 36 #include "oops/oop.inline.hpp"
stefank@2314 37 #include "utilities/intHisto.hpp"
ysr@777 38
ysr@777 39 #define CARD_REPEAT_HISTO 0
ysr@777 40
ysr@777 41 #if CARD_REPEAT_HISTO
ysr@777 42 static size_t ct_freq_sz;
ysr@777 43 static jbyte* ct_freq = NULL;
ysr@777 44
ysr@777 45 void init_ct_freq_table(size_t heap_sz_bytes) {
ysr@777 46 if (ct_freq == NULL) {
ysr@777 47 ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
ysr@777 48 ct_freq = new jbyte[ct_freq_sz];
ysr@777 49 for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
ysr@777 50 }
ysr@777 51 }
ysr@777 52
ysr@777 53 void ct_freq_note_card(size_t index) {
ysr@777 54 assert(0 <= index && index < ct_freq_sz, "Bounds error.");
ysr@777 55 if (ct_freq[index] < 100) { ct_freq[index]++; }
ysr@777 56 }
ysr@777 57
ysr@777 58 static IntHistogram card_repeat_count(10, 10);
ysr@777 59
ysr@777 60 void ct_freq_update_histo_and_reset() {
ysr@777 61 for (size_t j = 0; j < ct_freq_sz; j++) {
ysr@777 62 card_repeat_count.add_entry(ct_freq[j]);
ysr@777 63 ct_freq[j] = 0;
ysr@777 64 }
ysr@777 65
ysr@777 66 }
ysr@777 67 #endif
ysr@777 68
johnc@2216 69 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
johnc@2216 70 : _g1(g1), _conc_refine_cards(0),
johnc@2216 71 _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
ysr@777 72 _cg1r(g1->concurrent_g1_refine()),
johnc@2060 73 _cset_rs_update_cl(NULL),
ysr@777 74 _cards_scanned(NULL), _total_cards_scanned(0)
ysr@777 75 {
ysr@777 76 _seq_task = new SubTasksDone(NumSeqTasks);
iveresov@1051 77 guarantee(n_workers() > 0, "There should be some workers");
johnc@2060 78 _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers());
iveresov@1051 79 for (uint i = 0; i < n_workers(); i++) {
johnc@2060 80 _cset_rs_update_cl[i] = NULL;
iveresov@1051 81 }
ysr@777 82 }
ysr@777 83
johnc@2216 84 G1RemSet::~G1RemSet() {
ysr@777 85 delete _seq_task;
iveresov@1051 86 for (uint i = 0; i < n_workers(); i++) {
johnc@2060 87 assert(_cset_rs_update_cl[i] == NULL, "it should be");
iveresov@1051 88 }
johnc@2060 89 FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
ysr@777 90 }
ysr@777 91
ysr@777 92 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
ysr@777 93 if (_g1->is_in_g1_reserved(mr.start())) {
ysr@777 94 _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
ysr@777 95 if (_start_first == NULL) _start_first = mr.start();
ysr@777 96 }
ysr@777 97 }
ysr@777 98
ysr@777 99 class ScanRSClosure : public HeapRegionClosure {
ysr@777 100 size_t _cards_done, _cards;
ysr@777 101 G1CollectedHeap* _g1h;
ysr@777 102 OopsInHeapRegionClosure* _oc;
ysr@777 103 G1BlockOffsetSharedArray* _bot_shared;
ysr@777 104 CardTableModRefBS *_ct_bs;
ysr@777 105 int _worker_i;
iveresov@1696 106 int _block_size;
ysr@777 107 bool _try_claimed;
ysr@777 108 public:
ysr@777 109 ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
ysr@777 110 _oc(oc),
ysr@777 111 _cards(0),
ysr@777 112 _cards_done(0),
ysr@777 113 _worker_i(worker_i),
ysr@777 114 _try_claimed(false)
ysr@777 115 {
ysr@777 116 _g1h = G1CollectedHeap::heap();
ysr@777 117 _bot_shared = _g1h->bot_shared();
ysr@777 118 _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
iveresov@1696 119 _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
ysr@777 120 }
ysr@777 121
ysr@777 122 void set_try_claimed() { _try_claimed = true; }
ysr@777 123
ysr@777 124 void scanCard(size_t index, HeapRegion *r) {
ysr@777 125 DirtyCardToOopClosure* cl =
ysr@777 126 r->new_dcto_closure(_oc,
ysr@777 127 CardTableModRefBS::Precise,
ysr@777 128 HeapRegionDCTOC::IntoCSFilterKind);
ysr@777 129
ysr@777 130 // Set the "from" region in the closure.
ysr@777 131 _oc->set_region(r);
ysr@777 132 HeapWord* card_start = _bot_shared->address_for_index(index);
ysr@777 133 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
ysr@777 134 Space *sp = SharedHeap::heap()->space_containing(card_start);
tonyp@2849 135 MemRegion sm_region = sp->used_region_at_save_marks();
ysr@777 136 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
tonyp@2849 137 if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
tonyp@2849 138 // We mark the card as "claimed" lazily (so races are possible
tonyp@2849 139 // but they're benign), which reduces the number of duplicate
tonyp@2849 140 // scans (the rsets of the regions in the cset can intersect).
tonyp@2849 141 _ct_bs->set_card_claimed(index);
tonyp@2849 142 _cards_done++;
ysr@777 143 cl->do_MemRegion(mr);
ysr@777 144 }
ysr@777 145 }
ysr@777 146
ysr@777 147 void printCard(HeapRegion* card_region, size_t card_index,
ysr@777 148 HeapWord* card_start) {
ysr@777 149 gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
ysr@777 150 "RS names card %p: "
ysr@777 151 "[" PTR_FORMAT ", " PTR_FORMAT ")",
ysr@777 152 _worker_i,
ysr@777 153 card_region->bottom(), card_region->end(),
ysr@777 154 card_index,
ysr@777 155 card_start, card_start + G1BlockOffsetSharedArray::N_words);
ysr@777 156 }
ysr@777 157
ysr@777 158 bool doHeapRegion(HeapRegion* r) {
ysr@777 159 assert(r->in_collection_set(), "should only be called on elements of CS.");
ysr@777 160 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 161 if (hrrs->iter_is_complete()) return false; // All done.
ysr@777 162 if (!_try_claimed && !hrrs->claim_iter()) return false;
tonyp@2849 163 // If we ever free the collection set concurrently, we should also
tonyp@2849 164 // clear the card table concurrently; therefore, we won't need to
tonyp@2849 165 // add regions of the collection set to the dirty cards region list.
apetrusenko@1231 166 _g1h->push_dirty_cards_region(r);
ysr@777 167 // If we didn't return above, then
ysr@777 168 // _try_claimed || r->claim_iter()
ysr@777 169 // is true: either we're supposed to work on claimed-but-not-complete
ysr@777 170 // regions, or we successfully claimed the region.
ysr@777 171 HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
ysr@777 172 hrrs->init_iterator(iter);
ysr@777 173 size_t card_index;
iveresov@1696 174
iveresov@1696 175 // We claim cards in blocks so as to reduce contention. The block size is determined by
iveresov@1696 176 // the G1RSetScanBlockSize parameter.
iveresov@1696 177 size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1696 178 for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
iveresov@1696 179 if (current_card >= jump_to_card + _block_size) {
iveresov@1696 180 jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1182 181 }
iveresov@1696 182 if (current_card < jump_to_card) continue;
ysr@777 183 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
ysr@777 184 #if 0
ysr@777 185 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
ysr@777 186 card_start, card_start + CardTableModRefBS::card_size_in_words);
ysr@777 187 #endif
ysr@777 188
ysr@777 189 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
ysr@777 190 assert(card_region != NULL, "Yielding cards not in the heap?");
ysr@777 191 _cards++;
ysr@777 192
apetrusenko@1231 193 if (!card_region->is_on_dirty_cards_region_list()) {
apetrusenko@1231 194 _g1h->push_dirty_cards_region(card_region);
apetrusenko@1231 195 }
apetrusenko@1231 196
tonyp@2849 197 // If the card is dirty, then we will scan it during updateRS.
tonyp@2849 198 if (!card_region->in_collection_set() &&
tonyp@2849 199 !_ct_bs->is_card_dirty(card_index)) {
tonyp@2849 200 scanCard(card_index, card_region);
ysr@777 201 }
ysr@777 202 }
iveresov@1182 203 if (!_try_claimed) {
iveresov@1182 204 hrrs->set_iter_complete();
iveresov@1182 205 }
ysr@777 206 return false;
ysr@777 207 }
ysr@777 208 size_t cards_done() { return _cards_done;}
ysr@777 209 size_t cards_looked_up() { return _cards;}
ysr@777 210 };
ysr@777 211
ysr@777 212 // We want the parallel threads to start their scanning at
ysr@777 213 // different collection set regions to avoid contention.
ysr@777 214 // If we have:
ysr@777 215 // n collection set regions
ysr@777 216 // p threads
ysr@777 217 // Then thread t will start at region t * floor (n/p)
ysr@777 218
johnc@2216 219 HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
ysr@777 220 HeapRegion* result = _g1p->collection_set();
ysr@777 221 if (ParallelGCThreads > 0) {
ysr@777 222 size_t cs_size = _g1p->collection_set_size();
ysr@777 223 int n_workers = _g1->workers()->total_workers();
ysr@777 224 size_t cs_spans = cs_size / n_workers;
ysr@777 225 size_t ind = cs_spans * worker_i;
ysr@777 226 for (size_t i = 0; i < ind; i++)
ysr@777 227 result = result->next_in_collection_set();
ysr@777 228 }
ysr@777 229 return result;
ysr@777 230 }
ysr@777 231
johnc@2216 232 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
ysr@777 233 double rs_time_start = os::elapsedTime();
ysr@777 234 HeapRegion *startRegion = calculateStartRegion(worker_i);
ysr@777 235
iveresov@1696 236 ScanRSClosure scanRScl(oc, worker_i);
ysr@777 237 _g1->collection_set_iterate_from(startRegion, &scanRScl);
ysr@777 238 scanRScl.set_try_claimed();
ysr@777 239 _g1->collection_set_iterate_from(startRegion, &scanRScl);
ysr@777 240
iveresov@1696 241 double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
ysr@777 242
ysr@777 243 assert( _cards_scanned != NULL, "invariant" );
ysr@777 244 _cards_scanned[worker_i] = scanRScl.cards_done();
ysr@777 245
ysr@777 246 _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
ysr@777 247 }
ysr@777 248
johnc@2060 249 // Closure used for updating RSets and recording references that
johnc@2060 250 // point into the collection set. Only called during an
johnc@2060 251 // evacuation pause.
ysr@777 252
johnc@2060 253 class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
johnc@2060 254 G1RemSet* _g1rs;
johnc@2060 255 DirtyCardQueue* _into_cset_dcq;
johnc@2060 256 public:
johnc@2060 257 RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
johnc@2060 258 DirtyCardQueue* into_cset_dcq) :
johnc@2060 259 _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
johnc@2060 260 {}
johnc@2060 261 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
johnc@2060 262 // The only time we care about recording cards that
johnc@2060 263 // contain references that point into the collection set
johnc@2060 264 // is during RSet updating within an evacuation pause.
johnc@2060 265 // In this case worker_i should be the id of a GC worker thread.
johnc@2060 266 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
brutisso@2646 267 assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
johnc@2060 268
johnc@2060 269 if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
johnc@2060 270 // 'card_ptr' contains references that point into the collection
johnc@2060 271 // set. We need to record the card in the DCQS
johnc@2060 272 // (G1CollectedHeap::into_cset_dirty_card_queue_set())
johnc@2060 273 // that's used for that purpose.
johnc@2060 274 //
johnc@2060 275 // Enqueue the card
johnc@2060 276 _into_cset_dcq->enqueue(card_ptr);
johnc@2060 277 }
johnc@2060 278 return true;
johnc@2060 279 }
johnc@2060 280 };
johnc@2060 281
johnc@2216 282 void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
ysr@777 283 double start = os::elapsedTime();
johnc@2060 284 // Apply the given closure to all remaining log entries.
johnc@2060 285 RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
johnc@2060 286 _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
johnc@2060 287
iveresov@1229 288 // Now there should be no dirty cards.
iveresov@1229 289 if (G1RSLogCheckCardTable) {
iveresov@1229 290 CountNonCleanMemRegionClosure cl(_g1);
iveresov@1229 291 _ct_bs->mod_card_iterate(&cl);
iveresov@1229 292 // XXX This isn't true any more: keeping cards of young regions
iveresov@1229 293 // marked dirty broke it. Need some reasonable fix.
iveresov@1229 294 guarantee(cl.n() == 0, "Card table should be clean.");
ysr@777 295 }
iveresov@1229 296
ysr@777 297 _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
ysr@777 298 }
ysr@777 299
ysr@777 300 class CountRSSizeClosure: public HeapRegionClosure {
ysr@777 301 size_t _n;
ysr@777 302 size_t _tot;
ysr@777 303 size_t _max;
ysr@777 304 HeapRegion* _max_r;
ysr@777 305 enum {
ysr@777 306 N = 20,
ysr@777 307 MIN = 6
ysr@777 308 };
ysr@777 309 int _histo[N];
ysr@777 310 public:
ysr@777 311 CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
ysr@777 312 for (int i = 0; i < N; i++) _histo[i] = 0;
ysr@777 313 }
ysr@777 314 bool doHeapRegion(HeapRegion* r) {
ysr@777 315 if (!r->continuesHumongous()) {
ysr@777 316 size_t occ = r->rem_set()->occupied();
ysr@777 317 _n++;
ysr@777 318 _tot += occ;
ysr@777 319 if (occ > _max) {
ysr@777 320 _max = occ;
ysr@777 321 _max_r = r;
ysr@777 322 }
ysr@777 323 // Fit it into a histo bin.
ysr@777 324 int s = 1 << MIN;
ysr@777 325 int i = 0;
ysr@777 326 while (occ > (size_t) s && i < (N-1)) {
ysr@777 327 s = s << 1;
ysr@777 328 i++;
ysr@777 329 }
ysr@777 330 _histo[i]++;
ysr@777 331 }
ysr@777 332 return false;
ysr@777 333 }
ysr@777 334 size_t n() { return _n; }
ysr@777 335 size_t tot() { return _tot; }
ysr@777 336 size_t mx() { return _max; }
ysr@777 337 HeapRegion* mxr() { return _max_r; }
ysr@777 338 void print_histo() {
ysr@777 339 int mx = N;
ysr@777 340 while (mx >= 0) {
ysr@777 341 if (_histo[mx-1] > 0) break;
ysr@777 342 mx--;
ysr@777 343 }
ysr@777 344 gclog_or_tty->print_cr("Number of regions with given RS sizes:");
ysr@777 345 gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]);
ysr@777 346 for (int i = 1; i < mx-1; i++) {
ysr@777 347 gclog_or_tty->print_cr(" %8d - %8d %8d",
ysr@777 348 (1 << (MIN + i - 1)) + 1,
ysr@777 349 1 << (MIN + i),
ysr@777 350 _histo[i]);
ysr@777 351 }
ysr@777 352 gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
ysr@777 353 }
ysr@777 354 };
ysr@777 355
johnc@2216 356 void G1RemSet::cleanupHRRS() {
ysr@777 357 HeapRegionRemSet::cleanup();
ysr@777 358 }
ysr@777 359
johnc@2216 360 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
ysr@777 361 int worker_i) {
ysr@777 362 #if CARD_REPEAT_HISTO
ysr@777 363 ct_freq_update_histo_and_reset();
ysr@777 364 #endif
ysr@777 365 if (worker_i == 0) {
ysr@777 366 _cg1r->clear_and_record_card_counts();
ysr@777 367 }
ysr@777 368
ysr@777 369 // Make this into a command-line flag...
ysr@777 370 if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
ysr@777 371 CountRSSizeClosure count_cl;
ysr@777 372 _g1->heap_region_iterate(&count_cl);
ysr@777 373 gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
ysr@777 374 "max region is " PTR_FORMAT,
ysr@777 375 count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
ysr@777 376 count_cl.mx(), count_cl.mxr());
ysr@777 377 count_cl.print_histo();
ysr@777 378 }
ysr@777 379
johnc@2060 380 // We cache the value of the 'oc' closure in the appropriate slot in the
johnc@2060 381 // _cset_rs_update_cl array for this worker
johnc@2060 382 assert(worker_i < (int)n_workers(), "sanity");
johnc@2060 383 _cset_rs_update_cl[worker_i] = oc;
johnc@2060 384
johnc@2060 385 // A DirtyCardQueue that is used to hold cards containing references
johnc@2060 386 // that point into the collection set. This DCQ is associated with a
johnc@2060 387 // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
johnc@2060 388 // circumstances (i.e. the pause successfully completes), these cards
johnc@2060 389 // are just discarded (there's no need to update the RSets of regions
johnc@2060 390 // that were in the collection set - after the pause these regions
johnc@2060 391 // are wholly 'free' of live objects). In the event of an evacuation
johnc@2060 392 // failure the cards/buffers in this queue set are:
johnc@2060 393 // * passed to the DirtyCardQueueSet that is used to manage deferred
johnc@2060 394 // RSet updates, or
johnc@2060 395 // * scanned for references that point into the collection set
johnc@2060 396 // and the RSet of the corresponding region in the collection set
johnc@2060 397 // is updated immediately.
johnc@2060 398 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
johnc@2060 399
johnc@2063 400 assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
johnc@2063 401
johnc@2063 402 // The two flags below were introduced temporarily to serialize
johnc@2063 403 // the updating and scanning of remembered sets. There are some
johnc@2063 404 // race conditions when these two operations are done in parallel
johnc@2063 405 // and they are causing failures. When we resolve said race
johnc@2063 406 // conditions, we'll revert back to parallel remembered set
johnc@2063 407 // updating and scanning. See CRs 6677707 and 6677708.
johnc@2063 408 if (G1UseParallelRSetUpdating || (worker_i == 0)) {
johnc@2063 409 updateRS(&into_cset_dcq, worker_i);
ysr@777 410 } else {
johnc@2063 411 _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
johnc@2063 412 _g1p->record_update_rs_time(worker_i, 0.0);
johnc@2063 413 }
johnc@2063 414 if (G1UseParallelRSetScanning || (worker_i == 0)) {
johnc@2063 415 scanRS(oc, worker_i);
johnc@2063 416 } else {
johnc@2063 417 _g1p->record_scan_rs_time(worker_i, 0.0);
ysr@777 418 }
johnc@2060 419
johnc@2060 420 // We now clear the cached value of _cset_rs_update_cl for this worker
johnc@2060 421 _cset_rs_update_cl[worker_i] = NULL;
ysr@777 422 }
ysr@777 423
johnc@2216 424 void G1RemSet::prepare_for_oops_into_collection_set_do() {
ysr@777 425 cleanupHRRS();
ysr@777 426 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
ysr@777 427 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 428 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 429 dcqs.concatenate_logs();
ysr@777 430
ysr@777 431 if (ParallelGCThreads > 0) {
jmasa@2188 432 _seq_task->set_n_threads((int)n_workers());
ysr@777 433 }
ysr@777 434 guarantee( _cards_scanned == NULL, "invariant" );
ysr@777 435 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
apetrusenko@980 436 for (uint i = 0; i < n_workers(); ++i) {
apetrusenko@980 437 _cards_scanned[i] = 0;
apetrusenko@980 438 }
ysr@777 439 _total_cards_scanned = 0;
ysr@777 440 }
ysr@777 441
ysr@777 442
johnc@2060 443 // This closure, applied to a DirtyCardQueueSet, is used to immediately
johnc@2060 444 // update the RSets for the regions in the CSet. For each card it iterates
johnc@2060 445 // through the oops which coincide with that card. It scans the reference
johnc@2060 446 // fields in each oop; when it finds an oop that points into the collection
johnc@2060 447 // set, the RSet for the region containing the referenced object is updated.
johnc@2060 448 class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
iveresov@1051 449 G1CollectedHeap* _g1;
johnc@2060 450 CardTableModRefBS* _ct_bs;
iveresov@1051 451 public:
johnc@2060 452 UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
johnc@2060 453 CardTableModRefBS* bs):
johnc@2060 454 _g1(g1), _ct_bs(bs)
johnc@2060 455 { }
johnc@2060 456
johnc@2060 457 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
johnc@2060 458 // Construct the region representing the card.
johnc@2060 459 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@2060 460 // And find the region containing it.
johnc@2060 461 HeapRegion* r = _g1->heap_region_containing(start);
johnc@2060 462 assert(r != NULL, "unexpected null");
johnc@2060 463
johnc@2060 464 // Scan oops in the card looking for references into the collection set
johnc@2060 465 HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
johnc@2060 466 MemRegion scanRegion(start, end);
johnc@2060 467
johnc@2060 468 UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
johnc@2060 469 FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
johnc@2060 470 FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);
johnc@2060 471
johnc@2060 472 // We can pass false as the "filter_young" parameter here as:
johnc@2060 473 // * we should be in a STW pause,
johnc@2060 474 // * the DCQS to which this closure is applied is used to hold
johnc@2060 475 // references that point into the collection set from the prior
johnc@2060 476 // RSet updating,
johnc@2060 477 // * the post-write barrier shouldn't be logging updates to young
johnc@2060 478 // regions (but there is a situation where this can happen - see
johnc@2216 479 // the comment in G1RemSet::concurrentRefineOneCard below -
johnc@2060 480 // that should not be applicable here), and
johnc@2060 481 // * during actual RSet updating, the filtering of cards in young
johnc@2060 482 // regions in HeapRegion::oops_on_card_seq_iterate_careful is
johnc@2060 483 // employed.
johnc@2060 484 // As a result, when this closure is applied to "refs into cset"
johnc@2060 485 // DCQS, we shouldn't see any cards in young regions.
johnc@2060 486 update_rs_cl.set_region(r);
johnc@2060 487 HeapWord* stop_point =
johnc@2060 488 r->oops_on_card_seq_iterate_careful(scanRegion,
tonyp@2849 489 &filter_then_update_rs_cset_oop_cl,
tonyp@2849 490 false /* filter_young */,
tonyp@2849 491 NULL /* card_ptr */);
johnc@2060 492
johnc@2060 493 // Since this is performed in the event of an evacuation failure,
johnc@2060 494 // we shouldn't see a non-null stop point
johnc@2060 495 assert(stop_point == NULL, "saw an unallocated region");
johnc@2060 496 return true;
iveresov@1051 497 }
iveresov@1051 498 };
iveresov@1051 499
johnc@2216 500 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
ysr@777 501 guarantee( _cards_scanned != NULL, "invariant" );
ysr@777 502 _total_cards_scanned = 0;
tonyp@2974 503 for (uint i = 0; i < n_workers(); ++i) {
ysr@777 504 _total_cards_scanned += _cards_scanned[i];
tonyp@2974 505 }
ysr@777 506 FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
ysr@777 507 _cards_scanned = NULL;
ysr@777 508 // Cleanup after copy
ysr@777 509 _g1->set_refine_cte_cl_concurrency(true);
ysr@777 510 // Set all cards back to clean.
ysr@777 511 _g1->cleanUpCardTable();
iveresov@1229 512
johnc@2060 513 DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
johnc@2060 514 int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
johnc@2060 515
iveresov@1051 516 if (_g1->evacuation_failed()) {
johnc@2060 517 // Restore remembered sets for the regions pointing into the collection set.
johnc@2060 518
iveresov@1051 519 if (G1DeferredRSUpdate) {
johnc@2060 520 // If deferred RS updates are enabled then we just need to transfer
johnc@2060 521 // the completed buffers from (a) the DirtyCardQueueSet used to hold
johnc@2060 522 // cards that contain references that point into the collection set
johnc@2060 523 // to (b) the DCQS used to hold the deferred RS updates
johnc@2060 524 _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
iveresov@1051 525 } else {
johnc@2060 526
johnc@2060 527 CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
johnc@2060 528 UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);
johnc@2060 529
johnc@2060 530 int n_completed_buffers = 0;
johnc@2060 531 while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
johnc@2060 532 0, 0, true)) {
johnc@2060 533 n_completed_buffers++;
johnc@2060 534 }
johnc@2060 535 assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
iveresov@1051 536 }
iveresov@1051 537 }
johnc@2060 538
johnc@2060 539 // Free any completed buffers in the DirtyCardQueueSet used to hold cards
johnc@2060 540 // which contain references that point into the collection set.
johnc@2060 541 _g1->into_cset_dirty_card_queue_set().clear();
johnc@2060 542 assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
johnc@2060 543 "all buffers should be freed");
johnc@2060 544 _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
ysr@777 545 }
ysr@777 546
ysr@777 547 class ScrubRSClosure: public HeapRegionClosure {
ysr@777 548 G1CollectedHeap* _g1h;
ysr@777 549 BitMap* _region_bm;
ysr@777 550 BitMap* _card_bm;
ysr@777 551 CardTableModRefBS* _ctbs;
ysr@777 552 public:
ysr@777 553 ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
ysr@777 554 _g1h(G1CollectedHeap::heap()),
ysr@777 555 _region_bm(region_bm), _card_bm(card_bm),
ysr@777 556 _ctbs(NULL)
ysr@777 557 {
ysr@777 558 ModRefBarrierSet* bs = _g1h->mr_bs();
ysr@777 559 guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
ysr@777 560 _ctbs = (CardTableModRefBS*)bs;
ysr@777 561 }
ysr@777 562
ysr@777 563 bool doHeapRegion(HeapRegion* r) {
ysr@777 564 if (!r->continuesHumongous()) {
ysr@777 565 r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
ysr@777 566 }
ysr@777 567 return false;
ysr@777 568 }
ysr@777 569 };
ysr@777 570
johnc@2216 571 void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
ysr@777 572 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 573 _g1->heap_region_iterate(&scrub_cl);
ysr@777 574 }
ysr@777 575
johnc@2216 576 void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
ysr@777 577 int worker_num, int claim_val) {
ysr@777 578 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 579 _g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
ysr@777 580 }
ysr@777 581
ysr@777 582
ysr@777 583 static IntHistogram out_of_histo(50, 50);
ysr@777 584
johnc@2060 585 class TriggerClosure : public OopClosure {
johnc@2060 586 bool _trigger;
johnc@2060 587 public:
johnc@2060 588 TriggerClosure() : _trigger(false) { }
johnc@2060 589 bool value() const { return _trigger; }
johnc@2060 590 template <class T> void do_oop_nv(T* p) { _trigger = true; }
johnc@2060 591 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 592 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 593 };
johnc@2060 594
johnc@2060 595 class InvokeIfNotTriggeredClosure: public OopClosure {
johnc@2060 596 TriggerClosure* _t;
johnc@2060 597 OopClosure* _oc;
johnc@2060 598 public:
johnc@2060 599 InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
johnc@2060 600 _t(t), _oc(oc) { }
johnc@2060 601 template <class T> void do_oop_nv(T* p) {
johnc@2060 602 if (!_t->value()) _oc->do_oop(p);
johnc@2060 603 }
johnc@2060 604 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 605 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 606 };
johnc@2060 607
johnc@2060 608 class Mux2Closure : public OopClosure {
johnc@2060 609 OopClosure* _c1;
johnc@2060 610 OopClosure* _c2;
johnc@2060 611 public:
johnc@2060 612 Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
johnc@2060 613 template <class T> void do_oop_nv(T* p) {
johnc@2060 614 _c1->do_oop(p); _c2->do_oop(p);
johnc@2060 615 }
johnc@2060 616 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 617 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 618 };
johnc@2060 619
johnc@2216 620 bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
johnc@2060 621 bool check_for_refs_into_cset) {
johnc@1325 622 // Construct the region representing the card.
johnc@1325 623 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@1325 624 // And find the region containing it.
johnc@1325 625 HeapRegion* r = _g1->heap_region_containing(start);
johnc@1325 626 assert(r != NULL, "unexpected null");
johnc@1325 627
johnc@1325 628 HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
johnc@1325 629 MemRegion dirtyRegion(start, end);
johnc@1325 630
johnc@1325 631 #if CARD_REPEAT_HISTO
johnc@2504 632 init_ct_freq_table(_g1->max_capacity());
johnc@1325 633 ct_freq_note_card(_ct_bs->index_for(start));
johnc@1325 634 #endif
johnc@1325 635
johnc@2302 636 assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL, "sanity");
johnc@2302 637 UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
johnc@2302 638 _g1->g1_rem_set(),
johnc@2302 639 _cset_rs_update_cl[worker_i],
johnc@2302 640 check_for_refs_into_cset,
johnc@2302 641 worker_i);
johnc@1325 642 update_rs_oop_cl.set_from(r);
johnc@2060 643
johnc@2060 644 TriggerClosure trigger_cl;
johnc@2060 645 FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
johnc@2060 646 InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
johnc@2060 647 Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
johnc@2060 648
johnc@2060 649 FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
johnc@2060 650 (check_for_refs_into_cset ?
johnc@2060 651 (OopClosure*)&mux :
johnc@2060 652 (OopClosure*)&update_rs_oop_cl));
johnc@1325 653
johnc@2021 654 // The region for the current card may be a young region. The
johnc@2021 655 // current card may have been a card that was evicted from the
johnc@2021 656 // card cache. When the card was inserted into the cache, we had
johnc@2021 657 // determined that its region was non-young. While in the cache,
johnc@2021 658 // the region may have been freed during a cleanup pause, reallocated
johnc@2021 659 // and tagged as young.
johnc@2021 660 //
johnc@2021 661 // We wish to filter out cards for such a region but the current
tonyp@2849 662 // thread, if we're running concurrently, may "see" the young type
johnc@2021 663 // change at any time (so an earlier "is_young" check may pass or
johnc@2021 664 // fail arbitrarily). We tell the iteration code to perform this
johnc@2021 665 // filtering when it has been determined that there has been an actual
johnc@2021 666 // allocation in this region, making it safe to check the young type.
johnc@2021 667 bool filter_young = true;
johnc@2021 668
johnc@1325 669 HeapWord* stop_point =
johnc@1325 670 r->oops_on_card_seq_iterate_careful(dirtyRegion,
johnc@2021 671 &filter_then_update_rs_oop_cl,
tonyp@2849 672 filter_young,
tonyp@2849 673 card_ptr);
johnc@2021 674
johnc@1325 675 // If stop_point is non-null, then we encountered an unallocated region
johnc@1325 676 // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
johnc@1325 677 // card and re-enqueue: if we put off the card until a GC pause, then the
johnc@1325 678 // unallocated portion will be filled in. Alternatively, we might try
johnc@1325 679 // the full complexity of the technique used in "regular" precleaning.
johnc@1325 680 if (stop_point != NULL) {
johnc@1325 681 // The card might have gotten re-dirtied and re-enqueued while we
johnc@1325 682 // worked. (In fact, it's pretty likely.)
johnc@1325 683 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
johnc@1325 684 *card_ptr = CardTableModRefBS::dirty_card_val();
johnc@1325 685 MutexLockerEx x(Shared_DirtyCardQ_lock,
johnc@1325 686 Mutex::_no_safepoint_check_flag);
johnc@1325 687 DirtyCardQueue* sdcq =
johnc@1325 688 JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
johnc@1325 689 sdcq->enqueue(card_ptr);
johnc@1325 690 }
johnc@1325 691 } else {
johnc@1325 692 out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
johnc@1325 693 _conc_refine_cards++;
johnc@1325 694 }
johnc@2060 695
johnc@2060 696 return trigger_cl.value();
johnc@1325 697 }
johnc@1325 698
johnc@2216 699 bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
johnc@2060 700 bool check_for_refs_into_cset) {
ysr@777 701 // If the card is no longer dirty, nothing to do.
johnc@2060 702 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
johnc@2060 703 // No need to return that this card contains refs that point
johnc@2060 704 // into the collection set.
johnc@2060 705 return false;
johnc@2060 706 }
ysr@777 707
ysr@777 708 // Construct the region representing the card.
ysr@777 709 HeapWord* start = _ct_bs->addr_for(card_ptr);
ysr@777 710 // And find the region containing it.
ysr@777 711 HeapRegion* r = _g1->heap_region_containing(start);
ysr@777 712 if (r == NULL) {
ysr@777 713 guarantee(_g1->is_in_permanent(start), "Or else where?");
johnc@2060 714 // Again no need to return that this card contains refs that
johnc@2060 715 // point into the collection set.
johnc@2060 716 return false; // Not in the G1 heap (might be in perm, for example.)
ysr@777 717 }
ysr@777 718 // Why do we have to check here whether a card is on a young region,
ysr@777 719 // given that we dirty young regions and, as a result, the
ysr@777 720 // post-barrier is supposed to filter them out and never to enqueue
ysr@777 721 // them? When we allocate a new region as the "allocation region" we
ysr@777 722 // actually dirty its cards after we release the lock, since card
ysr@777 723 // dirtying while holding the lock was a performance bottleneck. So,
ysr@777 724 // as a result, it is possible for other threads to actually
ysr@777 725 // allocate objects in the region (after they acquire the lock)
ysr@777 726 // before all the cards on the region are dirtied. This is unlikely,
ysr@777 727 // and it doesn't happen often, but it can happen. So, the extra
ysr@777 728 // check below filters out those cards.
iveresov@1072 729 if (r->is_young()) {
johnc@2060 730 return false;
ysr@777 731 }
ysr@777 732 // While we are processing RSet buffers during the collection, we
ysr@777 733 // actually don't want to scan any cards on the collection set,
ysr@777 734 // since we don't want to update remembered sets with entries that
ysr@777 735 // point into the collection set, given that live objects from the
ysr@777 736 // collection set are about to move and such entries will be stale
ysr@777 737 // very soon. This change also deals with a reliability issue which
ysr@777 738 // involves scanning a card in the collection set and coming across
ysr@777 739 // an array that was being chunked and looking malformed. Note,
ysr@777 740 // however, that if evacuation fails, we have to scan any objects
ysr@777 741 // that were not moved and create any missing entries.
ysr@777 742 if (r->in_collection_set()) {
johnc@2060 743 return false;
ysr@777 744 }
ysr@777 745
johnc@1325 746 // Should we defer processing the card?
johnc@1325 747 //
johnc@1325 748 // Previously the result from the insert_cache call would be
johnc@1325 749 // either card_ptr (implying that card_ptr was currently "cold"),
johnc@1325 750 // null (meaning we had inserted the card ptr into the "hot"
johnc@1325 751 // cache, which had some headroom), or a "hot" card ptr
johnc@1325 752 // extracted from the "hot" cache.
johnc@1325 753 //
johnc@1325 754 // Now that the _card_counts cache in the ConcurrentG1Refine
johnc@1325 755 // instance is an evicting hash table, the result we get back
johnc@1325 756 // could be from evicting the card ptr in an already occupied
johnc@1325 757 // bucket (in which case we have replaced the card ptr in the
johnc@1325 758 // bucket with card_ptr and "defer" is set to false). To avoid
johnc@1325 759 // having a data structure (updates to which would need a lock)
johnc@1325 760 // to hold these unprocessed dirty cards, we need to immediately
johnc@1325 761 // process card_ptr. The actions needed to be taken on return
johnc@1325 762 // from cache_insert are summarized in the following table:
johnc@1325 763 //
johnc@1325 764 // res defer action
johnc@1325 765 // --------------------------------------------------------------
johnc@1325 766 // null false card evicted from _card_counts & replaced with
johnc@1325 767 // card_ptr; evicted ptr added to hot cache.
johnc@1325 768 // No need to process res; immediately process card_ptr
johnc@1325 769 //
johnc@1325 770 // null true card not evicted from _card_counts; card_ptr added
johnc@1325 771 // to hot cache.
johnc@1325 772 // Nothing to do.
johnc@1325 773 //
johnc@1325 774 // non-null false card evicted from _card_counts & replaced with
johnc@1325 775 // card_ptr; evicted ptr is currently "cold" or
johnc@1325 776 // caused an eviction from the hot cache.
johnc@1325 777 // Immediately process res; process card_ptr.
johnc@1325 778 //
johnc@1325 779 // non-null true card not evicted from _card_counts; card_ptr is
johnc@1325 780 // currently cold, or caused an eviction from hot
johnc@1325 781 // cache.
johnc@1325 782 // Immediately process res; no need to process card_ptr.
johnc@1325 783
johnc@2060 784
johnc@1325 785 jbyte* res = card_ptr;
johnc@1325 786 bool defer = false;
johnc@2060 787
johnc@2060 788 // This gets set to true if the card being refined has references
johnc@2060 789 // that point into the collection set.
johnc@2060 790 bool oops_into_cset = false;
johnc@2060 791
ysr@777 792 if (_cg1r->use_cache()) {
johnc@1325 793 jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
johnc@1325 794 if (res != NULL && (res != card_ptr || defer)) {
johnc@1325 795 start = _ct_bs->addr_for(res);
johnc@1325 796 r = _g1->heap_region_containing(start);
johnc@1325 797 if (r == NULL) {
johnc@1325 798 assert(_g1->is_in_permanent(start), "Or else where?");
johnc@1325 799 } else {
johnc@2021 800 // Checking whether the region we got back from the cache
johnc@2021 801 // is young here is inappropriate. The region could have been
johnc@2021 802 // freed, reallocated and tagged as young while in the cache.
johnc@2021 803 // Hence we could see its young type change at any time.
johnc@2021 804 //
johnc@2021 805 // Process card pointer we get back from the hot card cache. This
johnc@2021 806 // will check whether the region containing the card is young
johnc@2021 807 // _after_ checking that the region has been allocated from.
johnc@2060 808 oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
johnc@2060 809 false /* check_for_refs_into_cset */);
johnc@2060 810 // The above call to concurrentRefineOneCard_impl is only
johnc@2060 811 // performed if the hot card cache is enabled. This cache is
johnc@2060 812 // disabled during an evacuation pause - which is the only
johnc@2060 813 // time when we need to know if the card contains references
johnc@2060 814 // that point into the collection set. Also when the hot card
johnc@2060 815 // cache is enabled, this code is executed by the concurrent
johnc@2060 816 // refine threads - rather than the GC worker threads - and
johnc@2060 817 // concurrentRefineOneCard_impl will return false.
johnc@2060 818 assert(!oops_into_cset, "should not see true here");
johnc@1325 819 }
ysr@777 820 }
ysr@777 821 }
ysr@777 822
johnc@1325 823 if (!defer) {
johnc@2060 824 oops_into_cset =
johnc@2060 825 concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
johnc@2060 826 // We should only be detecting that the card contains references
johnc@2060 827 // that point into the collection set if the current thread is
johnc@2060 828 // a GC worker thread.
johnc@2060 829 assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
johnc@2060 830 "invalid result at non safepoint");
ysr@777 831 }
johnc@2060 832 return oops_into_cset;
ysr@777 833 }
ysr@777 834
ysr@777 835 class HRRSStatsIter: public HeapRegionClosure {
ysr@777 836 size_t _occupied;
ysr@777 837 size_t _total_mem_sz;
ysr@777 838 size_t _max_mem_sz;
ysr@777 839 HeapRegion* _max_mem_sz_region;
ysr@777 840 public:
ysr@777 841 HRRSStatsIter() :
ysr@777 842 _occupied(0),
ysr@777 843 _total_mem_sz(0),
ysr@777 844 _max_mem_sz(0),
ysr@777 845 _max_mem_sz_region(NULL)
ysr@777 846 {}
ysr@777 847
ysr@777 848 bool doHeapRegion(HeapRegion* r) {
ysr@777 849 if (r->continuesHumongous()) return false;
ysr@777 850 size_t mem_sz = r->rem_set()->mem_size();
ysr@777 851 if (mem_sz > _max_mem_sz) {
ysr@777 852 _max_mem_sz = mem_sz;
ysr@777 853 _max_mem_sz_region = r;
ysr@777 854 }
ysr@777 855 _total_mem_sz += mem_sz;
ysr@777 856 size_t occ = r->rem_set()->occupied();
ysr@777 857 _occupied += occ;
ysr@777 858 return false;
ysr@777 859 }
ysr@777 860 size_t total_mem_sz() { return _total_mem_sz; }
ysr@777 861 size_t max_mem_sz() { return _max_mem_sz; }
ysr@777 862 size_t occupied() { return _occupied; }
ysr@777 863 HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
ysr@777 864 };
ysr@777 865
iveresov@1229 866 class PrintRSThreadVTimeClosure : public ThreadClosure {
iveresov@1229 867 public:
iveresov@1229 868 virtual void do_thread(Thread *t) {
iveresov@1229 869 ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
iveresov@1229 870 gclog_or_tty->print(" %5.2f", crt->vtime_accum());
iveresov@1229 871 }
iveresov@1229 872 };
iveresov@1229 873
johnc@2216 874 void G1RemSet::print_summary_info() {
ysr@777 875 G1CollectedHeap* g1 = G1CollectedHeap::heap();
ysr@777 876
ysr@777 877 #if CARD_REPEAT_HISTO
ysr@777 878 gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
ysr@777 879 gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
ysr@777 880 card_repeat_count.print_on(gclog_or_tty);
ysr@777 881 #endif
ysr@777 882
ysr@777 883 if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
ysr@777 884 gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
ysr@777 885 gclog_or_tty->print_cr(" # of CS ptrs --> # of cards with that number.");
ysr@777 886 out_of_histo.print_on(gclog_or_tty);
ysr@777 887 }
iveresov@1229 888 gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
iveresov@1229 889 _conc_refine_cards);
ysr@777 890 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 891 jint tot_processed_buffers =
ysr@777 892 dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
ysr@777 893 gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
iveresov@1229 894 gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
ysr@777 895 dcqs.processed_buffers_rs_thread(),
ysr@777 896 100.0*(float)dcqs.processed_buffers_rs_thread()/
ysr@777 897 (float)tot_processed_buffers);
ysr@777 898 gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
ysr@777 899 dcqs.processed_buffers_mut(),
ysr@777 900 100.0*(float)dcqs.processed_buffers_mut()/
ysr@777 901 (float)tot_processed_buffers);
iveresov@1229 902 gclog_or_tty->print_cr(" Conc RS threads times(s)");
iveresov@1229 903 PrintRSThreadVTimeClosure p;
iveresov@1229 904 gclog_or_tty->print(" ");
iveresov@1229 905 g1->concurrent_g1_refine()->threads_do(&p);
ysr@777 906 gclog_or_tty->print_cr("");
iveresov@1229 907
johnc@2216 908 HRRSStatsIter blk;
johnc@2216 909 g1->heap_region_iterate(&blk);
johnc@2216 910 gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
johnc@2216 911 " Max = " SIZE_FORMAT "K.",
johnc@2216 912 blk.total_mem_sz()/K, blk.max_mem_sz()/K);
johnc@2216 913 gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K,"
johnc@2216 914 " free_lists = " SIZE_FORMAT "K.",
johnc@2216 915 HeapRegionRemSet::static_mem_size()/K,
johnc@2216 916 HeapRegionRemSet::fl_mem_size()/K);
johnc@2216 917 gclog_or_tty->print_cr(" %d occupied cards represented.",
johnc@2216 918 blk.occupied());
johnc@2216 919 gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
johnc@2216 920 ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
johnc@2216 921 blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
johnc@2216 922 (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
johnc@2216 923 (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
johnc@2216 924 gclog_or_tty->print_cr(" Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
ysr@777 925 }
johnc@2060 926
johnc@2216 927 void G1RemSet::prepare_for_verify() {
iveresov@1072 928 if (G1HRRSFlushLogBuffersOnVerify &&
iveresov@1072 929 (VerifyBeforeGC || VerifyAfterGC)
iveresov@1072 930 && !_g1->full_collection()) {
ysr@777 931 cleanupHRRS();
ysr@777 932 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 933 if (SafepointSynchronize::is_at_safepoint()) {
ysr@777 934 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 935 dcqs.concatenate_logs();
ysr@777 936 }
ysr@777 937 bool cg1r_use_cache = _cg1r->use_cache();
ysr@777 938 _cg1r->set_use_cache(false);
johnc@2060 939 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
johnc@2060 940 updateRS(&into_cset_dcq, 0);
johnc@2060 941 _g1->into_cset_dirty_card_queue_set().clear();
ysr@777 942 _cg1r->set_use_cache(cg1r_use_cache);
iveresov@1072 943
iveresov@1072 944 assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
ysr@777 945 }
ysr@777 946 }