src/share/vm/gc_implementation/g1/g1RemSet.cpp

Sat, 06 Oct 2012 01:17:44 -0700

author
johnc
date
Sat, 06 Oct 2012 01:17:44 -0700
changeset 4173
8a5ea0a9ccc4
parent 4037
da91efe96a93
child 5014
5c93c1f61226
permissions
-rw-r--r--

7127708: G1: change task num types from int to uint in concurrent mark
Summary: Change the type of various task num fields, parameters etc to unsigned and rename them to be more consistent with the other collectors. Code changes were also reviewed by Vitaly Davidovich.
Reviewed-by: johnc
Contributed-by: Kaushik Srenevasan <kaushik@twitter.com>

ysr@777 1 /*
johnc@3466 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/bufferingOopClosure.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
brutisso@3923 32 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
stefank@2314 33 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@2314 34 #include "gc_implementation/g1/g1RemSet.inline.hpp"
stefank@2314 35 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 36 #include "memory/iterator.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "utilities/intHisto.hpp"
ysr@777 39
#define CARD_REPEAT_HISTO 0

#if CARD_REPEAT_HISTO
// Debug-only instrumentation: tracks how many times each card is
// refined between resets and folds the counts into a histogram.
static size_t ct_freq_sz;
static jbyte* ct_freq = NULL;

// Lazily allocate one saturating counter per card for the whole heap.
void init_ct_freq_table(size_t heap_sz_bytes) {
  if (ct_freq == NULL) {
    ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
    ct_freq = new jbyte[ct_freq_sz];
    for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
  }
}

// Record one refinement of the given card; counts saturate at 100.
void ct_freq_note_card(size_t index) {
  assert(0 <= index && index < ct_freq_sz, "Bounds error.");
  if (ct_freq[index] < 100) { ct_freq[index]++; }
}

static IntHistogram card_repeat_count(10, 10);

// Add every per-card count to the histogram, then zero the table.
void ct_freq_update_histo_and_reset() {
  for (size_t j = 0; j < ct_freq_sz; j++) {
    card_repeat_count.add_entry(ct_freq[j]);
    ct_freq[j] = 0;
  }
}
#endif
ysr@777 69
johnc@2216 70 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
johnc@2216 71 : _g1(g1), _conc_refine_cards(0),
johnc@2216 72 _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
ysr@777 73 _cg1r(g1->concurrent_g1_refine()),
johnc@2060 74 _cset_rs_update_cl(NULL),
ysr@777 75 _cards_scanned(NULL), _total_cards_scanned(0)
ysr@777 76 {
ysr@777 77 _seq_task = new SubTasksDone(NumSeqTasks);
iveresov@1051 78 guarantee(n_workers() > 0, "There should be some workers");
zgu@3900 79 _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
iveresov@1051 80 for (uint i = 0; i < n_workers(); i++) {
johnc@2060 81 _cset_rs_update_cl[i] = NULL;
iveresov@1051 82 }
ysr@777 83 }
ysr@777 84
johnc@2216 85 G1RemSet::~G1RemSet() {
ysr@777 86 delete _seq_task;
iveresov@1051 87 for (uint i = 0; i < n_workers(); i++) {
johnc@2060 88 assert(_cset_rs_update_cl[i] == NULL, "it should be");
iveresov@1051 89 }
zgu@3900 90 FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC);
ysr@777 91 }
ysr@777 92
ysr@777 93 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
ysr@777 94 if (_g1->is_in_g1_reserved(mr.start())) {
ysr@777 95 _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
ysr@777 96 if (_start_first == NULL) _start_first = mr.start();
ysr@777 97 }
ysr@777 98 }
ysr@777 99
ysr@777 100 class ScanRSClosure : public HeapRegionClosure {
ysr@777 101 size_t _cards_done, _cards;
ysr@777 102 G1CollectedHeap* _g1h;
ysr@777 103 OopsInHeapRegionClosure* _oc;
ysr@777 104 G1BlockOffsetSharedArray* _bot_shared;
ysr@777 105 CardTableModRefBS *_ct_bs;
ysr@777 106 int _worker_i;
iveresov@1696 107 int _block_size;
ysr@777 108 bool _try_claimed;
ysr@777 109 public:
ysr@777 110 ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
ysr@777 111 _oc(oc),
ysr@777 112 _cards(0),
ysr@777 113 _cards_done(0),
ysr@777 114 _worker_i(worker_i),
ysr@777 115 _try_claimed(false)
ysr@777 116 {
ysr@777 117 _g1h = G1CollectedHeap::heap();
ysr@777 118 _bot_shared = _g1h->bot_shared();
ysr@777 119 _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
iveresov@1696 120 _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
ysr@777 121 }
ysr@777 122
ysr@777 123 void set_try_claimed() { _try_claimed = true; }
ysr@777 124
ysr@777 125 void scanCard(size_t index, HeapRegion *r) {
johnc@3219 126 // Stack allocate the DirtyCardToOopClosure instance
johnc@3219 127 HeapRegionDCTOC cl(_g1h, r, _oc,
johnc@3219 128 CardTableModRefBS::Precise,
johnc@3219 129 HeapRegionDCTOC::IntoCSFilterKind);
ysr@777 130
ysr@777 131 // Set the "from" region in the closure.
ysr@777 132 _oc->set_region(r);
ysr@777 133 HeapWord* card_start = _bot_shared->address_for_index(index);
ysr@777 134 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
ysr@777 135 Space *sp = SharedHeap::heap()->space_containing(card_start);
tonyp@2849 136 MemRegion sm_region = sp->used_region_at_save_marks();
ysr@777 137 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
tonyp@2849 138 if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
tonyp@2849 139 // We make the card as "claimed" lazily (so races are possible
tonyp@2849 140 // but they're benign), which reduces the number of duplicate
tonyp@2849 141 // scans (the rsets of the regions in the cset can intersect).
tonyp@2849 142 _ct_bs->set_card_claimed(index);
tonyp@2849 143 _cards_done++;
johnc@3219 144 cl.do_MemRegion(mr);
ysr@777 145 }
ysr@777 146 }
ysr@777 147
ysr@777 148 void printCard(HeapRegion* card_region, size_t card_index,
ysr@777 149 HeapWord* card_start) {
ysr@777 150 gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
ysr@777 151 "RS names card %p: "
ysr@777 152 "[" PTR_FORMAT ", " PTR_FORMAT ")",
ysr@777 153 _worker_i,
ysr@777 154 card_region->bottom(), card_region->end(),
ysr@777 155 card_index,
ysr@777 156 card_start, card_start + G1BlockOffsetSharedArray::N_words);
ysr@777 157 }
ysr@777 158
ysr@777 159 bool doHeapRegion(HeapRegion* r) {
ysr@777 160 assert(r->in_collection_set(), "should only be called on elements of CS.");
ysr@777 161 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 162 if (hrrs->iter_is_complete()) return false; // All done.
ysr@777 163 if (!_try_claimed && !hrrs->claim_iter()) return false;
tonyp@2849 164 // If we ever free the collection set concurrently, we should also
tonyp@2849 165 // clear the card table concurrently therefore we won't need to
tonyp@2849 166 // add regions of the collection set to the dirty cards region.
apetrusenko@1231 167 _g1h->push_dirty_cards_region(r);
ysr@777 168 // If we didn't return above, then
ysr@777 169 // _try_claimed || r->claim_iter()
ysr@777 170 // is true: either we're supposed to work on claimed-but-not-complete
ysr@777 171 // regions, or we successfully claimed the region.
ysr@777 172 HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
ysr@777 173 hrrs->init_iterator(iter);
ysr@777 174 size_t card_index;
iveresov@1696 175
iveresov@1696 176 // We claim cards in block so as to recude the contention. The block size is determined by
iveresov@1696 177 // the G1RSetScanBlockSize parameter.
iveresov@1696 178 size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1696 179 for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
iveresov@1696 180 if (current_card >= jump_to_card + _block_size) {
iveresov@1696 181 jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1182 182 }
iveresov@1696 183 if (current_card < jump_to_card) continue;
ysr@777 184 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
ysr@777 185 #if 0
ysr@777 186 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
ysr@777 187 card_start, card_start + CardTableModRefBS::card_size_in_words);
ysr@777 188 #endif
ysr@777 189
ysr@777 190 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
ysr@777 191 assert(card_region != NULL, "Yielding cards not in the heap?");
ysr@777 192 _cards++;
ysr@777 193
apetrusenko@1231 194 if (!card_region->is_on_dirty_cards_region_list()) {
apetrusenko@1231 195 _g1h->push_dirty_cards_region(card_region);
apetrusenko@1231 196 }
apetrusenko@1231 197
tonyp@2849 198 // If the card is dirty, then we will scan it during updateRS.
tonyp@2849 199 if (!card_region->in_collection_set() &&
tonyp@2849 200 !_ct_bs->is_card_dirty(card_index)) {
tonyp@2849 201 scanCard(card_index, card_region);
ysr@777 202 }
ysr@777 203 }
iveresov@1182 204 if (!_try_claimed) {
iveresov@1182 205 hrrs->set_iter_complete();
iveresov@1182 206 }
ysr@777 207 return false;
ysr@777 208 }
ysr@777 209 size_t cards_done() { return _cards_done;}
ysr@777 210 size_t cards_looked_up() { return _cards;}
ysr@777 211 };
ysr@777 212
johnc@2216 213 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
ysr@777 214 double rs_time_start = os::elapsedTime();
johnc@3296 215 HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
ysr@777 216
iveresov@1696 217 ScanRSClosure scanRScl(oc, worker_i);
johnc@3175 218
ysr@777 219 _g1->collection_set_iterate_from(startRegion, &scanRScl);
ysr@777 220 scanRScl.set_try_claimed();
ysr@777 221 _g1->collection_set_iterate_from(startRegion, &scanRScl);
ysr@777 222
iveresov@1696 223 double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
ysr@777 224
ysr@777 225 assert( _cards_scanned != NULL, "invariant" );
ysr@777 226 _cards_scanned[worker_i] = scanRScl.cards_done();
ysr@777 227
brutisso@3923 228 _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
ysr@777 229 }
ysr@777 230
johnc@2060 231 // Closure used for updating RSets and recording references that
johnc@2060 232 // point into the collection set. Only called during an
johnc@2060 233 // evacuation pause.
ysr@777 234
johnc@2060 235 class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
johnc@2060 236 G1RemSet* _g1rs;
johnc@2060 237 DirtyCardQueue* _into_cset_dcq;
johnc@2060 238 public:
johnc@2060 239 RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
johnc@2060 240 DirtyCardQueue* into_cset_dcq) :
johnc@2060 241 _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
johnc@2060 242 {}
johnc@2060 243 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
johnc@2060 244 // The only time we care about recording cards that
johnc@2060 245 // contain references that point into the collection set
johnc@2060 246 // is during RSet updating within an evacuation pause.
johnc@2060 247 // In this case worker_i should be the id of a GC worker thread.
johnc@2060 248 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
brutisso@2646 249 assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
johnc@2060 250
johnc@2060 251 if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
johnc@2060 252 // 'card_ptr' contains references that point into the collection
johnc@2060 253 // set. We need to record the card in the DCQS
johnc@2060 254 // (G1CollectedHeap::into_cset_dirty_card_queue_set())
johnc@2060 255 // that's used for that purpose.
johnc@2060 256 //
johnc@2060 257 // Enqueue the card
johnc@2060 258 _into_cset_dcq->enqueue(card_ptr);
johnc@2060 259 }
johnc@2060 260 return true;
johnc@2060 261 }
johnc@2060 262 };
johnc@2060 263
johnc@2216 264 void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
ysr@777 265 double start = os::elapsedTime();
johnc@2060 266 // Apply the given closure to all remaining log entries.
johnc@2060 267 RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
johnc@3175 268
johnc@2060 269 _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
johnc@2060 270
iveresov@1229 271 // Now there should be no dirty cards.
iveresov@1229 272 if (G1RSLogCheckCardTable) {
iveresov@1229 273 CountNonCleanMemRegionClosure cl(_g1);
iveresov@1229 274 _ct_bs->mod_card_iterate(&cl);
iveresov@1229 275 // XXX This isn't true any more: keeping cards of young regions
iveresov@1229 276 // marked dirty broke it. Need some reasonable fix.
iveresov@1229 277 guarantee(cl.n() == 0, "Card table should be clean.");
ysr@777 278 }
iveresov@1229 279
brutisso@3923 280 _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
ysr@777 281 }
ysr@777 282
johnc@2216 283 void G1RemSet::cleanupHRRS() {
ysr@777 284 HeapRegionRemSet::cleanup();
ysr@777 285 }
ysr@777 286
johnc@2216 287 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
ysr@777 288 int worker_i) {
ysr@777 289 #if CARD_REPEAT_HISTO
ysr@777 290 ct_freq_update_histo_and_reset();
ysr@777 291 #endif
ysr@777 292 if (worker_i == 0) {
ysr@777 293 _cg1r->clear_and_record_card_counts();
ysr@777 294 }
ysr@777 295
johnc@2060 296 // We cache the value of 'oc' closure into the appropriate slot in the
johnc@2060 297 // _cset_rs_update_cl for this worker
johnc@2060 298 assert(worker_i < (int)n_workers(), "sanity");
johnc@2060 299 _cset_rs_update_cl[worker_i] = oc;
johnc@2060 300
johnc@2060 301 // A DirtyCardQueue that is used to hold cards containing references
johnc@2060 302 // that point into the collection set. This DCQ is associated with a
johnc@2060 303 // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
johnc@2060 304 // circumstances (i.e. the pause successfully completes), these cards
johnc@2060 305 // are just discarded (there's no need to update the RSets of regions
johnc@2060 306 // that were in the collection set - after the pause these regions
johnc@2060 307 // are wholly 'free' of live objects. In the event of an evacuation
johnc@2060 308 // failure the cards/buffers in this queue set are:
johnc@2060 309 // * passed to the DirtyCardQueueSet that is used to manage deferred
johnc@2060 310 // RSet updates, or
johnc@2060 311 // * scanned for references that point into the collection set
johnc@2060 312 // and the RSet of the corresponding region in the collection set
johnc@2060 313 // is updated immediately.
johnc@2060 314 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
johnc@2060 315
johnc@2063 316 assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
johnc@2063 317
johnc@2063 318 // The two flags below were introduced temporarily to serialize
johnc@2063 319 // the updating and scanning of remembered sets. There are some
johnc@2063 320 // race conditions when these two operations are done in parallel
johnc@2063 321 // and they are causing failures. When we resolve said race
johnc@2063 322 // conditions, we'll revert back to parallel remembered set
johnc@2063 323 // updating and scanning. See CRs 6677707 and 6677708.
johnc@2063 324 if (G1UseParallelRSetUpdating || (worker_i == 0)) {
johnc@2063 325 updateRS(&into_cset_dcq, worker_i);
ysr@777 326 } else {
brutisso@4015 327 _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0);
brutisso@3923 328 _g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
johnc@2063 329 }
johnc@2063 330 if (G1UseParallelRSetScanning || (worker_i == 0)) {
johnc@2063 331 scanRS(oc, worker_i);
johnc@2063 332 } else {
brutisso@3923 333 _g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
ysr@777 334 }
johnc@2060 335
johnc@2060 336 // We now clear the cached values of _cset_rs_update_cl for this worker
johnc@2060 337 _cset_rs_update_cl[worker_i] = NULL;
ysr@777 338 }
ysr@777 339
johnc@2216 340 void G1RemSet::prepare_for_oops_into_collection_set_do() {
ysr@777 341 cleanupHRRS();
ysr@777 342 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
ysr@777 343 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 344 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 345 dcqs.concatenate_logs();
ysr@777 346
jmasa@3294 347 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 348 // Don't set the number of workers here. It will be set
jmasa@3294 349 // when the task is run
jmasa@3294 350 // _seq_task->set_n_termination((int)n_workers());
ysr@777 351 }
ysr@777 352 guarantee( _cards_scanned == NULL, "invariant" );
zgu@3900 353 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
apetrusenko@980 354 for (uint i = 0; i < n_workers(); ++i) {
apetrusenko@980 355 _cards_scanned[i] = 0;
apetrusenko@980 356 }
ysr@777 357 _total_cards_scanned = 0;
ysr@777 358 }
ysr@777 359
ysr@777 360
johnc@2060 361 // This closure, applied to a DirtyCardQueueSet, is used to immediately
johnc@2060 362 // update the RSets for the regions in the CSet. For each card it iterates
johnc@2060 363 // through the oops which coincide with that card. It scans the reference
johnc@2060 364 // fields in each oop; when it finds an oop that points into the collection
johnc@2060 365 // set, the RSet for the region containing the referenced object is updated.
johnc@2060 366 class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
iveresov@1051 367 G1CollectedHeap* _g1;
johnc@2060 368 CardTableModRefBS* _ct_bs;
iveresov@1051 369 public:
johnc@2060 370 UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
johnc@2060 371 CardTableModRefBS* bs):
johnc@2060 372 _g1(g1), _ct_bs(bs)
johnc@2060 373 { }
johnc@2060 374
johnc@2060 375 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
johnc@2060 376 // Construct the region representing the card.
johnc@2060 377 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@2060 378 // And find the region containing it.
johnc@2060 379 HeapRegion* r = _g1->heap_region_containing(start);
johnc@2060 380 assert(r != NULL, "unexpected null");
johnc@2060 381
johnc@2060 382 // Scan oops in the card looking for references into the collection set
coleenp@4037 383 // Don't use addr_for(card_ptr + 1) which can ask for
coleenp@4037 384 // a card beyond the heap. This is not safe without a perm
coleenp@4037 385 // gen.
coleenp@4037 386 HeapWord* end = start + CardTableModRefBS::card_size_in_words;
johnc@2060 387 MemRegion scanRegion(start, end);
johnc@2060 388
johnc@2060 389 UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
johnc@3179 390 FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
johnc@2060 391 FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);
johnc@2060 392
johnc@2060 393 // We can pass false as the "filter_young" parameter here as:
johnc@2060 394 // * we should be in a STW pause,
johnc@2060 395 // * the DCQS to which this closure is applied is used to hold
johnc@2060 396 // references that point into the collection set from the prior
johnc@2060 397 // RSet updating,
johnc@2060 398 // * the post-write barrier shouldn't be logging updates to young
johnc@2060 399 // regions (but there is a situation where this can happen - see
johnc@2216 400 // the comment in G1RemSet::concurrentRefineOneCard below -
johnc@2060 401 // that should not be applicable here), and
johnc@2060 402 // * during actual RSet updating, the filtering of cards in young
johnc@2060 403 // regions in HeapRegion::oops_on_card_seq_iterate_careful is
johnc@2060 404 // employed.
johnc@2060 405 // As a result, when this closure is applied to "refs into cset"
johnc@2060 406 // DCQS, we shouldn't see any cards in young regions.
johnc@2060 407 update_rs_cl.set_region(r);
johnc@2060 408 HeapWord* stop_point =
johnc@2060 409 r->oops_on_card_seq_iterate_careful(scanRegion,
tonyp@2849 410 &filter_then_update_rs_cset_oop_cl,
tonyp@2849 411 false /* filter_young */,
tonyp@2849 412 NULL /* card_ptr */);
johnc@2060 413
johnc@2060 414 // Since this is performed in the event of an evacuation failure, we
johnc@2060 415 // we shouldn't see a non-null stop point
johnc@2060 416 assert(stop_point == NULL, "saw an unallocated region");
johnc@2060 417 return true;
iveresov@1051 418 }
iveresov@1051 419 };
iveresov@1051 420
johnc@2216 421 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
ysr@777 422 guarantee( _cards_scanned != NULL, "invariant" );
ysr@777 423 _total_cards_scanned = 0;
tonyp@2974 424 for (uint i = 0; i < n_workers(); ++i) {
ysr@777 425 _total_cards_scanned += _cards_scanned[i];
tonyp@2974 426 }
zgu@3900 427 FREE_C_HEAP_ARRAY(size_t, _cards_scanned, mtGC);
ysr@777 428 _cards_scanned = NULL;
ysr@777 429 // Cleanup after copy
ysr@777 430 _g1->set_refine_cte_cl_concurrency(true);
ysr@777 431 // Set all cards back to clean.
ysr@777 432 _g1->cleanUpCardTable();
iveresov@1229 433
johnc@2060 434 DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
johnc@2060 435 int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
johnc@2060 436
iveresov@1051 437 if (_g1->evacuation_failed()) {
johnc@2060 438 // Restore remembered sets for the regions pointing into the collection set.
johnc@2060 439
iveresov@1051 440 if (G1DeferredRSUpdate) {
johnc@2060 441 // If deferred RS updates are enabled then we just need to transfer
johnc@2060 442 // the completed buffers from (a) the DirtyCardQueueSet used to hold
johnc@2060 443 // cards that contain references that point into the collection set
johnc@2060 444 // to (b) the DCQS used to hold the deferred RS updates
johnc@2060 445 _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
iveresov@1051 446 } else {
johnc@2060 447
johnc@2060 448 CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
johnc@2060 449 UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);
johnc@2060 450
johnc@2060 451 int n_completed_buffers = 0;
johnc@2060 452 while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
johnc@2060 453 0, 0, true)) {
johnc@2060 454 n_completed_buffers++;
johnc@2060 455 }
johnc@2060 456 assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
iveresov@1051 457 }
iveresov@1051 458 }
johnc@2060 459
johnc@2060 460 // Free any completed buffers in the DirtyCardQueueSet used to hold cards
johnc@2060 461 // which contain references that point into the collection.
johnc@2060 462 _g1->into_cset_dirty_card_queue_set().clear();
johnc@2060 463 assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
johnc@2060 464 "all buffers should be freed");
johnc@2060 465 _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
ysr@777 466 }
ysr@777 467
ysr@777 468 class ScrubRSClosure: public HeapRegionClosure {
ysr@777 469 G1CollectedHeap* _g1h;
ysr@777 470 BitMap* _region_bm;
ysr@777 471 BitMap* _card_bm;
ysr@777 472 CardTableModRefBS* _ctbs;
ysr@777 473 public:
ysr@777 474 ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
ysr@777 475 _g1h(G1CollectedHeap::heap()),
ysr@777 476 _region_bm(region_bm), _card_bm(card_bm),
ysr@777 477 _ctbs(NULL)
ysr@777 478 {
ysr@777 479 ModRefBarrierSet* bs = _g1h->mr_bs();
ysr@777 480 guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
ysr@777 481 _ctbs = (CardTableModRefBS*)bs;
ysr@777 482 }
ysr@777 483
ysr@777 484 bool doHeapRegion(HeapRegion* r) {
ysr@777 485 if (!r->continuesHumongous()) {
ysr@777 486 r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
ysr@777 487 }
ysr@777 488 return false;
ysr@777 489 }
ysr@777 490 };
ysr@777 491
johnc@2216 492 void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
ysr@777 493 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 494 _g1->heap_region_iterate(&scrub_cl);
ysr@777 495 }
ysr@777 496
johnc@2216 497 void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
jmasa@3357 498 uint worker_num, int claim_val) {
ysr@777 499 ScrubRSClosure scrub_cl(region_bm, card_bm);
jmasa@3294 500 _g1->heap_region_par_iterate_chunked(&scrub_cl,
jmasa@3294 501 worker_num,
jmasa@3357 502 n_workers(),
jmasa@3294 503 claim_val);
ysr@777 504 }
ysr@777 505
ysr@777 506
johnc@2060 507
johnc@3466 508 G1TriggerClosure::G1TriggerClosure() :
johnc@3466 509 _triggered(false) { }
johnc@2060 510
johnc@3466 511 G1InvokeIfNotTriggeredClosure::G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t_cl,
johnc@3466 512 OopClosure* oop_cl) :
johnc@3466 513 _trigger_cl(t_cl), _oop_cl(oop_cl) { }
johnc@3466 514
johnc@3466 515 G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
johnc@3466 516 _c1(c1), _c2(c2) { }
johnc@3466 517
johnc@3466 518 G1UpdateRSOrPushRefOopClosure::
johnc@3466 519 G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
johnc@3466 520 G1RemSet* rs,
johnc@3466 521 OopsInHeapRegionClosure* push_ref_cl,
johnc@3466 522 bool record_refs_into_cset,
johnc@3466 523 int worker_i) :
johnc@3466 524 _g1(g1h), _g1_rem_set(rs), _from(NULL),
johnc@3466 525 _record_refs_into_cset(record_refs_into_cset),
johnc@3466 526 _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
johnc@2060 527
johnc@2216 528 bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
johnc@2060 529 bool check_for_refs_into_cset) {
johnc@1325 530 // Construct the region representing the card.
johnc@1325 531 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@1325 532 // And find the region containing it.
johnc@1325 533 HeapRegion* r = _g1->heap_region_containing(start);
johnc@1325 534 assert(r != NULL, "unexpected null");
johnc@1325 535
coleenp@4037 536 // Don't use addr_for(card_ptr + 1) which can ask for
coleenp@4037 537 // a card beyond the heap. This is not safe without a perm
coleenp@4037 538 // gen at the upper end of the heap.
coleenp@4037 539 HeapWord* end = start + CardTableModRefBS::card_size_in_words;
johnc@1325 540 MemRegion dirtyRegion(start, end);
johnc@1325 541
johnc@1325 542 #if CARD_REPEAT_HISTO
johnc@2504 543 init_ct_freq_table(_g1->max_capacity());
johnc@1325 544 ct_freq_note_card(_ct_bs->index_for(start));
johnc@1325 545 #endif
johnc@1325 546
brutisso@3267 547 OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
brutisso@3267 548 if (check_for_refs_into_cset) {
brutisso@3267 549 // ConcurrentG1RefineThreads have worker numbers larger than what
brutisso@3267 550 // _cset_rs_update_cl[] is set up to handle. But those threads should
brutisso@3267 551 // only be active outside of a collection which means that when they
brutisso@3267 552 // reach here they should have check_for_refs_into_cset == false.
brutisso@3267 553 assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
brutisso@3267 554 oops_in_heap_closure = _cset_rs_update_cl[worker_i];
brutisso@3267 555 }
johnc@3466 556 G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
johnc@3466 557 _g1->g1_rem_set(),
johnc@3466 558 oops_in_heap_closure,
johnc@3466 559 check_for_refs_into_cset,
johnc@3466 560 worker_i);
johnc@1325 561 update_rs_oop_cl.set_from(r);
johnc@2060 562
johnc@3466 563 G1TriggerClosure trigger_cl;
johnc@3179 564 FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
johnc@3466 565 G1InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
johnc@3466 566 G1Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
johnc@2060 567
johnc@2060 568 FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
johnc@2060 569 (check_for_refs_into_cset ?
johnc@2060 570 (OopClosure*)&mux :
johnc@2060 571 (OopClosure*)&update_rs_oop_cl));
johnc@1325 572
johnc@2021 573 // The region for the current card may be a young region. The
johnc@2021 574 // current card may have been a card that was evicted from the
johnc@2021 575 // card cache. When the card was inserted into the cache, we had
johnc@2021 576 // determined that its region was non-young. While in the cache,
johnc@2021 577 // the region may have been freed during a cleanup pause, reallocated
johnc@2021 578 // and tagged as young.
johnc@2021 579 //
johnc@2021 580 // We wish to filter out cards for such a region but the current
tonyp@2849 581 // thread, if we're running concurrently, may "see" the young type
johnc@2021 582 // change at any time (so an earlier "is_young" check may pass or
johnc@2021 583 // fail arbitrarily). We tell the iteration code to perform this
johnc@2021 584 // filtering when it has been determined that there has been an actual
johnc@2021 585 // allocation in this region and making it safe to check the young type.
johnc@2021 586 bool filter_young = true;
johnc@2021 587
johnc@1325 588 HeapWord* stop_point =
johnc@1325 589 r->oops_on_card_seq_iterate_careful(dirtyRegion,
johnc@2021 590 &filter_then_update_rs_oop_cl,
tonyp@2849 591 filter_young,
tonyp@2849 592 card_ptr);
johnc@2021 593
johnc@1325 594 // If stop_point is non-null, then we encountered an unallocated region
johnc@1325 595 // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
johnc@1325 596 // card and re-enqueue: if we put off the card until a GC pause, then the
johnc@1325 597 // unallocated portion will be filled in. Alternatively, we might try
johnc@1325 598 // the full complexity of the technique used in "regular" precleaning.
johnc@1325 599 if (stop_point != NULL) {
johnc@1325 600 // The card might have gotten re-dirtied and re-enqueued while we
johnc@1325 601 // worked. (In fact, it's pretty likely.)
johnc@1325 602 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
johnc@1325 603 *card_ptr = CardTableModRefBS::dirty_card_val();
johnc@1325 604 MutexLockerEx x(Shared_DirtyCardQ_lock,
johnc@1325 605 Mutex::_no_safepoint_check_flag);
johnc@1325 606 DirtyCardQueue* sdcq =
johnc@1325 607 JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
johnc@1325 608 sdcq->enqueue(card_ptr);
johnc@1325 609 }
johnc@1325 610 } else {
johnc@1325 611 _conc_refine_cards++;
johnc@1325 612 }
johnc@2060 613
johnc@3466 614 return trigger_cl.triggered();
johnc@1325 615 }
johnc@1325 616
johnc@2216 617 bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
johnc@2060 618 bool check_for_refs_into_cset) {
ysr@777 619 // If the card is no longer dirty, nothing to do.
johnc@2060 620 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
johnc@2060 621 // No need to return that this card contains refs that point
johnc@2060 622 // into the collection set.
johnc@2060 623 return false;
johnc@2060 624 }
ysr@777 625
ysr@777 626 // Construct the region representing the card.
ysr@777 627 HeapWord* start = _ct_bs->addr_for(card_ptr);
ysr@777 628 // And find the region containing it.
ysr@777 629 HeapRegion* r = _g1->heap_region_containing(start);
ysr@777 630 if (r == NULL) {
johnc@2060 631 // Again no need to return that this card contains refs that
johnc@2060 632 // point into the collection set.
johnc@2060 633 return false; // Not in the G1 heap (might be in perm, for example.)
ysr@777 634 }
ysr@777 635 // Why do we have to check here whether a card is on a young region,
ysr@777 636 // given that we dirty young regions and, as a result, the
ysr@777 637 // post-barrier is supposed to filter them out and never to enqueue
ysr@777 638 // them? When we allocate a new region as the "allocation region" we
ysr@777 639 // actually dirty its cards after we release the lock, since card
ysr@777 640 // dirtying while holding the lock was a performance bottleneck. So,
ysr@777 641 // as a result, it is possible for other threads to actually
ysr@777 642 // allocate objects in the region (after the acquire the lock)
ysr@777 643 // before all the cards on the region are dirtied. This is unlikely,
ysr@777 644 // and it doesn't happen often, but it can happen. So, the extra
ysr@777 645 // check below filters out those cards.
iveresov@1072 646 if (r->is_young()) {
johnc@2060 647 return false;
ysr@777 648 }
ysr@777 649 // While we are processing RSet buffers during the collection, we
ysr@777 650 // actually don't want to scan any cards on the collection set,
ysr@777 651 // since we don't want to update remebered sets with entries that
ysr@777 652 // point into the collection set, given that live objects from the
ysr@777 653 // collection set are about to move and such entries will be stale
ysr@777 654 // very soon. This change also deals with a reliability issue which
ysr@777 655 // involves scanning a card in the collection set and coming across
ysr@777 656 // an array that was being chunked and looking malformed. Note,
ysr@777 657 // however, that if evacuation fails, we have to scan any objects
ysr@777 658 // that were not moved and create any missing entries.
ysr@777 659 if (r->in_collection_set()) {
johnc@2060 660 return false;
ysr@777 661 }
ysr@777 662
johnc@1325 663 // Should we defer processing the card?
johnc@1325 664 //
johnc@1325 665 // Previously the result from the insert_cache call would be
johnc@1325 666 // either card_ptr (implying that card_ptr was currently "cold"),
johnc@1325 667 // null (meaning we had inserted the card ptr into the "hot"
johnc@1325 668 // cache, which had some headroom), or a "hot" card ptr
johnc@1325 669 // extracted from the "hot" cache.
johnc@1325 670 //
johnc@1325 671 // Now that the _card_counts cache in the ConcurrentG1Refine
johnc@1325 672 // instance is an evicting hash table, the result we get back
johnc@1325 673 // could be from evicting the card ptr in an already occupied
johnc@1325 674 // bucket (in which case we have replaced the card ptr in the
johnc@1325 675 // bucket with card_ptr and "defer" is set to false). To avoid
johnc@1325 676 // having a data structure (updates to which would need a lock)
johnc@1325 677 // to hold these unprocessed dirty cards, we need to immediately
johnc@1325 678 // process card_ptr. The actions needed to be taken on return
johnc@1325 679 // from cache_insert are summarized in the following table:
johnc@1325 680 //
johnc@1325 681 // res defer action
johnc@1325 682 // --------------------------------------------------------------
johnc@1325 683 // null false card evicted from _card_counts & replaced with
johnc@1325 684 // card_ptr; evicted ptr added to hot cache.
johnc@1325 685 // No need to process res; immediately process card_ptr
johnc@1325 686 //
johnc@1325 687 // null true card not evicted from _card_counts; card_ptr added
johnc@1325 688 // to hot cache.
johnc@1325 689 // Nothing to do.
johnc@1325 690 //
johnc@1325 691 // non-null false card evicted from _card_counts & replaced with
johnc@1325 692 // card_ptr; evicted ptr is currently "cold" or
johnc@1325 693 // caused an eviction from the hot cache.
johnc@1325 694 // Immediately process res; process card_ptr.
johnc@1325 695 //
johnc@1325 696 // non-null true card not evicted from _card_counts; card_ptr is
johnc@1325 697 // currently cold, or caused an eviction from hot
johnc@1325 698 // cache.
johnc@1325 699 // Immediately process res; no need to process card_ptr.
johnc@1325 700
johnc@2060 701
johnc@1325 702 jbyte* res = card_ptr;
johnc@1325 703 bool defer = false;
johnc@2060 704
johnc@2060 705 // This gets set to true if the card being refined has references
johnc@2060 706 // that point into the collection set.
johnc@2060 707 bool oops_into_cset = false;
johnc@2060 708
ysr@777 709 if (_cg1r->use_cache()) {
johnc@1325 710 jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
johnc@1325 711 if (res != NULL && (res != card_ptr || defer)) {
johnc@1325 712 start = _ct_bs->addr_for(res);
johnc@1325 713 r = _g1->heap_region_containing(start);
coleenp@4037 714 if (r != NULL) {
johnc@2021 715 // Checking whether the region we got back from the cache
johnc@2021 716 // is young here is inappropriate. The region could have been
johnc@2021 717 // freed, reallocated and tagged as young while in the cache.
johnc@2021 718 // Hence we could see its young type change at any time.
johnc@2021 719 //
johnc@2021 720 // Process card pointer we get back from the hot card cache. This
johnc@2021 721 // will check whether the region containing the card is young
johnc@2021 722 // _after_ checking that the region has been allocated from.
johnc@2060 723 oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
johnc@2060 724 false /* check_for_refs_into_cset */);
johnc@2060 725 // The above call to concurrentRefineOneCard_impl is only
johnc@2060 726 // performed if the hot card cache is enabled. This cache is
johnc@2060 727 // disabled during an evacuation pause - which is the only
johnc@2060 728 // time when we need know if the card contains references
johnc@2060 729 // that point into the collection set. Also when the hot card
johnc@2060 730 // cache is enabled, this code is executed by the concurrent
johnc@2060 731 // refine threads - rather than the GC worker threads - and
johnc@2060 732 // concurrentRefineOneCard_impl will return false.
johnc@2060 733 assert(!oops_into_cset, "should not see true here");
johnc@1325 734 }
ysr@777 735 }
ysr@777 736 }
ysr@777 737
johnc@1325 738 if (!defer) {
johnc@2060 739 oops_into_cset =
johnc@2060 740 concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
johnc@2060 741 // We should only be detecting that the card contains references
johnc@2060 742 // that point into the collection set if the current thread is
johnc@2060 743 // a GC worker thread.
johnc@2060 744 assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
johnc@2060 745 "invalid result at non safepoint");
ysr@777 746 }
johnc@2060 747 return oops_into_cset;
ysr@777 748 }
ysr@777 749
ysr@777 750 class HRRSStatsIter: public HeapRegionClosure {
ysr@777 751 size_t _occupied;
ysr@777 752 size_t _total_mem_sz;
ysr@777 753 size_t _max_mem_sz;
ysr@777 754 HeapRegion* _max_mem_sz_region;
ysr@777 755 public:
ysr@777 756 HRRSStatsIter() :
ysr@777 757 _occupied(0),
ysr@777 758 _total_mem_sz(0),
ysr@777 759 _max_mem_sz(0),
ysr@777 760 _max_mem_sz_region(NULL)
ysr@777 761 {}
ysr@777 762
ysr@777 763 bool doHeapRegion(HeapRegion* r) {
ysr@777 764 if (r->continuesHumongous()) return false;
ysr@777 765 size_t mem_sz = r->rem_set()->mem_size();
ysr@777 766 if (mem_sz > _max_mem_sz) {
ysr@777 767 _max_mem_sz = mem_sz;
ysr@777 768 _max_mem_sz_region = r;
ysr@777 769 }
ysr@777 770 _total_mem_sz += mem_sz;
ysr@777 771 size_t occ = r->rem_set()->occupied();
ysr@777 772 _occupied += occ;
ysr@777 773 return false;
ysr@777 774 }
ysr@777 775 size_t total_mem_sz() { return _total_mem_sz; }
ysr@777 776 size_t max_mem_sz() { return _max_mem_sz; }
ysr@777 777 size_t occupied() { return _occupied; }
ysr@777 778 HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
ysr@777 779 };
ysr@777 780
iveresov@1229 781 class PrintRSThreadVTimeClosure : public ThreadClosure {
iveresov@1229 782 public:
iveresov@1229 783 virtual void do_thread(Thread *t) {
iveresov@1229 784 ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
iveresov@1229 785 gclog_or_tty->print(" %5.2f", crt->vtime_accum());
iveresov@1229 786 }
iveresov@1229 787 };
iveresov@1229 788
johnc@2216 789 void G1RemSet::print_summary_info() {
ysr@777 790 G1CollectedHeap* g1 = G1CollectedHeap::heap();
ysr@777 791
ysr@777 792 #if CARD_REPEAT_HISTO
ysr@777 793 gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
ysr@777 794 gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
ysr@777 795 card_repeat_count.print_on(gclog_or_tty);
ysr@777 796 #endif
ysr@777 797
iveresov@1229 798 gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
iveresov@1229 799 _conc_refine_cards);
ysr@777 800 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 801 jint tot_processed_buffers =
ysr@777 802 dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
ysr@777 803 gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
iveresov@1229 804 gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
ysr@777 805 dcqs.processed_buffers_rs_thread(),
ysr@777 806 100.0*(float)dcqs.processed_buffers_rs_thread()/
ysr@777 807 (float)tot_processed_buffers);
ysr@777 808 gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
ysr@777 809 dcqs.processed_buffers_mut(),
ysr@777 810 100.0*(float)dcqs.processed_buffers_mut()/
ysr@777 811 (float)tot_processed_buffers);
iveresov@1229 812 gclog_or_tty->print_cr(" Conc RS threads times(s)");
iveresov@1229 813 PrintRSThreadVTimeClosure p;
iveresov@1229 814 gclog_or_tty->print(" ");
iveresov@1229 815 g1->concurrent_g1_refine()->threads_do(&p);
ysr@777 816 gclog_or_tty->print_cr("");
iveresov@1229 817
johnc@2216 818 HRRSStatsIter blk;
johnc@2216 819 g1->heap_region_iterate(&blk);
tonyp@3957 820 gclog_or_tty->print_cr(" Total heap region rem set sizes = "SIZE_FORMAT"K."
tonyp@3957 821 " Max = "SIZE_FORMAT"K.",
johnc@2216 822 blk.total_mem_sz()/K, blk.max_mem_sz()/K);
tonyp@3957 823 gclog_or_tty->print_cr(" Static structures = "SIZE_FORMAT"K,"
tonyp@3957 824 " free_lists = "SIZE_FORMAT"K.",
tonyp@3957 825 HeapRegionRemSet::static_mem_size() / K,
tonyp@3957 826 HeapRegionRemSet::fl_mem_size() / K);
tonyp@3957 827 gclog_or_tty->print_cr(" "SIZE_FORMAT" occupied cards represented.",
johnc@2216 828 blk.occupied());
tonyp@3957 829 HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
tonyp@3957 830 HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
tonyp@3957 831 gclog_or_tty->print_cr(" Max size region = "HR_FORMAT", "
tonyp@3957 832 "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
tonyp@3957 833 HR_FORMAT_PARAMS(max_mem_sz_region),
tonyp@3957 834 (rem_set->mem_size() + K - 1)/K,
tonyp@3957 835 (rem_set->occupied() + K - 1)/K);
tonyp@3957 836 gclog_or_tty->print_cr(" Did %d coarsenings.",
tonyp@3957 837 HeapRegionRemSet::n_coarsenings());
ysr@777 838 }
johnc@2060 839
johnc@2216 840 void G1RemSet::prepare_for_verify() {
iveresov@1072 841 if (G1HRRSFlushLogBuffersOnVerify &&
iveresov@1072 842 (VerifyBeforeGC || VerifyAfterGC)
iveresov@1072 843 && !_g1->full_collection()) {
ysr@777 844 cleanupHRRS();
ysr@777 845 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 846 if (SafepointSynchronize::is_at_safepoint()) {
ysr@777 847 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 848 dcqs.concatenate_logs();
ysr@777 849 }
ysr@777 850 bool cg1r_use_cache = _cg1r->use_cache();
ysr@777 851 _cg1r->set_use_cache(false);
johnc@2060 852 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
johnc@2060 853 updateRS(&into_cset_dcq, 0);
johnc@2060 854 _g1->into_cset_dirty_card_queue_set().clear();
ysr@777 855 _cg1r->set_use_cache(cg1r_use_cache);
iveresov@1072 856
iveresov@1072 857 assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
ysr@777 858 }
ysr@777 859 }

mercurial