src/share/vm/gc_implementation/g1/g1RemSet.cpp

author       johnc
date         Thu, 17 Nov 2011 12:40:15 -0800
changeset    3296:dc467e8b2c5e
parent       3294:bca17e38de00
child        3357:441e946dc1af
permissions  -rw-r--r--

7112743: G1: Reduce overhead of marking closure during evacuation pauses
Summary: Parallelize the serial code that was used to mark objects reachable from survivor objects in the collection set. Some minor improvements in the timers used to track the freeing of the collection set, along with some tweaks to PrintGCDetails.
Reviewed-by: tonyp, brutisso

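The changeset summary above refers to parallelizing a serial marking pass. In practice that usually means handing the work units (here, the survivor regions and the objects reachable from them) to the GC worker threads through a shared claim counter instead of walking them on a single thread. The sketch below is illustrative only and is not part of g1RemSet.cpp or the HotSpot sources; the names Region, next_region and process_regions are assumptions made for the example, and standard C++ atomics stand in for HotSpot's worker-gang and task machinery.

    #include <atomic>
    #include <cstddef>
    #include <vector>

    struct Region { /* per-region marking state would live here */ };

    // Shared hand-out counter; each fetch_add claims exactly one region.
    static std::atomic<std::size_t> next_region{0};

    // Every GC worker calls this with the same region list. Regions are
    // claimed one at a time, so the formerly serial per-region work runs on
    // all workers without any region being processed twice.
    void process_regions(std::vector<Region>& regions, void (*do_region)(Region&)) {
      for (;;) {
        std::size_t idx = next_region.fetch_add(1, std::memory_order_relaxed);
        if (idx >= regions.size()) break;
        do_region(regions[idx]);
      }
    }

The same claim-based pattern appears throughout the file below, for example in the region claiming done by ScanRSClosure::doHeapRegion and the card-block claiming done via iter_claimed_next.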
ysr@777 1 /*
johnc@2504 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/bufferingOopClosure.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 28 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@2314 32 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
stefank@2314 33 #include "gc_implementation/g1/g1RemSet.inline.hpp"
stefank@2314 34 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 35 #include "memory/iterator.hpp"
stefank@2314 36 #include "oops/oop.inline.hpp"
stefank@2314 37 #include "utilities/intHisto.hpp"
ysr@777 38
ysr@777 39 #define CARD_REPEAT_HISTO 0
ysr@777 40
ysr@777 41 #if CARD_REPEAT_HISTO
ysr@777 42 static size_t ct_freq_sz;
ysr@777 43 static jbyte* ct_freq = NULL;
ysr@777 44
ysr@777 45 void init_ct_freq_table(size_t heap_sz_bytes) {
ysr@777 46 if (ct_freq == NULL) {
ysr@777 47 ct_freq_sz = heap_sz_bytes/CardTableModRefBS::card_size;
ysr@777 48 ct_freq = new jbyte[ct_freq_sz];
ysr@777 49 for (size_t j = 0; j < ct_freq_sz; j++) ct_freq[j] = 0;
ysr@777 50 }
ysr@777 51 }
ysr@777 52
ysr@777 53 void ct_freq_note_card(size_t index) {
ysr@777 54 assert(0 <= index && index < ct_freq_sz, "Bounds error.");
ysr@777 55 if (ct_freq[index] < 100) { ct_freq[index]++; }
ysr@777 56 }
ysr@777 57
ysr@777 58 static IntHistogram card_repeat_count(10, 10);
ysr@777 59
ysr@777 60 void ct_freq_update_histo_and_reset() {
ysr@777 61 for (size_t j = 0; j < ct_freq_sz; j++) {
ysr@777 62 card_repeat_count.add_entry(ct_freq[j]);
ysr@777 63 ct_freq[j] = 0;
ysr@777 64 }
ysr@777 65
ysr@777 66 }
ysr@777 67 #endif
ysr@777 68
johnc@2216 69 G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
johnc@2216 70 : _g1(g1), _conc_refine_cards(0),
johnc@2216 71 _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
ysr@777 72 _cg1r(g1->concurrent_g1_refine()),
johnc@2060 73 _cset_rs_update_cl(NULL),
ysr@777 74 _cards_scanned(NULL), _total_cards_scanned(0)
ysr@777 75 {
ysr@777 76 _seq_task = new SubTasksDone(NumSeqTasks);
iveresov@1051 77 guarantee(n_workers() > 0, "There should be some workers");
johnc@2060 78 _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers());
iveresov@1051 79 for (uint i = 0; i < n_workers(); i++) {
johnc@2060 80 _cset_rs_update_cl[i] = NULL;
iveresov@1051 81 }
ysr@777 82 }
ysr@777 83
johnc@2216 84 G1RemSet::~G1RemSet() {
ysr@777 85 delete _seq_task;
iveresov@1051 86 for (uint i = 0; i < n_workers(); i++) {
johnc@2060 87 assert(_cset_rs_update_cl[i] == NULL, "it should be");
iveresov@1051 88 }
johnc@2060 89 FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
ysr@777 90 }
ysr@777 91
ysr@777 92 void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
ysr@777 93 if (_g1->is_in_g1_reserved(mr.start())) {
ysr@777 94 _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
ysr@777 95 if (_start_first == NULL) _start_first = mr.start();
ysr@777 96 }
ysr@777 97 }
ysr@777 98
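// Background note: a "card" covers a fixed 512-byte (2^9) slice of the heap
// under the usual HotSpot card-table parameters, and a region's remembered
// set records the indices of cards that may contain references into that
// region. ScanRSClosure (below) converts each recorded index back into an
// address range, essentially
//
//   HeapWord* card_start = _bot_shared->address_for_index(index);
//   HeapWord* card_end   = card_start + G1BlockOffsetSharedArray::N_words;
//
// and then scans only the portion of [card_start, card_end) that intersects
// the allocated part of the containing space.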
ysr@777 99 class ScanRSClosure : public HeapRegionClosure {
ysr@777 100 size_t _cards_done, _cards;
ysr@777 101 G1CollectedHeap* _g1h;
ysr@777 102 OopsInHeapRegionClosure* _oc;
ysr@777 103 G1BlockOffsetSharedArray* _bot_shared;
ysr@777 104 CardTableModRefBS *_ct_bs;
ysr@777 105 int _worker_i;
iveresov@1696 106 int _block_size;
ysr@777 107 bool _try_claimed;
ysr@777 108 public:
ysr@777 109 ScanRSClosure(OopsInHeapRegionClosure* oc, int worker_i) :
ysr@777 110 _oc(oc),
ysr@777 111 _cards(0),
ysr@777 112 _cards_done(0),
ysr@777 113 _worker_i(worker_i),
ysr@777 114 _try_claimed(false)
ysr@777 115 {
ysr@777 116 _g1h = G1CollectedHeap::heap();
ysr@777 117 _bot_shared = _g1h->bot_shared();
ysr@777 118 _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
iveresov@1696 119 _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
ysr@777 120 }
ysr@777 121
ysr@777 122 void set_try_claimed() { _try_claimed = true; }
ysr@777 123
ysr@777 124 void scanCard(size_t index, HeapRegion *r) {
johnc@3219 125 // Stack allocate the DirtyCardToOopClosure instance
johnc@3219 126 HeapRegionDCTOC cl(_g1h, r, _oc,
johnc@3219 127 CardTableModRefBS::Precise,
johnc@3219 128 HeapRegionDCTOC::IntoCSFilterKind);
ysr@777 129
ysr@777 130 // Set the "from" region in the closure.
ysr@777 131 _oc->set_region(r);
ysr@777 132 HeapWord* card_start = _bot_shared->address_for_index(index);
ysr@777 133 HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
ysr@777 134 Space *sp = SharedHeap::heap()->space_containing(card_start);
tonyp@2849 135 MemRegion sm_region = sp->used_region_at_save_marks();
ysr@777 136 MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
tonyp@2849 137 if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
tonyp@2849 138 // We mark the card as "claimed" lazily (so races are possible
tonyp@2849 139 // but they're benign), which reduces the number of duplicate
tonyp@2849 140 // scans (the rsets of the regions in the cset can intersect).
tonyp@2849 141 _ct_bs->set_card_claimed(index);
tonyp@2849 142 _cards_done++;
johnc@3219 143 cl.do_MemRegion(mr);
ysr@777 144 }
ysr@777 145 }
ysr@777 146
ysr@777 147 void printCard(HeapRegion* card_region, size_t card_index,
ysr@777 148 HeapWord* card_start) {
ysr@777 149 gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
ysr@777 150 "RS names card %p: "
ysr@777 151 "[" PTR_FORMAT ", " PTR_FORMAT ")",
ysr@777 152 _worker_i,
ysr@777 153 card_region->bottom(), card_region->end(),
ysr@777 154 card_index,
ysr@777 155 card_start, card_start + G1BlockOffsetSharedArray::N_words);
ysr@777 156 }
ysr@777 157
ysr@777 158 bool doHeapRegion(HeapRegion* r) {
ysr@777 159 assert(r->in_collection_set(), "should only be called on elements of CS.");
ysr@777 160 HeapRegionRemSet* hrrs = r->rem_set();
ysr@777 161 if (hrrs->iter_is_complete()) return false; // All done.
ysr@777 162 if (!_try_claimed && !hrrs->claim_iter()) return false;
tonyp@2849 163 // If we ever free the collection set concurrently, we should also
tonyp@2849 164 // clear the card table concurrently therefore we won't need to
tonyp@2849 165 // add regions of the collection set to the dirty cards region.
apetrusenko@1231 166 _g1h->push_dirty_cards_region(r);
ysr@777 167 // If we didn't return above, then
ysr@777 168 // _try_claimed || r->claim_iter()
ysr@777 169 // is true: either we're supposed to work on claimed-but-not-complete
ysr@777 170 // regions, or we successfully claimed the region.
ysr@777 171 HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
ysr@777 172 hrrs->init_iterator(iter);
ysr@777 173 size_t card_index;
iveresov@1696 174
iveresov@1696 175 // We claim cards in blocks so as to reduce contention. The block size is determined by
iveresov@1696 176 // the G1RSetScanBlockSize parameter.
iveresov@1696 177 size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1696 178 for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
iveresov@1696 179 if (current_card >= jump_to_card + _block_size) {
iveresov@1696 180 jump_to_card = hrrs->iter_claimed_next(_block_size);
iveresov@1182 181 }
iveresov@1696 182 if (current_card < jump_to_card) continue;
ysr@777 183 HeapWord* card_start = _g1h->bot_shared()->address_for_index(card_index);
ysr@777 184 #if 0
ysr@777 185 gclog_or_tty->print("Rem set iteration yielded card [" PTR_FORMAT ", " PTR_FORMAT ").\n",
ysr@777 186 card_start, card_start + CardTableModRefBS::card_size_in_words);
ysr@777 187 #endif
ysr@777 188
ysr@777 189 HeapRegion* card_region = _g1h->heap_region_containing(card_start);
ysr@777 190 assert(card_region != NULL, "Yielding cards not in the heap?");
ysr@777 191 _cards++;
ysr@777 192
apetrusenko@1231 193 if (!card_region->is_on_dirty_cards_region_list()) {
apetrusenko@1231 194 _g1h->push_dirty_cards_region(card_region);
apetrusenko@1231 195 }
apetrusenko@1231 196
tonyp@2849 197 // If the card is dirty, then we will scan it during updateRS.
tonyp@2849 198 if (!card_region->in_collection_set() &&
tonyp@2849 199 !_ct_bs->is_card_dirty(card_index)) {
tonyp@2849 200 scanCard(card_index, card_region);
ysr@777 201 }
ysr@777 202 }
iveresov@1182 203 if (!_try_claimed) {
iveresov@1182 204 hrrs->set_iter_complete();
iveresov@1182 205 }
ysr@777 206 return false;
ysr@777 207 }
ysr@777 208 size_t cards_done() { return _cards_done;}
ysr@777 209 size_t cards_looked_up() { return _cards;}
ysr@777 210 };
ysr@777 211
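// Note on the claiming protocol used by scanRS() below: the collection set
// is walked twice with the same ScanRSClosure. On the first pass a worker
// only processes a region's remembered set if it wins the claim_iter() race;
// on the second pass set_try_claimed() has been called, so workers also help
// finish regions that were claimed by someone else but whose iteration is
// not yet complete. Within a region, cards are handed out in blocks of
// G1RSetScanBlockSize via iter_claimed_next() to reduce contention on the
// per-region claim counter.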
johnc@2216 212 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
ysr@777 213 double rs_time_start = os::elapsedTime();
johnc@3296 214 HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
ysr@777 215
iveresov@1696 216 ScanRSClosure scanRScl(oc, worker_i);
johnc@3175 217
ysr@777 218 _g1->collection_set_iterate_from(startRegion, &scanRScl);
ysr@777 219 scanRScl.set_try_claimed();
ysr@777 220 _g1->collection_set_iterate_from(startRegion, &scanRScl);
ysr@777 221
iveresov@1696 222 double scan_rs_time_sec = os::elapsedTime() - rs_time_start;
ysr@777 223
ysr@777 224 assert( _cards_scanned != NULL, "invariant" );
ysr@777 225 _cards_scanned[worker_i] = scanRScl.cards_done();
ysr@777 226
ysr@777 227 _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
ysr@777 228 }
ysr@777 229
johnc@2060 230 // Closure used for updating RSets and recording references that
johnc@2060 231 // point into the collection set. Only called during an
johnc@2060 232 // evacuation pause.
ysr@777 233
johnc@2060 234 class RefineRecordRefsIntoCSCardTableEntryClosure: public CardTableEntryClosure {
johnc@2060 235 G1RemSet* _g1rs;
johnc@2060 236 DirtyCardQueue* _into_cset_dcq;
johnc@2060 237 public:
johnc@2060 238 RefineRecordRefsIntoCSCardTableEntryClosure(G1CollectedHeap* g1h,
johnc@2060 239 DirtyCardQueue* into_cset_dcq) :
johnc@2060 240 _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
johnc@2060 241 {}
johnc@2060 242 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
johnc@2060 243 // The only time we care about recording cards that
johnc@2060 244 // contain references that point into the collection set
johnc@2060 245 // is during RSet updating within an evacuation pause.
johnc@2060 246 // In this case worker_i should be the id of a GC worker thread.
johnc@2060 247 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
brutisso@2646 248 assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
johnc@2060 249
johnc@2060 250 if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
johnc@2060 251 // 'card_ptr' contains references that point into the collection
johnc@2060 252 // set. We need to record the card in the DCQS
johnc@2060 253 // (G1CollectedHeap::into_cset_dirty_card_queue_set())
johnc@2060 254 // that's used for that purpose.
johnc@2060 255 //
johnc@2060 256 // Enqueue the card
johnc@2060 257 _into_cset_dcq->enqueue(card_ptr);
johnc@2060 258 }
johnc@2060 259 return true;
johnc@2060 260 }
johnc@2060 261 };
johnc@2060 262
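// updateRS() below applies the closure above to whatever dirty-card log
// entries remain to be processed. Each card goes through
// concurrentRefineOneCard() with check_for_refs_into_cset == true, and any
// card found to contain a reference into the collection set is enqueued on
// 'into_cset_dcq' so that, should evacuation fail, the corresponding
// remembered set entries can be recreated (see
// cleanup_after_oops_into_collection_set_do()).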
johnc@2216 263 void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
ysr@777 264 double start = os::elapsedTime();
johnc@2060 265 // Apply the given closure to all remaining log entries.
johnc@2060 266 RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
johnc@3175 267
johnc@2060 268 _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
johnc@2060 269
iveresov@1229 270 // Now there should be no dirty cards.
iveresov@1229 271 if (G1RSLogCheckCardTable) {
iveresov@1229 272 CountNonCleanMemRegionClosure cl(_g1);
iveresov@1229 273 _ct_bs->mod_card_iterate(&cl);
iveresov@1229 274 // XXX This isn't true any more: keeping cards of young regions
iveresov@1229 275 // marked dirty broke it. Need some reasonable fix.
iveresov@1229 276 guarantee(cl.n() == 0, "Card table should be clean.");
ysr@777 277 }
iveresov@1229 278
ysr@777 279 _g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
ysr@777 280 }
ysr@777 281
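// CountRSSizeClosure below histograms remembered-set occupancy per region:
// bin 0 counts regions with at most 2^MIN (= 64) occupied cards, bin i > 0
// counts occupancies in (2^(MIN+i-1), 2^(MIN+i)], and the final bin absorbs
// anything larger. It is only used for the G1RSCountHisto output in
// oops_into_collection_set_do().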
ysr@777 282 class CountRSSizeClosure: public HeapRegionClosure {
ysr@777 283 size_t _n;
ysr@777 284 size_t _tot;
ysr@777 285 size_t _max;
ysr@777 286 HeapRegion* _max_r;
ysr@777 287 enum {
ysr@777 288 N = 20,
ysr@777 289 MIN = 6
ysr@777 290 };
ysr@777 291 int _histo[N];
ysr@777 292 public:
ysr@777 293 CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
ysr@777 294 for (int i = 0; i < N; i++) _histo[i] = 0;
ysr@777 295 }
ysr@777 296 bool doHeapRegion(HeapRegion* r) {
ysr@777 297 if (!r->continuesHumongous()) {
ysr@777 298 size_t occ = r->rem_set()->occupied();
ysr@777 299 _n++;
ysr@777 300 _tot += occ;
ysr@777 301 if (occ > _max) {
ysr@777 302 _max = occ;
ysr@777 303 _max_r = r;
ysr@777 304 }
ysr@777 305 // Fit it into a histo bin.
ysr@777 306 int s = 1 << MIN;
ysr@777 307 int i = 0;
ysr@777 308 while (occ > (size_t) s && i < (N-1)) {
ysr@777 309 s = s << 1;
ysr@777 310 i++;
ysr@777 311 }
ysr@777 312 _histo[i]++;
ysr@777 313 }
ysr@777 314 return false;
ysr@777 315 }
ysr@777 316 size_t n() { return _n; }
ysr@777 317 size_t tot() { return _tot; }
ysr@777 318 size_t mx() { return _max; }
ysr@777 319 HeapRegion* mxr() { return _max_r; }
ysr@777 320 void print_histo() {
ysr@777 321 int mx = N;
ysr@777 322 while (mx > 0) {
ysr@777 323 if (_histo[mx-1] > 0) break;
ysr@777 324 mx--;
ysr@777 325 }
ysr@777 326 gclog_or_tty->print_cr("Number of regions with given RS sizes:");
ysr@777 327 gclog_or_tty->print_cr(" <= %8d %8d", 1 << MIN, _histo[0]);
ysr@777 328 for (int i = 1; i < mx-1; i++) {
ysr@777 329 gclog_or_tty->print_cr(" %8d - %8d %8d",
ysr@777 330 (1 << (MIN + i - 1)) + 1,
ysr@777 331 1 << (MIN + i),
ysr@777 332 _histo[i]);
ysr@777 333 }
ysr@777 334 gclog_or_tty->print_cr(" > %8d %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
ysr@777 335 }
ysr@777 336 };
ysr@777 337
johnc@2216 338 void G1RemSet::cleanupHRRS() {
ysr@777 339 HeapRegionRemSet::cleanup();
ysr@777 340 }
ysr@777 341
johnc@2216 342 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
ysr@777 343 int worker_i) {
ysr@777 344 #if CARD_REPEAT_HISTO
ysr@777 345 ct_freq_update_histo_and_reset();
ysr@777 346 #endif
ysr@777 347 if (worker_i == 0) {
ysr@777 348 _cg1r->clear_and_record_card_counts();
ysr@777 349 }
ysr@777 350
ysr@777 351 // Make this into a command-line flag...
ysr@777 352 if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
ysr@777 353 CountRSSizeClosure count_cl;
ysr@777 354 _g1->heap_region_iterate(&count_cl);
ysr@777 355 gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
ysr@777 356 "max region is " PTR_FORMAT,
ysr@777 357 count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
ysr@777 358 count_cl.mx(), count_cl.mxr());
ysr@777 359 count_cl.print_histo();
ysr@777 360 }
ysr@777 361
johnc@2060 362 // We cache the 'oc' closure in the appropriate slot of the
johnc@2060 363 // _cset_rs_update_cl array for this worker
johnc@2060 364 assert(worker_i < (int)n_workers(), "sanity");
johnc@2060 365 _cset_rs_update_cl[worker_i] = oc;
johnc@2060 366
johnc@2060 367 // A DirtyCardQueue that is used to hold cards containing references
johnc@2060 368 // that point into the collection set. This DCQ is associated with a
johnc@2060 369 // special DirtyCardQueueSet (see g1CollectedHeap.hpp). Under normal
johnc@2060 370 // circumstances (i.e. the pause successfully completes), these cards
johnc@2060 371 // are just discarded (there's no need to update the RSets of regions
johnc@2060 372 // that were in the collection set - after the pause these regions
johnc@2060 373 // are wholly 'free' of live objects). In the event of an evacuation
johnc@2060 374 // failure the cards/buffers in this queue set are:
johnc@2060 375 // * passed to the DirtyCardQueueSet that is used to manage deferred
johnc@2060 376 // RSet updates, or
johnc@2060 377 // * scanned for references that point into the collection set
johnc@2060 378 // and the RSet of the corresponding region in the collection set
johnc@2060 379 // is updated immediately.
johnc@2060 380 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
johnc@2060 381
johnc@2063 382 assert((ParallelGCThreads > 0) || worker_i == 0, "invariant");
johnc@2063 383
johnc@2063 384 // The two flags below were introduced temporarily to serialize
johnc@2063 385 // the updating and scanning of remembered sets. There are some
johnc@2063 386 // race conditions when these two operations are done in parallel
johnc@2063 387 // and they are causing failures. When we resolve said race
johnc@2063 388 // conditions, we'll revert back to parallel remembered set
johnc@2063 389 // updating and scanning. See CRs 6677707 and 6677708.
johnc@2063 390 if (G1UseParallelRSetUpdating || (worker_i == 0)) {
johnc@2063 391 updateRS(&into_cset_dcq, worker_i);
ysr@777 392 } else {
johnc@2063 393 _g1p->record_update_rs_processed_buffers(worker_i, 0.0);
johnc@2063 394 _g1p->record_update_rs_time(worker_i, 0.0);
johnc@2063 395 }
johnc@2063 396 if (G1UseParallelRSetScanning || (worker_i == 0)) {
johnc@2063 397 scanRS(oc, worker_i);
johnc@2063 398 } else {
johnc@2063 399 _g1p->record_scan_rs_time(worker_i, 0.0);
ysr@777 400 }
johnc@2060 401
johnc@2060 402 // We now clear the cached values of _cset_rs_update_cl for this worker
johnc@2060 403 _cset_rs_update_cl[worker_i] = NULL;
ysr@777 404 }
ysr@777 405
johnc@2216 406 void G1RemSet::prepare_for_oops_into_collection_set_do() {
ysr@777 407 cleanupHRRS();
ysr@777 408 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine();
ysr@777 409 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 410 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 411 dcqs.concatenate_logs();
ysr@777 412
jmasa@3294 413 if (G1CollectedHeap::use_parallel_gc_threads()) {
jmasa@3294 414 // Don't set the number of workers here. It will be set
jmasa@3294 415 // when the task is run
jmasa@3294 416 // _seq_task->set_n_termination((int)n_workers());
ysr@777 417 }
ysr@777 418 guarantee( _cards_scanned == NULL, "invariant" );
ysr@777 419 _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
apetrusenko@980 420 for (uint i = 0; i < n_workers(); ++i) {
apetrusenko@980 421 _cards_scanned[i] = 0;
apetrusenko@980 422 }
ysr@777 423 _total_cards_scanned = 0;
ysr@777 424 }
ysr@777 425
ysr@777 426
johnc@2060 427 // This closure, applied to a DirtyCardQueueSet, is used to immediately
johnc@2060 428 // update the RSets for the regions in the CSet. For each card it iterates
johnc@2060 429 // through the oops which coincide with that card. It scans the reference
johnc@2060 430 // fields in each oop; when it finds an oop that points into the collection
johnc@2060 431 // set, the RSet for the region containing the referenced object is updated.
johnc@2060 432 class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
iveresov@1051 433 G1CollectedHeap* _g1;
johnc@2060 434 CardTableModRefBS* _ct_bs;
iveresov@1051 435 public:
johnc@2060 436 UpdateRSetCardTableEntryIntoCSetClosure(G1CollectedHeap* g1,
johnc@2060 437 CardTableModRefBS* bs):
johnc@2060 438 _g1(g1), _ct_bs(bs)
johnc@2060 439 { }
johnc@2060 440
johnc@2060 441 bool do_card_ptr(jbyte* card_ptr, int worker_i) {
johnc@2060 442 // Construct the region representing the card.
johnc@2060 443 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@2060 444 // And find the region containing it.
johnc@2060 445 HeapRegion* r = _g1->heap_region_containing(start);
johnc@2060 446 assert(r != NULL, "unexpected null");
johnc@2060 447
johnc@2060 448 // Scan oops in the card looking for references into the collection set
johnc@2060 449 HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
johnc@2060 450 MemRegion scanRegion(start, end);
johnc@2060 451
johnc@2060 452 UpdateRSetImmediate update_rs_cl(_g1->g1_rem_set());
johnc@3179 453 FilterIntoCSClosure update_rs_cset_oop_cl(NULL, _g1, &update_rs_cl);
johnc@2060 454 FilterOutOfRegionClosure filter_then_update_rs_cset_oop_cl(r, &update_rs_cset_oop_cl);
johnc@2060 455
johnc@2060 456 // We can pass false as the "filter_young" parameter here as:
johnc@2060 457 // * we should be in a STW pause,
johnc@2060 458 // * the DCQS to which this closure is applied is used to hold
johnc@2060 459 // references that point into the collection set from the prior
johnc@2060 460 // RSet updating,
johnc@2060 461 // * the post-write barrier shouldn't be logging updates to young
johnc@2060 462 // regions (but there is a situation where this can happen - see
johnc@2216 463 // the comment in G1RemSet::concurrentRefineOneCard below -
johnc@2060 464 // that should not be applicable here), and
johnc@2060 465 // * during actual RSet updating, the filtering of cards in young
johnc@2060 466 // regions in HeapRegion::oops_on_card_seq_iterate_careful is
johnc@2060 467 // employed.
johnc@2060 468 // As a result, when this closure is applied to "refs into cset"
johnc@2060 469 // DCQS, we shouldn't see any cards in young regions.
johnc@2060 470 update_rs_cl.set_region(r);
johnc@2060 471 HeapWord* stop_point =
johnc@2060 472 r->oops_on_card_seq_iterate_careful(scanRegion,
tonyp@2849 473 &filter_then_update_rs_cset_oop_cl,
tonyp@2849 474 false /* filter_young */,
tonyp@2849 475 NULL /* card_ptr */);
johnc@2060 476
johnc@2060 477 // Since this is performed in the event of an evacuation failure,
johnc@2060 478 // we shouldn't see a non-null stop point
johnc@2060 479 assert(stop_point == NULL, "saw an unallocated region");
johnc@2060 480 return true;
iveresov@1051 481 }
iveresov@1051 482 };
iveresov@1051 483
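// cleanup_after_oops_into_collection_set_do() below also handles the
// evacuation-failure case: with G1DeferredRSUpdate the buffers of cards that
// point into the collection set are simply merged into the DCQS used for
// deferred RSet updates, otherwise they are applied immediately using the
// UpdateRSetCardTableEntryIntoCSetClosure above. In either case the
// "into cset" queue set is left empty afterwards.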
johnc@2216 484 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
ysr@777 485 guarantee( _cards_scanned != NULL, "invariant" );
ysr@777 486 _total_cards_scanned = 0;
tonyp@2974 487 for (uint i = 0; i < n_workers(); ++i) {
ysr@777 488 _total_cards_scanned += _cards_scanned[i];
tonyp@2974 489 }
ysr@777 490 FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
ysr@777 491 _cards_scanned = NULL;
ysr@777 492 // Cleanup after copy
ysr@777 493 _g1->set_refine_cte_cl_concurrency(true);
ysr@777 494 // Set all cards back to clean.
ysr@777 495 _g1->cleanUpCardTable();
iveresov@1229 496
johnc@2060 497 DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
johnc@2060 498 int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
johnc@2060 499
iveresov@1051 500 if (_g1->evacuation_failed()) {
johnc@2060 501 // Restore remembered sets for the regions pointing into the collection set.
johnc@2060 502
iveresov@1051 503 if (G1DeferredRSUpdate) {
johnc@2060 504 // If deferred RS updates are enabled then we just need to transfer
johnc@2060 505 // the completed buffers from (a) the DirtyCardQueueSet used to hold
johnc@2060 506 // cards that contain references that point into the collection set
johnc@2060 507 // to (b) the DCQS used to hold the deferred RS updates
johnc@2060 508 _g1->dirty_card_queue_set().merge_bufferlists(&into_cset_dcqs);
iveresov@1051 509 } else {
johnc@2060 510
johnc@2060 511 CardTableModRefBS* bs = (CardTableModRefBS*)_g1->barrier_set();
johnc@2060 512 UpdateRSetCardTableEntryIntoCSetClosure update_rs_cset_immediate(_g1, bs);
johnc@2060 513
johnc@2060 514 int n_completed_buffers = 0;
johnc@2060 515 while (into_cset_dcqs.apply_closure_to_completed_buffer(&update_rs_cset_immediate,
johnc@2060 516 0, 0, true)) {
johnc@2060 517 n_completed_buffers++;
johnc@2060 518 }
johnc@2060 519 assert(n_completed_buffers == into_cset_n_buffers, "missed some buffers");
iveresov@1051 520 }
iveresov@1051 521 }
johnc@2060 522
johnc@2060 523 // Free any completed buffers in the DirtyCardQueueSet used to hold cards
johnc@2060 524 // which contain references that point into the collection set.
johnc@2060 525 _g1->into_cset_dirty_card_queue_set().clear();
johnc@2060 526 assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
johnc@2060 527 "all buffers should be freed");
johnc@2060 528 _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
ysr@777 529 }
ysr@777 530
ysr@777 531 class ScrubRSClosure: public HeapRegionClosure {
ysr@777 532 G1CollectedHeap* _g1h;
ysr@777 533 BitMap* _region_bm;
ysr@777 534 BitMap* _card_bm;
ysr@777 535 CardTableModRefBS* _ctbs;
ysr@777 536 public:
ysr@777 537 ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
ysr@777 538 _g1h(G1CollectedHeap::heap()),
ysr@777 539 _region_bm(region_bm), _card_bm(card_bm),
ysr@777 540 _ctbs(NULL)
ysr@777 541 {
ysr@777 542 ModRefBarrierSet* bs = _g1h->mr_bs();
ysr@777 543 guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
ysr@777 544 _ctbs = (CardTableModRefBS*)bs;
ysr@777 545 }
ysr@777 546
ysr@777 547 bool doHeapRegion(HeapRegion* r) {
ysr@777 548 if (!r->continuesHumongous()) {
ysr@777 549 r->rem_set()->scrub(_ctbs, _region_bm, _card_bm);
ysr@777 550 }
ysr@777 551 return false;
ysr@777 552 }
ysr@777 553 };
ysr@777 554
johnc@2216 555 void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
ysr@777 556 ScrubRSClosure scrub_cl(region_bm, card_bm);
ysr@777 557 _g1->heap_region_iterate(&scrub_cl);
ysr@777 558 }
ysr@777 559
johnc@2216 560 void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
ysr@777 561 int worker_num, int claim_val) {
ysr@777 562 ScrubRSClosure scrub_cl(region_bm, card_bm);
jmasa@3294 563 _g1->heap_region_par_iterate_chunked(&scrub_cl,
jmasa@3294 564 worker_num,
jmasa@3294 565 (int) n_workers(),
jmasa@3294 566 claim_val);
ysr@777 567 }
ysr@777 568
ysr@777 569
ysr@777 570 static IntHistogram out_of_histo(50, 50);
ysr@777 571
johnc@2060 572 class TriggerClosure : public OopClosure {
johnc@2060 573 bool _trigger;
johnc@2060 574 public:
johnc@2060 575 TriggerClosure() : _trigger(false) { }
johnc@2060 576 bool value() const { return _trigger; }
johnc@2060 577 template <class T> void do_oop_nv(T* p) { _trigger = true; }
johnc@2060 578 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 579 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 580 };
johnc@2060 581
johnc@2060 582 class InvokeIfNotTriggeredClosure: public OopClosure {
johnc@2060 583 TriggerClosure* _t;
johnc@2060 584 OopClosure* _oc;
johnc@2060 585 public:
johnc@2060 586 InvokeIfNotTriggeredClosure(TriggerClosure* t, OopClosure* oc):
johnc@2060 587 _t(t), _oc(oc) { }
johnc@2060 588 template <class T> void do_oop_nv(T* p) {
johnc@2060 589 if (!_t->value()) _oc->do_oop(p);
johnc@2060 590 }
johnc@2060 591 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 592 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 593 };
johnc@2060 594
johnc@2060 595 class Mux2Closure : public OopClosure {
johnc@2060 596 OopClosure* _c1;
johnc@2060 597 OopClosure* _c2;
johnc@2060 598 public:
johnc@2060 599 Mux2Closure(OopClosure *c1, OopClosure *c2) : _c1(c1), _c2(c2) { }
johnc@2060 600 template <class T> void do_oop_nv(T* p) {
johnc@2060 601 _c1->do_oop(p); _c2->do_oop(p);
johnc@2060 602 }
johnc@2060 603 virtual void do_oop(oop* p) { do_oop_nv(p); }
johnc@2060 604 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
johnc@2060 605 };
johnc@2060 606
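// The three small closures above are composed by concurrentRefineOneCard_impl()
// when check_for_refs_into_cset is true. Per reference p in the card being
// refined the effect is, roughly (sketch only, using the local names from the
// function below):
//
//   if (!trigger_cl.value()) into_cs_cl.do_oop(p);  // sets the trigger the first
//                                                   // time p points into the cset
//   update_rs_oop_cl.do_oop(p);                     // always update the RSet or
//                                                   // push the reference
//
// That is, Mux2Closure applies both closures, InvokeIfNotTriggeredClosure stops
// re-testing once one cset reference has been seen, and TriggerClosure records
// whether any such reference was found (the function's return value).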
johnc@2216 607 bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
johnc@2060 608 bool check_for_refs_into_cset) {
johnc@1325 609 // Construct the region representing the card.
johnc@1325 610 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@1325 611 // And find the region containing it.
johnc@1325 612 HeapRegion* r = _g1->heap_region_containing(start);
johnc@1325 613 assert(r != NULL, "unexpected null");
johnc@1325 614
johnc@1325 615 HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
johnc@1325 616 MemRegion dirtyRegion(start, end);
johnc@1325 617
johnc@1325 618 #if CARD_REPEAT_HISTO
johnc@2504 619 init_ct_freq_table(_g1->max_capacity());
johnc@1325 620 ct_freq_note_card(_ct_bs->index_for(start));
johnc@1325 621 #endif
johnc@1325 622
brutisso@3267 623 OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
brutisso@3267 624 if (check_for_refs_into_cset) {
brutisso@3267 625 // ConcurrentG1RefineThreads have worker numbers larger than what
brutisso@3267 626 // _cset_rs_update_cl[] is set up to handle. But those threads should
brutisso@3267 627 // only be active outside of a collection which means that when they
brutisso@3267 628 // reach here they should have check_for_refs_into_cset == false.
brutisso@3267 629 assert((size_t)worker_i < n_workers(), "index of worker larger than _cset_rs_update_cl[].length");
brutisso@3267 630 oops_in_heap_closure = _cset_rs_update_cl[worker_i];
brutisso@3267 631 }
johnc@2302 632 UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
johnc@2302 633 _g1->g1_rem_set(),
brutisso@3267 634 oops_in_heap_closure,
johnc@2302 635 check_for_refs_into_cset,
johnc@2302 636 worker_i);
johnc@1325 637 update_rs_oop_cl.set_from(r);
johnc@2060 638
johnc@2060 639 TriggerClosure trigger_cl;
johnc@3179 640 FilterIntoCSClosure into_cs_cl(NULL, _g1, &trigger_cl);
johnc@2060 641 InvokeIfNotTriggeredClosure invoke_cl(&trigger_cl, &into_cs_cl);
johnc@2060 642 Mux2Closure mux(&invoke_cl, &update_rs_oop_cl);
johnc@2060 643
johnc@2060 644 FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r,
johnc@2060 645 (check_for_refs_into_cset ?
johnc@2060 646 (OopClosure*)&mux :
johnc@2060 647 (OopClosure*)&update_rs_oop_cl));
johnc@1325 648
johnc@2021 649 // The region for the current card may be a young region. The
johnc@2021 650 // current card may have been a card that was evicted from the
johnc@2021 651 // card cache. When the card was inserted into the cache, we had
johnc@2021 652 // determined that its region was non-young. While in the cache,
johnc@2021 653 // the region may have been freed during a cleanup pause, reallocated
johnc@2021 654 // and tagged as young.
johnc@2021 655 //
johnc@2021 656 // We wish to filter out cards for such a region but the current
tonyp@2849 657 // thread, if we're running concurrently, may "see" the young type
johnc@2021 658 // change at any time (so an earlier "is_young" check may pass or
johnc@2021 659 // fail arbitrarily). We tell the iteration code to perform this
johnc@2021 660 // filtering when it has been determined that there has been an actual
johnc@2021 661 // allocation in this region, making it safe to check the young type.
johnc@2021 662 bool filter_young = true;
johnc@2021 663
johnc@1325 664 HeapWord* stop_point =
johnc@1325 665 r->oops_on_card_seq_iterate_careful(dirtyRegion,
johnc@2021 666 &filter_then_update_rs_oop_cl,
tonyp@2849 667 filter_young,
tonyp@2849 668 card_ptr);
johnc@2021 669
johnc@1325 670 // If stop_point is non-null, then we encountered an unallocated region
johnc@1325 671 // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
johnc@1325 672 // card and re-enqueue: if we put off the card until a GC pause, then the
johnc@1325 673 // unallocated portion will be filled in. Alternatively, we might try
johnc@1325 674 // the full complexity of the technique used in "regular" precleaning.
johnc@1325 675 if (stop_point != NULL) {
johnc@1325 676 // The card might have gotten re-dirtied and re-enqueued while we
johnc@1325 677 // worked. (In fact, it's pretty likely.)
johnc@1325 678 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
johnc@1325 679 *card_ptr = CardTableModRefBS::dirty_card_val();
johnc@1325 680 MutexLockerEx x(Shared_DirtyCardQ_lock,
johnc@1325 681 Mutex::_no_safepoint_check_flag);
johnc@1325 682 DirtyCardQueue* sdcq =
johnc@1325 683 JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
johnc@1325 684 sdcq->enqueue(card_ptr);
johnc@1325 685 }
johnc@1325 686 } else {
johnc@1325 687 out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
johnc@1325 688 _conc_refine_cards++;
johnc@1325 689 }
johnc@2060 690
johnc@2060 691 return trigger_cl.value();
johnc@1325 692 }
johnc@1325 693
johnc@2216 694 bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
johnc@2060 695 bool check_for_refs_into_cset) {
ysr@777 696 // If the card is no longer dirty, nothing to do.
johnc@2060 697 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
johnc@2060 698 // No need to return that this card contains refs that point
johnc@2060 699 // into the collection set.
johnc@2060 700 return false;
johnc@2060 701 }
ysr@777 702
ysr@777 703 // Construct the region representing the card.
ysr@777 704 HeapWord* start = _ct_bs->addr_for(card_ptr);
ysr@777 705 // And find the region containing it.
ysr@777 706 HeapRegion* r = _g1->heap_region_containing(start);
ysr@777 707 if (r == NULL) {
ysr@777 708 guarantee(_g1->is_in_permanent(start), "Or else where?");
johnc@2060 709 // Again no need to return that this card contains refs that
johnc@2060 710 // point into the collection set.
johnc@2060 711 return false; // Not in the G1 heap (might be in perm, for example.)
ysr@777 712 }
ysr@777 713 // Why do we have to check here whether a card is on a young region,
ysr@777 714 // given that we dirty young regions and, as a result, the
ysr@777 715 // post-barrier is supposed to filter them out and never to enqueue
ysr@777 716 // them? When we allocate a new region as the "allocation region" we
ysr@777 717 // actually dirty its cards after we release the lock, since card
ysr@777 718 // dirtying while holding the lock was a performance bottleneck. So,
ysr@777 719 // as a result, it is possible for other threads to actually
ysr@777 720 // allocate objects in the region (after they acquire the lock)
ysr@777 721 // before all the cards on the region are dirtied. This is unlikely,
ysr@777 722 // and it doesn't happen often, but it can happen. So, the extra
ysr@777 723 // check below filters out those cards.
iveresov@1072 724 if (r->is_young()) {
johnc@2060 725 return false;
ysr@777 726 }
ysr@777 727 // While we are processing RSet buffers during the collection, we
ysr@777 728 // actually don't want to scan any cards on the collection set,
ysr@777 729 // since we don't want to update remembered sets with entries that
ysr@777 730 // point into the collection set, given that live objects from the
ysr@777 731 // collection set are about to move and such entries will be stale
ysr@777 732 // very soon. This change also deals with a reliability issue which
ysr@777 733 // involves scanning a card in the collection set and coming across
ysr@777 734 // an array that was being chunked and looking malformed. Note,
ysr@777 735 // however, that if evacuation fails, we have to scan any objects
ysr@777 736 // that were not moved and create any missing entries.
ysr@777 737 if (r->in_collection_set()) {
johnc@2060 738 return false;
ysr@777 739 }
ysr@777 740
johnc@1325 741 // Should we defer processing the card?
johnc@1325 742 //
johnc@1325 743 // Previously the result from the insert_cache call would be
johnc@1325 744 // either card_ptr (implying that card_ptr was currently "cold"),
johnc@1325 745 // null (meaning we had inserted the card ptr into the "hot"
johnc@1325 746 // cache, which had some headroom), or a "hot" card ptr
johnc@1325 747 // extracted from the "hot" cache.
johnc@1325 748 //
johnc@1325 749 // Now that the _card_counts cache in the ConcurrentG1Refine
johnc@1325 750 // instance is an evicting hash table, the result we get back
johnc@1325 751 // could be from evicting the card ptr in an already occupied
johnc@1325 752 // bucket (in which case we have replaced the card ptr in the
johnc@1325 753 // bucket with card_ptr and "defer" is set to false). To avoid
johnc@1325 754 // having a data structure (updates to which would need a lock)
johnc@1325 755 // to hold these unprocessed dirty cards, we need to immediately
johnc@1325 756 // process card_ptr. The actions that need to be taken on return
johnc@1325 757 // from cache_insert are summarized in the following table:
johnc@1325 758 //
johnc@1325 759 // res defer action
johnc@1325 760 // --------------------------------------------------------------
johnc@1325 761 // null false card evicted from _card_counts & replaced with
johnc@1325 762 // card_ptr; evicted ptr added to hot cache.
johnc@1325 763 // No need to process res; immediately process card_ptr
johnc@1325 764 //
johnc@1325 765 // null true card not evicted from _card_counts; card_ptr added
johnc@1325 766 // to hot cache.
johnc@1325 767 // Nothing to do.
johnc@1325 768 //
johnc@1325 769 // non-null false card evicted from _card_counts & replaced with
johnc@1325 770 // card_ptr; evicted ptr is currently "cold" or
johnc@1325 771 // caused an eviction from the hot cache.
johnc@1325 772 // Immediately process res; process card_ptr.
johnc@1325 773 //
johnc@1325 774 // non-null true card not evicted from _card_counts; card_ptr is
johnc@1325 775 // currently cold, or caused an eviction from hot
johnc@1325 776 // cache.
johnc@1325 777 // Immediately process res; no need to process card_ptr.
johnc@1325 778
johnc@2060 779
johnc@1325 780 jbyte* res = card_ptr;
johnc@1325 781 bool defer = false;
johnc@2060 782
johnc@2060 783 // This gets set to true if the card being refined has references
johnc@2060 784 // that point into the collection set.
johnc@2060 785 bool oops_into_cset = false;
johnc@2060 786
ysr@777 787 if (_cg1r->use_cache()) {
johnc@1325 788 res = _cg1r->cache_insert(card_ptr, &defer);
johnc@1325 789 if (res != NULL && (res != card_ptr || defer)) {
johnc@1325 790 start = _ct_bs->addr_for(res);
johnc@1325 791 r = _g1->heap_region_containing(start);
johnc@1325 792 if (r == NULL) {
johnc@1325 793 assert(_g1->is_in_permanent(start), "Or else where?");
johnc@1325 794 } else {
johnc@2021 795 // Checking whether the region we got back from the cache
johnc@2021 796 // is young here is inappropriate. The region could have been
johnc@2021 797 // freed, reallocated and tagged as young while in the cache.
johnc@2021 798 // Hence we could see its young type change at any time.
johnc@2021 799 //
johnc@2021 800 // Process card pointer we get back from the hot card cache. This
johnc@2021 801 // will check whether the region containing the card is young
johnc@2021 802 // _after_ checking that the region has been allocated from.
johnc@2060 803 oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
johnc@2060 804 false /* check_for_refs_into_cset */);
johnc@2060 805 // The above call to concurrentRefineOneCard_impl is only
johnc@2060 806 // performed if the hot card cache is enabled. This cache is
johnc@2060 807 // disabled during an evacuation pause - which is the only
johnc@2060 808 // time when we need to know if the card contains references
johnc@2060 809 // that point into the collection set. Also when the hot card
johnc@2060 810 // cache is enabled, this code is executed by the concurrent
johnc@2060 811 // refine threads - rather than the GC worker threads - and
johnc@2060 812 // concurrentRefineOneCard_impl will return false.
johnc@2060 813 assert(!oops_into_cset, "should not see true here");
johnc@1325 814 }
ysr@777 815 }
ysr@777 816 }
ysr@777 817
johnc@1325 818 if (!defer) {
johnc@2060 819 oops_into_cset =
johnc@2060 820 concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
johnc@2060 821 // We should only be detecting that the card contains references
johnc@2060 822 // that point into the collection set if the current thread is
johnc@2060 823 // a GC worker thread.
johnc@2060 824 assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
johnc@2060 825 "invalid result at non safepoint");
ysr@777 826 }
johnc@2060 827 return oops_into_cset;
ysr@777 828 }
ysr@777 829
ysr@777 830 class HRRSStatsIter: public HeapRegionClosure {
ysr@777 831 size_t _occupied;
ysr@777 832 size_t _total_mem_sz;
ysr@777 833 size_t _max_mem_sz;
ysr@777 834 HeapRegion* _max_mem_sz_region;
ysr@777 835 public:
ysr@777 836 HRRSStatsIter() :
ysr@777 837 _occupied(0),
ysr@777 838 _total_mem_sz(0),
ysr@777 839 _max_mem_sz(0),
ysr@777 840 _max_mem_sz_region(NULL)
ysr@777 841 {}
ysr@777 842
ysr@777 843 bool doHeapRegion(HeapRegion* r) {
ysr@777 844 if (r->continuesHumongous()) return false;
ysr@777 845 size_t mem_sz = r->rem_set()->mem_size();
ysr@777 846 if (mem_sz > _max_mem_sz) {
ysr@777 847 _max_mem_sz = mem_sz;
ysr@777 848 _max_mem_sz_region = r;
ysr@777 849 }
ysr@777 850 _total_mem_sz += mem_sz;
ysr@777 851 size_t occ = r->rem_set()->occupied();
ysr@777 852 _occupied += occ;
ysr@777 853 return false;
ysr@777 854 }
ysr@777 855 size_t total_mem_sz() { return _total_mem_sz; }
ysr@777 856 size_t max_mem_sz() { return _max_mem_sz; }
ysr@777 857 size_t occupied() { return _occupied; }
ysr@777 858 HeapRegion* max_mem_sz_region() { return _max_mem_sz_region; }
ysr@777 859 };
ysr@777 860
iveresov@1229 861 class PrintRSThreadVTimeClosure : public ThreadClosure {
iveresov@1229 862 public:
iveresov@1229 863 virtual void do_thread(Thread *t) {
iveresov@1229 864 ConcurrentG1RefineThread* crt = (ConcurrentG1RefineThread*) t;
iveresov@1229 865 gclog_or_tty->print(" %5.2f", crt->vtime_accum());
iveresov@1229 866 }
iveresov@1229 867 };
iveresov@1229 868
johnc@2216 869 void G1RemSet::print_summary_info() {
ysr@777 870 G1CollectedHeap* g1 = G1CollectedHeap::heap();
ysr@777 871
ysr@777 872 #if CARD_REPEAT_HISTO
ysr@777 873 gclog_or_tty->print_cr("\nG1 card_repeat count histogram: ");
ysr@777 874 gclog_or_tty->print_cr(" # of repeats --> # of cards with that number.");
ysr@777 875 card_repeat_count.print_on(gclog_or_tty);
ysr@777 876 #endif
ysr@777 877
ysr@777 878 if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
ysr@777 879 gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
ysr@777 880 gclog_or_tty->print_cr(" # of CS ptrs --> # of cards with that number.");
ysr@777 881 out_of_histo.print_on(gclog_or_tty);
ysr@777 882 }
iveresov@1229 883 gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
iveresov@1229 884 _conc_refine_cards);
ysr@777 885 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 886 jint tot_processed_buffers =
ysr@777 887 dcqs.processed_buffers_mut() + dcqs.processed_buffers_rs_thread();
ysr@777 888 gclog_or_tty->print_cr(" Of %d completed buffers:", tot_processed_buffers);
iveresov@1229 889 gclog_or_tty->print_cr(" %8d (%5.1f%%) by conc RS threads.",
ysr@777 890 dcqs.processed_buffers_rs_thread(),
ysr@777 891 100.0*(float)dcqs.processed_buffers_rs_thread()/
ysr@777 892 (float)tot_processed_buffers);
ysr@777 893 gclog_or_tty->print_cr(" %8d (%5.1f%%) by mutator threads.",
ysr@777 894 dcqs.processed_buffers_mut(),
ysr@777 895 100.0*(float)dcqs.processed_buffers_mut()/
ysr@777 896 (float)tot_processed_buffers);
iveresov@1229 897 gclog_or_tty->print_cr(" Conc RS threads times(s)");
iveresov@1229 898 PrintRSThreadVTimeClosure p;
iveresov@1229 899 gclog_or_tty->print(" ");
iveresov@1229 900 g1->concurrent_g1_refine()->threads_do(&p);
ysr@777 901 gclog_or_tty->print_cr("");
iveresov@1229 902
johnc@2216 903 HRRSStatsIter blk;
johnc@2216 904 g1->heap_region_iterate(&blk);
johnc@2216 905 gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
johnc@2216 906 " Max = " SIZE_FORMAT "K.",
johnc@2216 907 blk.total_mem_sz()/K, blk.max_mem_sz()/K);
johnc@2216 908 gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K,"
johnc@2216 909 " free_lists = " SIZE_FORMAT "K.",
johnc@2216 910 HeapRegionRemSet::static_mem_size()/K,
johnc@2216 911 HeapRegionRemSet::fl_mem_size()/K);
johnc@2216 912 gclog_or_tty->print_cr(" %d occupied cards represented.",
johnc@2216 913 blk.occupied());
johnc@2216 914 gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
johnc@2216 915 ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
johnc@2216 916 blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
johnc@2216 917 (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
johnc@2216 918 (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
johnc@2216 919 gclog_or_tty->print_cr(" Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
ysr@777 920 }
johnc@2060 921
johnc@2216 922 void G1RemSet::prepare_for_verify() {
iveresov@1072 923 if (G1HRRSFlushLogBuffersOnVerify &&
iveresov@1072 924 (VerifyBeforeGC || VerifyAfterGC)
iveresov@1072 925 && !_g1->full_collection()) {
ysr@777 926 cleanupHRRS();
ysr@777 927 _g1->set_refine_cte_cl_concurrency(false);
ysr@777 928 if (SafepointSynchronize::is_at_safepoint()) {
ysr@777 929 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
ysr@777 930 dcqs.concatenate_logs();
ysr@777 931 }
ysr@777 932 bool cg1r_use_cache = _cg1r->use_cache();
ysr@777 933 _cg1r->set_use_cache(false);
johnc@2060 934 DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
johnc@2060 935 updateRS(&into_cset_dcq, 0);
johnc@2060 936 _g1->into_cset_dirty_card_queue_set().clear();
ysr@777 937 _cg1r->set_use_cache(cg1r_use_cache);
iveresov@1072 938
iveresov@1072 939 assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
ysr@777 940 }
ysr@777 941 }
