src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp

Mon, 19 Jul 2010 11:06:34 -0700

author
johnc
date
Mon, 19 Jul 2010 11:06:34 -0700
changeset 2021
5cbac8938c4c
parent 1907
c18cbe5936b8
child 2060
2d160770d2e5
permissions
-rw-r--r--

6956639: G1: assert(cached_ptr != card_ptr) failed: shouldn't be, concurrentG1Refine.cpp:307
Summary: During concurrent refinement, filter cards in young regions after it has been determined that the region has been allocated from and the young type of the region has been set.
Reviewed-by: iveresov, tonyp, jcoomes

ysr@777 1 /*
johnc@2021 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_concurrentG1Refine.cpp.incl"
ysr@777 27
johnc@1325 28 // Possible sizes for the card counts cache: odd primes that roughly double in size.
johnc@1325 29 // (See jvmtiTagMap.cpp).
// The trailing -1 is an end-of-table sentinel: readers treat a negative
// size as "no larger table" and fall back to _max_n_card_counts.
johnc@1325 30 int ConcurrentG1Refine::_cc_cache_sizes[] = {
johnc@1325 31 16381, 32771, 76831, 150001, 307261,
johnc@1325 32 614563, 1228891, 2457733, 4915219, 9830479,
johnc@1325 33 19660831, 39321619, 78643219, 157286461, -1
johnc@1325 34 };
johnc@1325 35
ysr@777 36 ConcurrentG1Refine::ConcurrentG1Refine() :
johnc@1325 37 _card_counts(NULL), _card_epochs(NULL),
johnc@1325 38 _n_card_counts(0), _max_n_card_counts(0),
johnc@1325 39 _cache_size_index(0), _expand_card_counts(false),
ysr@777 40 _hot_cache(NULL),
ysr@777 41 _def_use_cache(false), _use_cache(false),
johnc@1325 42 _n_periods(0),
iveresov@1229 43 _threads(NULL), _n_threads(0)
ysr@777 44 {
iveresov@1546 45
iveresov@1546 46 // Ergonomically select initial concurrent refinement parameters.
// Green zone: defaults to the number of parallel GC threads (at least 1).
tonyp@1717 47 if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
tonyp@1717 48 FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
iveresov@1546 49 }
tonyp@1717 50 set_green_zone(G1ConcRefinementGreenZone);
iveresov@1546 51
// Yellow zone: defaults to 3x the green zone, and is clamped so that it
// is never smaller than the green zone.
tonyp@1717 52 if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
tonyp@1717 53 FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
iveresov@1546 54 }
tonyp@1717 55 set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
iveresov@1546 56
// Red zone: defaults to 2x the yellow zone, clamped to at least yellow.
tonyp@1717 57 if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
tonyp@1717 58 FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
iveresov@1546 59 }
tonyp@1717 60 set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
iveresov@1546 61 _n_worker_threads = thread_num();
iveresov@1546 62 // We need one extra thread to do the young gen rset size sampling.
iveresov@1546 63 _n_threads = _n_worker_threads + 1;
iveresov@1546 64 reset_threshold_step();
iveresov@1546 65
// Create the refinement threads from last to first so that each thread
// can be handed a pointer to its successor in the chain.
iveresov@1546 66 _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
iveresov@1546 67 int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
iveresov@1546 68 ConcurrentG1RefineThread *next = NULL;
iveresov@1546 69 for (int i = _n_threads - 1; i >= 0; i--) {
iveresov@1546 70 ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
iveresov@1546 71 assert(t != NULL, "Conc refine should have been created");
iveresov@1546 72 assert(t->cg1r() == this, "Conc refine thread should refer to this");
iveresov@1546 73 _threads[i] = t;
iveresov@1546 74 next = t;
ysr@777 75 }
ysr@777 76 }
ysr@777 77
iveresov@1546 78 void ConcurrentG1Refine::reset_threshold_step() {
tonyp@1717 79 if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
iveresov@1546 80 _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
iveresov@1546 81 } else {
tonyp@1717 82 _thread_threshold_step = G1ConcRefinementThresholdStep;
iveresov@1230 83 }
iveresov@1546 84 }
iveresov@1546 85
iveresov@1546 86 int ConcurrentG1Refine::thread_num() {
tonyp@1717 87 return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
iveresov@1230 88 }
iveresov@1230 89
// Second-phase initialization: when G1ConcRSLogCacheSize > 0, size and
// allocate the card count / epoch tables and the "hot" card cache.
ysr@777 90 void ConcurrentG1Refine::init() {
johnc@1325 91 if (G1ConcRSLogCacheSize > 0) {
johnc@1325 92 _g1h = G1CollectedHeap::heap();
// Upper bound on distinct card numbers: one per card-sized chunk of
// the reserved heap.
johnc@1325 93 _max_n_card_counts =
johnc@1325 94 (unsigned) (_g1h->g1_reserved_obj_bytes() >> CardTableModRefBS::card_shift);
johnc@1325 95
// Card numbers must be representable with one bit of the unsigned
// held back (see the epoch entry encoding helpers).
johnc@1325 96 size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
johnc@1325 97 guarantee(_max_n_card_counts < max_card_num, "card_num representation");
johnc@1325 98
// Pick the smallest prime table size covering the desired initial
// fraction of the maximum card count, then step back one slot.
johnc@1325 99 int desired = _max_n_card_counts / InitialCacheFraction;
johnc@1325 100 for (_cache_size_index = 0;
johnc@1325 101 _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) {
johnc@1325 102 if (_cc_cache_sizes[_cache_size_index] >= desired) break;
johnc@1325 103 }
johnc@1325 104 _cache_size_index = MAX2(0, (_cache_size_index - 1));
johnc@1325 105
johnc@1325 106 int initial_size = _cc_cache_sizes[_cache_size_index];
// A negative entry is the table's end sentinel; fall back to the max.
johnc@1325 107 if (initial_size < 0) initial_size = _max_n_card_counts;
johnc@1325 108
johnc@1325 109 // Make sure we don't go bigger than we will ever need
johnc@1325 110 _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts);
johnc@1325 111
johnc@1325 112 _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
johnc@1325 113 _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
johnc@1325 114
// Zero-fill both tables.
johnc@1325 115 Copy::fill_to_bytes(&_card_counts[0],
johnc@1325 116 _n_card_counts * sizeof(CardCountCacheEntry));
johnc@1325 117 Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
johnc@1325 118
johnc@1325 119 ModRefBarrierSet* bs = _g1h->mr_bs();
ysr@777 120 guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
johnc@1325 121 _ct_bs = (CardTableModRefBS*)bs;
// Card-table byte for the bottom of the reserved region; used when
// translating between card pointers and card numbers.
johnc@1325 122 _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
johnc@1325 123
ysr@777 124 _def_use_cache = true;
ysr@777 125 _use_cache = true;
ysr@777 126 _hot_cache_size = (1 << G1ConcRSLogCacheSize);
ysr@777 127 _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
ysr@777 128 _n_hot = 0;
ysr@777 129 _hot_cache_idx = 0;
johnc@1324 130
johnc@1324 131 // For refining the cards in the hot cache in parallel
johnc@1324 132 int n_workers = (ParallelGCThreads > 0 ?
johnc@1325 133 _g1h->workers()->total_workers() : 1);
johnc@1324 134 _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
johnc@1324 135 _hot_cache_par_claimed_idx = 0;
ysr@777 136 }
ysr@777 137 }
ysr@777 138
iveresov@1229 139 void ConcurrentG1Refine::stop() {
iveresov@1229 140 if (_threads != NULL) {
iveresov@1229 141 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 142 _threads[i]->stop();
iveresov@1229 143 }
iveresov@1229 144 }
iveresov@1229 145 }
iveresov@1229 146
iveresov@1546 147 void ConcurrentG1Refine::reinitialize_threads() {
iveresov@1546 148 reset_threshold_step();
iveresov@1546 149 if (_threads != NULL) {
iveresov@1546 150 for (int i = 0; i < _n_threads; i++) {
iveresov@1546 151 _threads[i]->initialize();
iveresov@1546 152 }
iveresov@1546 153 }
iveresov@1546 154 }
iveresov@1546 155
// Free the count/epoch tables and the hot card cache (allocated in
// init() when G1ConcRSLogCacheSize > 0), then delete the refinement
// threads and the array that holds them.
ysr@777 156 ConcurrentG1Refine::~ConcurrentG1Refine() {
johnc@1325 157 if (G1ConcRSLogCacheSize > 0) {
ysr@777 158 assert(_card_counts != NULL, "Logic");
johnc@1325 159 FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
johnc@1325 160 assert(_card_epochs != NULL, "Logic");
johnc@1325 161 FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
ysr@777 162 assert(_hot_cache != NULL, "Logic");
ysr@777 163 FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
ysr@777 164 }
iveresov@1229 165 if (_threads != NULL) {
iveresov@1229 166 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 167 delete _threads[i];
iveresov@1229 168 }
iveresov@1234 169 FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
ysr@777 170 }
ysr@777 171 }
ysr@777 172
iveresov@1229 173 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
iveresov@1229 174 if (_threads != NULL) {
iveresov@1229 175 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 176 tc->do_thread(_threads[i]);
iveresov@1229 177 }
ysr@777 178 }
ysr@777 179 }
ysr@777 180
johnc@1325 181 bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
johnc@1325 182 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@1325 183 HeapRegion* r = _g1h->heap_region_containing(start);
johnc@1325 184 if (r != NULL && r->is_young()) {
johnc@1325 185 return true;
johnc@1325 186 }
johnc@1325 187 // This card is not associated with a heap region
johnc@1325 188 // so can't be young.
johnc@1325 189 return false;
ysr@777 190 }
ysr@777 191
// Record a reference to card_ptr in the card count cache. Returns the
// card pointer the caller should act on - either card_ptr itself, or
// the pointer of an older card that was evicted from the shared cache
// slot. On return, *count holds the cached count for the returned card
// and *defer is true when refinement of card_ptr can be postponed.
// Lock-free: racing refinement threads are arbitrated with CAS on the
// epoch entry.
johnc@1325 192 jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
johnc@1325 193 unsigned new_card_num = ptr_2_card_num(card_ptr);
johnc@1325 194 unsigned bucket = hash(new_card_num);
johnc@1325 195 assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
johnc@1325 196
johnc@1325 197 CardCountCacheEntry* count_ptr = &_card_counts[bucket];
johnc@1325 198 CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
johnc@1325 199
johnc@1325 200 // We have to construct a new entry if we haven't updated the counts
johnc@1325 201 // during the current period, or if the count was updated for a
johnc@1325 202 // different card number.
johnc@1325 203 unsigned int new_epoch = (unsigned int) _n_periods;
johnc@1325 204 julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
johnc@1325 205
johnc@1325 206 while (true) {
johnc@1325 207 // Fetch the previous epoch value
johnc@1325 208 julong prev_epoch_entry = epoch_ptr->_value;
johnc@1325 209 julong cas_res;
johnc@1325 210
johnc@1325 211 if (extract_epoch(prev_epoch_entry) != new_epoch) {
johnc@1325 212 // This entry has not yet been updated during this period.
johnc@1325 213 // Note: we update the epoch value atomically to ensure
johnc@1325 214 // that there is only one winner that updates the cached
johnc@1325 215 // card_ptr value even though all the refine threads share
johnc@1325 216 // the same epoch value.
johnc@1325 217
johnc@1325 218 cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
johnc@1325 219 (volatile jlong*)&epoch_ptr->_value,
johnc@1325 220 (jlong) prev_epoch_entry);
johnc@1325 221
johnc@1325 222 if (cas_res == prev_epoch_entry) {
johnc@1325 223 // We have successfully won the race to update the
johnc@1325 224 // epoch and card_num value. Make it look like the
johnc@1325 225 // count and eviction count were previously cleared.
johnc@1325 226 count_ptr->_count = 1;
johnc@1325 227 count_ptr->_evict_count = 0;
johnc@1325 228 *count = 0;
johnc@1325 229 // We can defer the processing of card_ptr
johnc@1325 230 *defer = true;
johnc@1325 231 return card_ptr;
johnc@1325 232 }
johnc@1325 233 // We did not win the race to update the epoch field, so some other
johnc@1325 234 // thread must have done it. The value that gets returned by CAS
johnc@1325 235 // should be the new epoch value.
johnc@1325 236 assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
johnc@1325 237 // We could 'continue' here or just re-read the previous epoch value
johnc@1325 238 prev_epoch_entry = epoch_ptr->_value;
johnc@1325 239 }
johnc@1325 240
johnc@1325 241 // The epoch entry for card_ptr has been updated during this period.
johnc@1325 242 unsigned old_card_num = extract_card_num(prev_epoch_entry);
johnc@1325 243
johnc@1325 244 // The card count that will be returned to caller
johnc@1325 245 *count = count_ptr->_count;
johnc@1325 246
johnc@1325 247 // Are we updating the count for the same card?
johnc@1325 248 if (new_card_num == old_card_num) {
johnc@1325 249 // Same card - just update the count. We could have more than one
johnc@1325 250 // thread racing to update count for the current card. It should be
johnc@1325 251 // OK not to use a CAS as the only penalty should be some missed
johnc@1325 252 // increments of the count which delays identifying the card as "hot".
johnc@1325 253
johnc@1325 254 if (*count < max_jubyte) count_ptr->_count++;
johnc@1325 255 // We can defer the processing of card_ptr
johnc@1325 256 *defer = true;
johnc@1325 257 return card_ptr;
johnc@1325 258 }
johnc@1325 259
johnc@1325 260 // Different card - evict old card info
johnc@1325 261 if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
johnc@1325 262 if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
johnc@1325 263 // Trigger a resize the next time we clear
johnc@1325 264 _expand_card_counts = true;
johnc@1325 265 }
johnc@1325 266
johnc@1325 267 cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
johnc@1325 268 (volatile jlong*)&epoch_ptr->_value,
johnc@1325 269 (jlong) prev_epoch_entry);
johnc@1325 270
johnc@1325 271 if (cas_res == prev_epoch_entry) {
johnc@1325 272 // We successfully updated the card num value in the epoch entry
johnc@1325 273 count_ptr->_count = 0; // initialize counter for new card num
johnc@2021 274 jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
johnc@1325 275
johnc@1325 276 // Even though the region containing the card at old_card_num was not
johnc@1325 277 // in the young list when old_card_num was recorded in the epoch
johnc@1325 278 // cache it could have been added to the free list and subsequently
johnc@2021 279 // added to the young list in the intervening time. See CR 6817995.
johnc@2021 280 // We do not deal with this case here - it will be handled in
johnc@2021 281 // HeapRegion::oops_on_card_seq_iterate_careful after it has been
johnc@2021 282 // determined that the region containing the card has been allocated
johnc@2021 283 // to, and it's safe to check the young type of the region.
johnc@1325 284
johnc@1325 285 // We do not want to defer processing of card_ptr in this case
johnc@1325 286 // (we need to refine old_card_ptr and card_ptr)
johnc@1325 287 *defer = false;
johnc@1325 288 return old_card_ptr;
johnc@1325 289 }
johnc@1325 290 // Someone else beat us - try again.
johnc@1325 291 }
johnc@1325 292 }
johnc@1325 293
// Insert card_ptr into the count cache and, when the resulting card has
// been seen at least G1ConcRSHotCardLimit times, buffer it in the hot
// card cache. Returns a card that the caller must refine immediately,
// or NULL if nothing needs immediate refinement.
johnc@1325 294 jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
johnc@1325 295 int count;
johnc@1325 296 jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
johnc@1325 297 assert(cached_ptr != NULL, "bad cached card ptr");
johnc@1681 298
johnc@2021 299 // We've just inserted a card pointer into the card count cache
johnc@2021 300 // and got back the card that we just inserted or (evicted) the
johnc@2021 301 // previous contents of that count slot.
johnc@1681 302
johnc@2021 303 // The card we got back could be in a young region. When the
johnc@2021 304 // returned card (if evicted) was originally inserted, we had
johnc@2021 305 // determined that its containing region was not young. However
johnc@2021 306 // it is possible for the region to be freed during a cleanup
johnc@2021 307 // pause, then reallocated and tagged as young which will result
johnc@2021 308 // in the returned card residing in a young region.
johnc@2021 309 //
johnc@2021 310 // We do not deal with this case here - the change from non-young
johnc@2021 311 // to young could be observed at any time - it will be handled in
johnc@2021 312 // HeapRegion::oops_on_card_seq_iterate_careful after it has been
johnc@2021 313 // determined that the region containing the card has been allocated
johnc@2021 314 // to.
johnc@1325 315
johnc@1325 316 // The card pointer we obtained from card count cache is not hot
johnc@1325 317 // so do not store it in the cache; return it for immediate
johnc@1325 318 // refining.
ysr@777 319 if (count < G1ConcRSHotCardLimit) {
johnc@1325 320 return cached_ptr;
ysr@777 321 }
johnc@1325 322
johnc@2021 323 // Otherwise, the pointer we got from the _card_counts cache is hot.
// The hot cache state (_hot_cache, _n_hot, _hot_cache_idx) is updated
// under HotCardCache_lock.
ysr@777 324 jbyte* res = NULL;
ysr@777 325 MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
ysr@777 326 if (_n_hot == _hot_cache_size) {
// Cache is full: evict the entry at the insertion point and hand it
// back to the caller for immediate refinement.
ysr@777 327 res = _hot_cache[_hot_cache_idx];
ysr@777 328 _n_hot--;
ysr@777 329 }
ysr@777 330 // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
johnc@1325 331 _hot_cache[_hot_cache_idx] = cached_ptr;
ysr@777 332 _hot_cache_idx++;
ysr@777 333 if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
ysr@777 334 _n_hot++;
johnc@1325 335
johnc@2021 336 // The card obtained from the hot card cache could be in a young
johnc@2021 337 // region. See above on how this can happen.
johnc@1325 338
ysr@777 339 return res;
ysr@777 340 }
ysr@777 341
// Drain the hot card cache, refining every buffered card. Multiple
// workers may call this concurrently: each claims a chunk of the cache
// with a CAS on _hot_cache_par_claimed_idx.
ysr@777 342 void ConcurrentG1Refine::clean_up_cache(int worker_i, G1RemSet* g1rs) {
ysr@777 343 assert(!use_cache(), "cache should be disabled");
johnc@1324 344 int start_idx;
johnc@1324 345
johnc@1324 346 while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
johnc@1324 347 int end_idx = start_idx + _hot_cache_par_chunk_size;
johnc@1324 348
johnc@1324 349 if (start_idx ==
johnc@1324 350 Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
johnc@1324 351 // The current worker has successfully claimed the chunk [start_idx..end_idx)
johnc@1324 352 end_idx = MIN2(end_idx, _n_hot);
johnc@1324 353 for (int i = start_idx; i < end_idx; i++) {
johnc@1324 354 jbyte* entry = _hot_cache[i];
johnc@1324 355 if (entry != NULL) {
johnc@1324 356 g1rs->concurrentRefineOneCard(entry, worker_i);
johnc@1324 357 }
johnc@1324 358 }
// If the CAS failed another worker claimed this chunk; loop and try
// to claim the next one.
ysr@777 359 }
ysr@777 360 }
ysr@777 361 }
ysr@777 362
// Grow the card count and epoch tables to the next size in
// _cc_cache_sizes, capped at _max_n_card_counts. The old contents are
// deliberately discarded rather than rehashed.
johnc@1325 363 void ConcurrentG1Refine::expand_card_count_cache() {
johnc@1325 364 if (_n_card_counts < _max_n_card_counts) {
johnc@1325 365 int new_idx = _cache_size_index+1;
johnc@1325 366 int new_size = _cc_cache_sizes[new_idx];
// A negative entry is the end-of-table sentinel.
johnc@1325 367 if (new_size < 0) new_size = _max_n_card_counts;
johnc@1325 368
johnc@1325 369 // Make sure we don't go bigger than we will ever need
johnc@1325 370 new_size = MIN2((unsigned) new_size, _max_n_card_counts);
johnc@1325 371
johnc@1325 372 // Expand the card count and card epoch tables
johnc@1325 373 if (new_size > (int)_n_card_counts) {
johnc@1325 374 // We can just free and allocate a new array as we're
johnc@1325 375 // not interested in preserving the contents
johnc@1325 376 assert(_card_counts != NULL, "Logic!");
johnc@1325 377 assert(_card_epochs != NULL, "Logic!");
johnc@1325 378 FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
johnc@1325 379 FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
johnc@1325 380 _n_card_counts = new_size;
johnc@1325 381 _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
johnc@1325 382 _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
johnc@1325 383 _cache_size_index = new_idx;
ysr@777 384 }
ysr@777 385 }
ysr@777 386 }
ysr@777 387
// Start a new counting period: optionally expand the count cache,
// clear the epoch table after a resize, and advance the global epoch
// (_n_periods). Bumping the epoch is what invalidates all entries
// cached during the previous period (add_card_count compares each
// entry's epoch against the current _n_periods).
// Fix: assert message typo "to many periods" -> "too many periods".
johnc@1325 388 void ConcurrentG1Refine::clear_and_record_card_counts() {
johnc@1325 389 if (G1ConcRSLogCacheSize == 0) return;
johnc@1325 390
johnc@1325 391 #ifndef PRODUCT
johnc@1325 392 double start = os::elapsedTime();
johnc@1325 393 #endif
johnc@1325 394
johnc@1325 395 if (_expand_card_counts) {
johnc@1325 396 expand_card_count_cache();
johnc@1325 397 _expand_card_counts = false;
johnc@1325 398 // Only need to clear the epochs.
johnc@1325 399 Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
ysr@777 400 }
ysr@777 401
johnc@1325 402 int this_epoch = (int) _n_periods;
johnc@1325 403 assert((this_epoch+1) <= max_jint, "too many periods");
johnc@1325 404 // Update epoch
johnc@1325 405 _n_periods++;
johnc@1325 406
johnc@1325 407 #ifndef PRODUCT
johnc@1325 408 double elapsed = os::elapsedTime() - start;
johnc@1325 409 _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
johnc@1325 410 #endif
ysr@777 411 }
tonyp@1454 412
tonyp@1454 413 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
tonyp@1454 414 for (int i = 0; i < _n_threads; ++i) {
tonyp@1454 415 _threads[i]->print_on(st);
tonyp@1454 416 st->cr();
tonyp@1454 417 }
tonyp@1454 418 }

mercurial