src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp

Thu, 15 Apr 2010 18:45:30 -0400

author
tonyp
date
Thu, 15 Apr 2010 18:45:30 -0400
changeset 1825
f9ec1e4bbb44
parent 1717
b81f3572f355
child 1907
c18cbe5936b8
permissions
-rw-r--r--

6939027: G1: assertion failure during the concurrent phase of cleanup
Summary: The outgoing region map is not maintained properly and it's causing an assert failure. Given that we don't actually use it, I'm removing it. I'm piggy-backing a small change on this which removes a message that is printed before a Full GC when DisableExplicitGC is set.
Reviewed-by: apetrusenko, ysr

ysr@777 1 /*
xdono@1279 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
ysr@777 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
ysr@777 20 * CA 95054 USA or visit www.sun.com if you need additional information or
ysr@777 21 * have any questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #include "incls/_precompiled.incl"
ysr@777 26 #include "incls/_concurrentG1Refine.cpp.incl"
ysr@777 27
// Possible sizes for the card counts cache: odd primes that roughly double in size.
// (See jvmtiTagMap.cpp).
// The table is terminated by a -1 sentinel; when sizing logic reaches the
// sentinel the cache is sized directly from _max_n_card_counts instead.
int ConcurrentG1Refine::_cc_cache_sizes[] = {
  16381, 32771, 76831, 150001, 307261,
  614563, 1228891, 2457733, 4915219, 9830479,
  19660831, 39321619, 78643219, 157286461, -1
};
johnc@1325 35
// Constructor: selects the green/yellow/red refinement zone thresholds
// (ergonomically when the corresponding flags are left at their defaults),
// then creates the refinement worker threads plus one extra thread for
// young gen remembered set size sampling.
ConcurrentG1Refine::ConcurrentG1Refine() :
  _card_counts(NULL), _card_epochs(NULL),
  _n_card_counts(0), _max_n_card_counts(0),
  _cache_size_index(0), _expand_card_counts(false),
  _hot_cache(NULL),
  _def_use_cache(false), _use_cache(false),
  _n_periods(0),
  _threads(NULL), _n_threads(0)
{

  // Ergonomically select initial concurrent refinement parameters.
  // Defaults are derived from each other (green from the GC thread count,
  // yellow from green, red from yellow) and the MAX2 clamps enforce
  // green <= yellow <= red.
  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
  }
  set_green_zone(G1ConcRefinementGreenZone);

  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
  }
  set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));

  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
  }
  set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
  _n_worker_threads = thread_num();
  // We need one extra thread to do the young gen rset size sampling.
  _n_threads = _n_worker_threads + 1;
  reset_threshold_step();

  _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
  int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
  ConcurrentG1RefineThread *next = NULL;
  // Create the threads in reverse order so that each thread can be handed
  // a pointer to its already-constructed successor.
  for (int i = _n_threads - 1; i >= 0; i--) {
    ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
    assert(t != NULL, "Conc refine should have been created");
    assert(t->cg1r() == this, "Conc refine thread should refer to this");
    _threads[i] = t;
    next = t;
  }
}
ysr@777 77
iveresov@1546 78 void ConcurrentG1Refine::reset_threshold_step() {
tonyp@1717 79 if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
iveresov@1546 80 _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
iveresov@1546 81 } else {
tonyp@1717 82 _thread_threshold_step = G1ConcRefinementThresholdStep;
iveresov@1230 83 }
iveresov@1546 84 }
iveresov@1546 85
iveresov@1546 86 int ConcurrentG1Refine::thread_num() {
tonyp@1717 87 return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
iveresov@1230 88 }
iveresov@1230 89
// One-time initialization performed once the heap exists. When
// G1ConcRSLogCacheSize > 0, sets up the card count / epoch tables (used
// to identify "hot" cards) and the hot card cache itself; otherwise the
// caching machinery stays disabled.
void ConcurrentG1Refine::init() {
  if (G1ConcRSLogCacheSize > 0) {
    _g1h = G1CollectedHeap::heap();
    // Upper bound on table size: one entry per card in the reserved heap.
    _max_n_card_counts =
      (unsigned) (_g1h->g1_reserved_obj_bytes() >> CardTableModRefBS::card_shift);

    // Card numbers must fit in the bit-field an epoch entry reserves for
    // them (one bit fewer than an unsigned).
    size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
    guarantee(_max_n_card_counts < max_card_num, "card_num representation");

    // Pick the smallest size from _cc_cache_sizes that covers the desired
    // initial fraction of the maximum, then back off one step.
    int desired = _max_n_card_counts / InitialCacheFraction;
    for (_cache_size_index = 0;
         _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) {
      if (_cc_cache_sizes[_cache_size_index] >= desired) break;
    }
    _cache_size_index = MAX2(0, (_cache_size_index - 1));

    int initial_size = _cc_cache_sizes[_cache_size_index];
    // A negative value means we walked off the end of the table (-1 sentinel).
    if (initial_size < 0) initial_size = _max_n_card_counts;

    // Make sure we don't go bigger than we will ever need
    _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts);

    _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
    _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);

    // Clear both tables so no entry matches the current period initially.
    Copy::fill_to_bytes(&_card_counts[0],
                        _n_card_counts * sizeof(CardCountCacheEntry));
    Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));

    ModRefBarrierSet* bs = _g1h->mr_bs();
    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
    _ct_bs = (CardTableModRefBS*)bs;
    // Card table entry for the bottom of the reserved region; used to map
    // card pointers to card numbers and back.
    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());

    _def_use_cache = true;
    _use_cache = true;
    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
    _n_hot = 0;
    _hot_cache_idx = 0;

    // For refining the cards in the hot cache in parallel
    int n_workers = (ParallelGCThreads > 0 ?
                        _g1h->workers()->total_workers() : 1);
    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
    _hot_cache_par_claimed_idx = 0;
  }
}
ysr@777 138
iveresov@1229 139 void ConcurrentG1Refine::stop() {
iveresov@1229 140 if (_threads != NULL) {
iveresov@1229 141 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 142 _threads[i]->stop();
iveresov@1229 143 }
iveresov@1229 144 }
iveresov@1229 145 }
iveresov@1229 146
iveresov@1546 147 void ConcurrentG1Refine::reinitialize_threads() {
iveresov@1546 148 reset_threshold_step();
iveresov@1546 149 if (_threads != NULL) {
iveresov@1546 150 for (int i = 0; i < _n_threads; i++) {
iveresov@1546 151 _threads[i]->initialize();
iveresov@1546 152 }
iveresov@1546 153 }
iveresov@1546 154 }
iveresov@1546 155
ysr@777 156 ConcurrentG1Refine::~ConcurrentG1Refine() {
johnc@1325 157 if (G1ConcRSLogCacheSize > 0) {
ysr@777 158 assert(_card_counts != NULL, "Logic");
johnc@1325 159 FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
johnc@1325 160 assert(_card_epochs != NULL, "Logic");
johnc@1325 161 FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
ysr@777 162 assert(_hot_cache != NULL, "Logic");
ysr@777 163 FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
ysr@777 164 }
iveresov@1229 165 if (_threads != NULL) {
iveresov@1229 166 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 167 delete _threads[i];
iveresov@1229 168 }
iveresov@1234 169 FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
ysr@777 170 }
ysr@777 171 }
ysr@777 172
iveresov@1229 173 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
iveresov@1229 174 if (_threads != NULL) {
iveresov@1229 175 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 176 tc->do_thread(_threads[i]);
iveresov@1229 177 }
ysr@777 178 }
ysr@777 179 }
ysr@777 180
johnc@1325 181 bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
johnc@1325 182 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@1325 183 HeapRegion* r = _g1h->heap_region_containing(start);
johnc@1325 184 if (r != NULL && r->is_young()) {
johnc@1325 185 return true;
johnc@1325 186 }
johnc@1325 187 // This card is not associated with a heap region
johnc@1325 188 // so can't be young.
johnc@1325 189 return false;
ysr@777 190 }
ysr@777 191
// Record an occurrence of card_ptr in the (lossy, direct-mapped) card
// count cache. Returns the card pointer the caller should act on — either
// card_ptr itself or an evicted old card pointer — and sets:
//   *count : the previous occurrence count for the returned card;
//   *defer : true if processing of card_ptr may be deferred.
// Multiple refinement threads race on the same bucket; the epoch word is
// updated with CAS so exactly one thread wins each transition.
jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
  unsigned new_card_num = ptr_2_card_num(card_ptr);
  unsigned bucket = hash(new_card_num);
  assert(0 <= bucket && bucket < _n_card_counts, "Bounds");

  CardCountCacheEntry* count_ptr = &_card_counts[bucket];
  CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];

  // We have to construct a new entry if we haven't updated the counts
  // during the current period, or if the count was updated for a
  // different card number.
  unsigned int new_epoch = (unsigned int) _n_periods;
  julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);

  // Retry loop: each iteration re-reads the epoch word and attempts one
  // CAS transition; a lost race falls through to the next iteration.
  while (true) {
    // Fetch the previous epoch value
    julong prev_epoch_entry = epoch_ptr->_value;
    julong cas_res;

    if (extract_epoch(prev_epoch_entry) != new_epoch) {
      // This entry has not yet been updated during this period.
      // Note: we update the epoch value atomically to ensure
      // that there is only one winner that updates the cached
      // card_ptr value even though all the refine threads share
      // the same epoch value.

      cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
                                         (volatile jlong*)&epoch_ptr->_value,
                                         (jlong) prev_epoch_entry);

      if (cas_res == prev_epoch_entry) {
        // We have successfully won the race to update the
        // epoch and card_num value. Make it look like the
        // count and eviction count were previously cleared.
        count_ptr->_count = 1;
        count_ptr->_evict_count = 0;
        *count = 0;
        // We can defer the processing of card_ptr
        *defer = true;
        return card_ptr;
      }
      // We did not win the race to update the epoch field, so some other
      // thread must have done it. The value that gets returned by CAS
      // should be the new epoch value.
      assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
      // We could 'continue' here or just re-read the previous epoch value
      prev_epoch_entry = epoch_ptr->_value;
    }

    // The epoch entry for card_ptr has been updated during this period.
    unsigned old_card_num = extract_card_num(prev_epoch_entry);

    // The card count that will be returned to caller
    *count = count_ptr->_count;

    // Are we updating the count for the same card?
    if (new_card_num == old_card_num) {
      // Same card - just update the count. We could have more than one
      // thread racing to update count for the current card. It should be
      // OK not to use a CAS as the only penalty should be some missed
      // increments of the count which delays identifying the card as "hot".

      if (*count < max_jubyte) count_ptr->_count++;  // saturating increment
      // We can defer the processing of card_ptr
      *defer = true;
      return card_ptr;
    }

    // Different card - evict old card info
    if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
    if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
      // Trigger a resize the next time we clear
      _expand_card_counts = true;
    }

    cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
                                       (volatile jlong*)&epoch_ptr->_value,
                                       (jlong) prev_epoch_entry);

    if (cas_res == prev_epoch_entry) {
      // We successfully updated the card num value in the epoch entry
      count_ptr->_count = 0; // initialize counter for new card num

      // Even though the region containing the card at old_card_num was not
      // in the young list when old_card_num was recorded in the epoch
      // cache it could have been added to the free list and subsequently
      // added to the young list in the intervening time. If the evicted
      // card is in a young region just return the card_ptr and the evicted
      // card will not be cleaned. See CR 6817995.

      jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
      if (is_young_card(old_card_ptr)) {
        *count = 0;
        // We can defer the processing of card_ptr
        *defer = true;
        return card_ptr;
      }

      // We do not want to defer processing of card_ptr in this case
      // (we need to refine old_card_ptr and card_ptr)
      *defer = false;
      return old_card_ptr;
    }
    // Someone else beat us - try again.
  }
}
johnc@1325 298
// Route card_ptr through the card count cache and, if it has become hot,
// into the hot card cache. Returns the card the caller should refine
// immediately (possibly a card evicted from the hot cache rather than
// card_ptr itself), or NULL if nothing needs immediate refinement.
// *defer is set by add_card_count (see there).
jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
  int count;
  jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
  assert(cached_ptr != NULL, "bad cached card ptr");

  if (is_young_card(cached_ptr)) {
    // The region containing cached_ptr has been freed during a clean up
    // pause, reallocated, and tagged as young.
    assert(cached_ptr != card_ptr, "shouldn't be");

    // We've just inserted a new old-gen card pointer into the card count
    // cache and evicted the previous contents of that count slot.
    // The evicted card pointer has been determined to be in a young region
    // and so cannot be the newly inserted card pointer (that will be
    // in an old region).
    // The count for newly inserted card will be set to zero during the
    // insertion, so we don't want to defer the cleaning of the newly
    // inserted card pointer.
    assert(*defer == false, "deferring non-hot card");
    return NULL;
  }

  // The card pointer we obtained from card count cache is not hot
  // so do not store it in the cache; return it for immediate
  // refining.
  if (count < G1ConcRSHotCardLimit) {
    return cached_ptr;
  }

  // Otherwise, the pointer we got from the _card_counts is hot.
  jbyte* res = NULL;
  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
  if (_n_hot == _hot_cache_size) {
    // Cache full: evict the entry at the insertion index to make room;
    // the evicted card becomes the candidate result.
    res = _hot_cache[_hot_cache_idx];
    _n_hot--;
  }
  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
  _hot_cache[_hot_cache_idx] = cached_ptr;
  _hot_cache_idx++;
  if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0; // circular buffer wrap
  _n_hot++;

  if (res != NULL) {
    // Even though the region containing res was not in the young list
    // when it was recorded in the hot cache it could have been added
    // to the free list and subsequently added to the young list in
    // the intervening time. If res is in a young region, return NULL
    // so that res is not cleaned. See CR 6817995.

    if (is_young_card(res)) {
      res = NULL;
    }
  }

  return res;
}
ysr@777 355
// Drain the hot card cache, refining every non-NULL cached card. May be
// called by several workers at once: each worker claims a chunk
// [start_idx..end_idx) of the cache with a CAS on
// _hot_cache_par_claimed_idx, so no card is processed twice.
// Preconditions: caching has been disabled (use_cache() is false).
void ConcurrentG1Refine::clean_up_cache(int worker_i, G1RemSet* g1rs) {
  assert(!use_cache(), "cache should be disabled");
  int start_idx;

  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
    int end_idx = start_idx + _hot_cache_par_chunk_size;

    if (start_idx ==
        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _n_hot);  // last chunk may be short
      for (int i = start_idx; i < end_idx; i++) {
        jbyte* entry = _hot_cache[i];
        if (entry != NULL) {
          g1rs->concurrentRefineOneCard(entry, worker_i);
        }
      }
    }
    // A failed CAS means another worker claimed this chunk; loop and
    // try to claim the next one.
  }
}
ysr@777 376
// Grow the card count / epoch tables to the next size in _cc_cache_sizes,
// clamped to _max_n_card_counts. The previous contents are discarded —
// the counts are only a heuristic, so losing them is acceptable.
void ConcurrentG1Refine::expand_card_count_cache() {
  if (_n_card_counts < _max_n_card_counts) {
    int new_idx = _cache_size_index+1;
    int new_size = _cc_cache_sizes[new_idx];
    // A negative value means we hit the -1 sentinel at the end of the table.
    if (new_size < 0) new_size = _max_n_card_counts;

    // Make sure we don't go bigger than we will ever need
    new_size = MIN2((unsigned) new_size, _max_n_card_counts);

    // Expand the card count and card epoch tables
    if (new_size > (int)_n_card_counts) {
      // We can just free and allocate a new array as we're
      // not interested in preserving the contents
      assert(_card_counts != NULL, "Logic!");
      assert(_card_epochs != NULL, "Logic!");
      FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
      FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
      _n_card_counts = new_size;
      _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
      _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
      _cache_size_index = new_idx;
    }
  }
}
ysr@777 401
johnc@1325 402 void ConcurrentG1Refine::clear_and_record_card_counts() {
johnc@1325 403 if (G1ConcRSLogCacheSize == 0) return;
johnc@1325 404
johnc@1325 405 #ifndef PRODUCT
johnc@1325 406 double start = os::elapsedTime();
johnc@1325 407 #endif
johnc@1325 408
johnc@1325 409 if (_expand_card_counts) {
johnc@1325 410 expand_card_count_cache();
johnc@1325 411 _expand_card_counts = false;
johnc@1325 412 // Only need to clear the epochs.
johnc@1325 413 Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
ysr@777 414 }
ysr@777 415
johnc@1325 416 int this_epoch = (int) _n_periods;
johnc@1325 417 assert((this_epoch+1) <= max_jint, "to many periods");
johnc@1325 418 // Update epoch
johnc@1325 419 _n_periods++;
johnc@1325 420
johnc@1325 421 #ifndef PRODUCT
johnc@1325 422 double elapsed = os::elapsedTime() - start;
johnc@1325 423 _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
johnc@1325 424 #endif
ysr@777 425 }
tonyp@1454 426
tonyp@1454 427 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
tonyp@1454 428 for (int i = 0; i < _n_threads; ++i) {
tonyp@1454 429 _threads[i]->print_on(st);
tonyp@1454 430 st->cr();
tonyp@1454 431 }
tonyp@1454 432 }

mercurial