src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp

author:      johnc
date:        Mon, 28 Mar 2011 10:58:54 -0700
changeset:   2713:02f49b66361a
parent:      2646:04d1138b4cce
child:       2716:c84ee870e0b9
permissions: -rw-r--r--

7026932: G1: No need to abort VM when card count cache expansion fails
Summary: Manage allocation/freeing of the card cache counts and epochs arrays directly so that an allocation failure while attempting to expand these arrays does not abort the JVM. Failure to expand these arrays is not fatal.
Reviewed-by: iveresov, tonyp
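
The crux of the change: the NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY macros go
through AllocateHeap/FreeHeap, and AllocateHeap calls vm_exit_out_of_memory,
aborting the JVM, if the allocation fails. Calling os::malloc()/os::free()
directly instead lets an expansion failure be reported back to the caller.
A minimal sketch of the allocate-both-or-neither pattern (the type and
function names here are illustrative stand-ins, not the real ones; the
actual implementation is ConcurrentG1Refine::allocate_card_count_cache
below):

    // Illustrative sketch only: Counts/Epochs stand in for
    // CardCountCacheEntry/CardEpochCacheEntry. Attempt both allocations
    // before freeing anything, and roll back the first allocation if the
    // second one fails.
    static bool try_allocate_pair(size_t n, Counts** counts, Epochs** epochs) {
      *counts = (Counts*) os::malloc(n * sizeof(Counts));
      if (*counts == NULL) return false;   // not fatal: caller keeps the old arrays
      *epochs = (Epochs*) os::malloc(n * sizeof(Epochs));
      if (*epochs == NULL) {
        os::free(*counts);                 // undo the partial allocation
        *counts = NULL;
        return false;
      }
      return true;                         // caller may now free the old arrays
    }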

ysr@777 1 /*
johnc@2504 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
stefank@2314 27 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
stefank@2314 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1RemSet.hpp"
stefank@2314 31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 32 #include "memory/space.inline.hpp"
stefank@2314 33 #include "runtime/atomic.hpp"
johnc@2713 34 #include "runtime/java.hpp"
stefank@2314 35 #include "utilities/copy.hpp"
ysr@777 36
johnc@1325 37 // Possible sizes for the card counts cache: odd primes that roughly double in size.
johnc@1325 38 // (See jvmtiTagMap.cpp).
johnc@2713 39
johnc@2713 40 #define MAX_SIZE ((size_t) -1)
johnc@2713 41
johnc@2713 42 size_t ConcurrentG1Refine::_cc_cache_sizes[] = {
johnc@2713 43 16381, 32771, 76831, 150001, 307261,
johnc@2713 44 614563, 1228891, 2457733, 4915219, 9830479,
johnc@2713 45 19660831, 39321619, 78643219, 157286461, MAX_SIZE
johnc@1325 46 };
johnc@1325 47
ysr@777 48 ConcurrentG1Refine::ConcurrentG1Refine() :
johnc@1325 49 _card_counts(NULL), _card_epochs(NULL),
johnc@2713 50 _n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
johnc@1325 51 _cache_size_index(0), _expand_card_counts(false),
ysr@777 52 _hot_cache(NULL),
ysr@777 53 _def_use_cache(false), _use_cache(false),
johnc@1325 54 _n_periods(0),
iveresov@1229 55 _threads(NULL), _n_threads(0)
ysr@777 56 {
iveresov@1546 57
iveresov@1546 58 // Ergonomically select initial concurrent refinement parameters
tonyp@1717 59 if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
tonyp@1717 60 FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
iveresov@1546 61 }
tonyp@1717 62 set_green_zone(G1ConcRefinementGreenZone);
iveresov@1546 63
tonyp@1717 64 if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
tonyp@1717 65 FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
iveresov@1546 66 }
tonyp@1717 67 set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
iveresov@1546 68
tonyp@1717 69 if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
tonyp@1717 70 FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
iveresov@1546 71 }
tonyp@1717 72 set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
iveresov@1546 73 _n_worker_threads = thread_num();
iveresov@1546 74 // We need one extra thread to do the young gen rset size sampling.
iveresov@1546 75 _n_threads = _n_worker_threads + 1;
iveresov@1546 76 reset_threshold_step();
iveresov@1546 77
iveresov@1546 78 _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
iveresov@1546 79 int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
iveresov@1546 80 ConcurrentG1RefineThread *next = NULL;
iveresov@1546 81 for (int i = _n_threads - 1; i >= 0; i--) {
iveresov@1546 82 ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
iveresov@1546 83 assert(t != NULL, "Conc refine should have been created");
iveresov@1546 84 assert(t->cg1r() == this, "Conc refine thread should refer to this");
iveresov@1546 85 _threads[i] = t;
iveresov@1546 86 next = t;
ysr@777 87 }
ysr@777 88 }
ysr@777 89
iveresov@1546 90 void ConcurrentG1Refine::reset_threshold_step() {
tonyp@1717 91 if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
iveresov@1546 92 _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
iveresov@1546 93 } else {
tonyp@1717 94 _thread_threshold_step = G1ConcRefinementThresholdStep;
iveresov@1230 95 }
iveresov@1546 96 }
iveresov@1546 97
iveresov@1546 98 int ConcurrentG1Refine::thread_num() {
tonyp@1717 99 return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
iveresov@1230 100 }
iveresov@1230 101
ysr@777 102 void ConcurrentG1Refine::init() {
johnc@1325 103 if (G1ConcRSLogCacheSize > 0) {
johnc@1325 104 _g1h = G1CollectedHeap::heap();
johnc@2713 105
johnc@2713 106 _max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift;
johnc@2713 107 _max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100;
johnc@1325 108
johnc@1325 109 size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
johnc@2713 110 guarantee(_max_cards < max_card_num, "card_num representation");
johnc@1325 111
johnc@2713 112 // We need _n_card_counts to be less than _max_n_card_counts here
johnc@2713 113 // so that the expansion call (below) actually allocates the
johnc@2713 114 // _counts and _epochs arrays.
johnc@2713 115 assert(_n_card_counts == 0, "pre-condition");
johnc@2713 116 assert(_max_n_card_counts > 0, "pre-condition");
johnc@2713 117
johnc@2713 118 // Find the index of the first entry in the cache size array
johnc@2713 119 // that is large enough to hold desired_sz.
johnc@2713 120 size_t desired_sz = _max_cards / InitialCacheFraction;
johnc@2713 121 int desired_sz_index = 0;
johnc@2713 122 while (_cc_cache_sizes[desired_sz_index] < desired_sz) {
johnc@2713 123 desired_sz_index += 1;
johnc@2713 124 assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant");
johnc@1325 125 }
johnc@2713 126 assert(desired_sz_index < MAX_CC_CACHE_INDEX, "invariant");
johnc@1325 127
johnc@2713 128 // If the desired_sz value falls strictly between two sizes, i.e.
johnc@2713 129 // _cc_cache_sizes[desired_sz_index-1] < desired_sz < _cc_cache_sizes[desired_sz_index],
johnc@2713 130 // we will start with the lower size in the optimistic expectation that
johnc@2713 131 // we will not need to expand up. Note desired_sz_index could also be 0.
johnc@2713 132 if (desired_sz_index > 0 &&
johnc@2713 133 _cc_cache_sizes[desired_sz_index] > desired_sz) {
johnc@2713 134 desired_sz_index -= 1;
johnc@2713 135 }
johnc@1325 136
johnc@2713 137 if (!expand_card_count_cache(desired_sz_index)) {
johnc@2713 138 // Allocation was unsuccessful - exit
johnc@2713 139 vm_exit_during_initialization("Could not reserve enough space for card count cache");
johnc@2713 140 }
johnc@2713 141 assert(_n_card_counts > 0, "post-condition");
johnc@2713 142 assert(_cache_size_index == desired_sz_index, "post-condition");
johnc@1325 143
johnc@1325 144 Copy::fill_to_bytes(&_card_counts[0],
johnc@1325 145 _n_card_counts * sizeof(CardCountCacheEntry));
johnc@1325 146 Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
johnc@1325 147
johnc@1325 148 ModRefBarrierSet* bs = _g1h->mr_bs();
ysr@777 149 guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
johnc@1325 150 _ct_bs = (CardTableModRefBS*)bs;
johnc@1325 151 _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
johnc@1325 152
ysr@777 153 _def_use_cache = true;
ysr@777 154 _use_cache = true;
ysr@777 155 _hot_cache_size = (1 << G1ConcRSLogCacheSize);
ysr@777 156 _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
ysr@777 157 _n_hot = 0;
ysr@777 158 _hot_cache_idx = 0;
johnc@1324 159
johnc@1324 160 // For refining the cards in the hot cache in parallel
johnc@1324 161 int n_workers = (ParallelGCThreads > 0 ?
johnc@1325 162 _g1h->workers()->total_workers() : 1);
johnc@1324 163 _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
johnc@1324 164 _hot_cache_par_claimed_idx = 0;
ysr@777 165 }
ysr@777 166 }
ysr@777 167
iveresov@1229 168 void ConcurrentG1Refine::stop() {
iveresov@1229 169 if (_threads != NULL) {
iveresov@1229 170 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 171 _threads[i]->stop();
iveresov@1229 172 }
iveresov@1229 173 }
iveresov@1229 174 }
iveresov@1229 175
iveresov@1546 176 void ConcurrentG1Refine::reinitialize_threads() {
iveresov@1546 177 reset_threshold_step();
iveresov@1546 178 if (_threads != NULL) {
iveresov@1546 179 for (int i = 0; i < _n_threads; i++) {
iveresov@1546 180 _threads[i]->initialize();
iveresov@1546 181 }
iveresov@1546 182 }
iveresov@1546 183 }
iveresov@1546 184
ysr@777 185 ConcurrentG1Refine::~ConcurrentG1Refine() {
johnc@1325 186 if (G1ConcRSLogCacheSize > 0) {
johnc@2713 187 // Please see the comment in allocate_card_count_cache
johnc@2713 188 // for why we call os::malloc() and os::free() directly.
ysr@777 189 assert(_card_counts != NULL, "Logic");
johnc@2713 190 os::free(_card_counts);
johnc@1325 191 assert(_card_epochs != NULL, "Logic");
johnc@2713 192 os::free(_card_epochs);
johnc@2713 193
ysr@777 194 assert(_hot_cache != NULL, "Logic");
ysr@777 195 FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
ysr@777 196 }
iveresov@1229 197 if (_threads != NULL) {
iveresov@1229 198 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 199 delete _threads[i];
iveresov@1229 200 }
iveresov@1234 201 FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
ysr@777 202 }
ysr@777 203 }
ysr@777 204
iveresov@1229 205 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
iveresov@1229 206 if (_threads != NULL) {
iveresov@1229 207 for (int i = 0; i < _n_threads; i++) {
iveresov@1229 208 tc->do_thread(_threads[i]);
iveresov@1229 209 }
ysr@777 210 }
ysr@777 211 }
ysr@777 212
johnc@1325 213 bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
johnc@1325 214 HeapWord* start = _ct_bs->addr_for(card_ptr);
johnc@1325 215 HeapRegion* r = _g1h->heap_region_containing(start);
johnc@1325 216 if (r != NULL && r->is_young()) {
johnc@1325 217 return true;
johnc@1325 218 }
johnc@1325 219 // Either the card is not associated with a heap region,
johnc@1325 220 // or its region is not young.
johnc@1325 221 return false;
ysr@777 222 }
ysr@777 223
johnc@1325 224 jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
johnc@1325 225 unsigned new_card_num = ptr_2_card_num(card_ptr);
johnc@1325 226 unsigned bucket = hash(new_card_num);
johnc@1325 227 assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
johnc@1325 228
johnc@1325 229 CardCountCacheEntry* count_ptr = &_card_counts[bucket];
johnc@1325 230 CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
johnc@1325 231
johnc@1325 232 // We have to construct a new entry if we haven't updated the counts
johnc@1325 233 // during the current period, or if the count was updated for a
johnc@1325 234 // different card number.
johnc@1325 235 unsigned int new_epoch = (unsigned int) _n_periods;
johnc@1325 236 julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
johnc@1325 237
johnc@1325 238 while (true) {
johnc@1325 239 // Fetch the previous epoch value
johnc@1325 240 julong prev_epoch_entry = epoch_ptr->_value;
johnc@1325 241 julong cas_res;
johnc@1325 242
johnc@1325 243 if (extract_epoch(prev_epoch_entry) != new_epoch) {
johnc@1325 244 // This entry has not yet been updated during this period.
johnc@1325 245 // Note: we update the epoch value atomically to ensure
johnc@1325 246 // that there is only one winner that updates the cached
johnc@1325 247 // card_ptr value even though all the refine threads share
johnc@1325 248 // the same epoch value.
johnc@1325 249
johnc@1325 250 cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
johnc@1325 251 (volatile jlong*)&epoch_ptr->_value,
johnc@1325 252 (jlong) prev_epoch_entry);
johnc@1325 253
johnc@1325 254 if (cas_res == prev_epoch_entry) {
johnc@1325 255 // We have successfully won the race to update the
johnc@1325 256 // epoch and card_num value. Make it look like the
johnc@1325 257 // count and eviction count were previously cleared.
johnc@1325 258 count_ptr->_count = 1;
johnc@1325 259 count_ptr->_evict_count = 0;
johnc@1325 260 *count = 0;
johnc@1325 261 // We can defer the processing of card_ptr
johnc@1325 262 *defer = true;
johnc@1325 263 return card_ptr;
johnc@1325 264 }
johnc@1325 265 // We did not win the race to update the epoch field, so some other
johnc@1325 266 // thread must have done it. The value that gets returned by CAS
johnc@1325 267 // should be the new epoch value.
johnc@1325 268 assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
johnc@1325 269 // We could 'continue' here or just re-read the previous epoch value
johnc@1325 270 prev_epoch_entry = epoch_ptr->_value;
johnc@1325 271 }
johnc@1325 272
johnc@1325 273 // The epoch entry for card_ptr has been updated during this period.
johnc@1325 274 unsigned old_card_num = extract_card_num(prev_epoch_entry);
johnc@1325 275
johnc@1325 276 // The card count that will be returned to caller
johnc@1325 277 *count = count_ptr->_count;
johnc@1325 278
johnc@1325 279 // Are we updating the count for the same card?
johnc@1325 280 if (new_card_num == old_card_num) {
johnc@1325 281 // Same card - just update the count. We could have more than one
johnc@1325 282 // thread racing to update count for the current card. It should be
johnc@1325 283 // OK not to use a CAS as the only penalty should be some missed
johnc@1325 284 // increments of the count which delays identifying the card as "hot".
johnc@1325 285
johnc@1325 286 if (*count < max_jubyte) count_ptr->_count++;
johnc@1325 287 // We can defer the processing of card_ptr
johnc@1325 288 *defer = true;
johnc@1325 289 return card_ptr;
johnc@1325 290 }
johnc@1325 291
johnc@1325 292 // Different card - evict old card info
johnc@1325 293 if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
johnc@1325 294 if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
johnc@1325 295 // Trigger a resize the next time we clear
johnc@1325 296 _expand_card_counts = true;
johnc@1325 297 }
johnc@1325 298
johnc@1325 299 cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
johnc@1325 300 (volatile jlong*)&epoch_ptr->_value,
johnc@1325 301 (jlong) prev_epoch_entry);
johnc@1325 302
johnc@1325 303 if (cas_res == prev_epoch_entry) {
johnc@1325 304 // We successfully updated the card num value in the epoch entry
johnc@1325 305 count_ptr->_count = 0; // initialize counter for new card num
johnc@2021 306 jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
johnc@1325 307
johnc@1325 308 // Even though the region containing the card at old_card_num was not
johnc@1325 309 // in the young list when old_card_num was recorded in the epoch
johnc@1325 310 // cache it could have been added to the free list and subsequently
johnc@2021 311 // added to the young list in the intervening time. See CR 6817995.
johnc@2021 312 // We do not deal with this case here - it will be handled in
johnc@2021 313 // HeapRegion::oops_on_card_seq_iterate_careful after it has been
johnc@2021 314 // determined that the region containing the card has been allocated
johnc@2021 315 // to, and it's safe to check the young type of the region.
johnc@1325 316
johnc@1325 317 // We do not want to defer processing of card_ptr in this case
johnc@1325 318 // (we need to refine old_card_ptr and card_ptr)
johnc@1325 319 *defer = false;
johnc@1325 320 return old_card_ptr;
johnc@1325 321 }
johnc@1325 322 // Someone else beat us - try again.
johnc@1325 323 }
johnc@1325 324 }
johnc@1325 325
johnc@1325 326 jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
johnc@1325 327 int count;
johnc@1325 328 jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
johnc@1325 329 assert(cached_ptr != NULL, "bad cached card ptr");
johnc@1681 330
johnc@2021 331 // We've just inserted a card pointer into the card count cache
johnc@2021 332 // and got back either the card we just inserted or the previously
johnc@2021 333 // cached card that was evicted from that count slot.
johnc@1681 334
johnc@2021 335 // The card we got back could be in a young region. When the
johnc@2021 336 // returned card (if evicted) was originally inserted, we had
johnc@2021 337 // determined that its containing region was not young. However
johnc@2021 338 // it is possible for the region to be freed during a cleanup
johnc@2021 339 // pause, then reallocated and tagged as young which will result
johnc@2021 340 // in the returned card residing in a young region.
johnc@2021 341 //
johnc@2021 342 // We do not deal with this case here - the change from non-young
johnc@2021 343 // to young could be observed at any time - it will be handled in
johnc@2021 344 // HeapRegion::oops_on_card_seq_iterate_careful after it has been
johnc@2021 345 // determined that the region containing the card has been allocated
johnc@2021 346 // to.
johnc@1325 347
johnc@1325 348 // If the card pointer we obtained from the card count cache is
johnc@1325 349 // not yet hot, do not store it in the hot card cache; return it
johnc@1325 350 // for immediate refining.
ysr@777 351 if (count < G1ConcRSHotCardLimit) {
johnc@1325 352 return cached_ptr;
ysr@777 353 }
johnc@1325 354
johnc@2021 355 // Otherwise, the pointer we got from the _card_counts cache is hot.
ysr@777 356 jbyte* res = NULL;
ysr@777 357 MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
ysr@777 358 if (_n_hot == _hot_cache_size) {
ysr@777 359 res = _hot_cache[_hot_cache_idx];
ysr@777 360 _n_hot--;
ysr@777 361 }
ysr@777 362 // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
johnc@1325 363 _hot_cache[_hot_cache_idx] = cached_ptr;
ysr@777 364 _hot_cache_idx++;
ysr@777 365 if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
ysr@777 366 _n_hot++;
johnc@1325 367
johnc@2021 368 // The card obtained from the hot card cache could be in a young
johnc@2021 369 // region. See above on how this can happen.
johnc@1325 370
ysr@777 371 return res;
ysr@777 372 }
ysr@777 373
johnc@2060 374 void ConcurrentG1Refine::clean_up_cache(int worker_i,
johnc@2060 375 G1RemSet* g1rs,
johnc@2060 376 DirtyCardQueue* into_cset_dcq) {
ysr@777 377 assert(!use_cache(), "cache should be disabled");
johnc@1324 378 int start_idx;
johnc@1324 379
johnc@1324 380 while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
johnc@1324 381 int end_idx = start_idx + _hot_cache_par_chunk_size;
johnc@1324 382
johnc@1324 383 if (start_idx ==
johnc@1324 384 Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
johnc@1324 385 // The current worker has successfully claimed the chunk [start_idx..end_idx)
johnc@1324 386 end_idx = MIN2(end_idx, _n_hot);
johnc@1324 387 for (int i = start_idx; i < end_idx; i++) {
johnc@1324 388 jbyte* entry = _hot_cache[i];
johnc@1324 389 if (entry != NULL) {
johnc@2060 390 if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
johnc@2060 391 // 'entry' contains references that point into the current
johnc@2060 392 // collection set. We need to record 'entry' in the DCQS
johnc@2060 393 // that's used for that purpose.
johnc@2060 394 //
johnc@2060 395 // The only time we care about recording cards that contain
johnc@2060 396 // references that point into the collection set is during
johnc@2060 397 // RSet updating while within an evacuation pause.
johnc@2060 398 // In this case worker_i should be the id of a GC worker thread
johnc@2060 399 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
brutisso@2646 400 assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
johnc@2060 401 into_cset_dcq->enqueue(entry);
johnc@2060 402 }
johnc@1324 403 }
johnc@1324 404 }
ysr@777 405 }
ysr@777 406 }
ysr@777 407 }
ysr@777 408
johnc@2713 409 // The arrays used to hold the card counts and the epochs must have
johnc@2713 410 // a 1:1 correspondence. Hence they are allocated and freed together.
johnc@2713 411 // Returns true if the allocations of both the counts and epochs
johnc@2713 412 // were successful; false otherwise.
johnc@2713 413 bool ConcurrentG1Refine::allocate_card_count_cache(size_t n,
johnc@2713 414 CardCountCacheEntry** counts,
johnc@2713 415 CardEpochCacheEntry** epochs) {
johnc@2713 416 // We call the allocation/free routines directly for the counts
johnc@2713 417 // and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY
johnc@2713 418 // macros call AllocateHeap and FreeHeap respectively.
johnc@2713 419 // AllocateHeap will call vm_exit_out_of_memory in the event
johnc@2713 420 // of an allocation failure and abort the JVM. With the
johnc@2713 421 // _counts/epochs arrays we only need to abort the JVM if the
johnc@2713 422 // initial allocation of these arrays fails.
johnc@2713 423 //
johnc@2713 424 // Additionally AllocateHeap/FreeHeap do some tracing of
johnc@2713 425 // allocate/free calls so calling one without calling the
johnc@2713 426 // other can cause inconsistencies in the tracing. So we
johnc@2713 427 // call neither.
johnc@2713 428
johnc@2713 429 assert(*counts == NULL, "out param");
johnc@2713 430 assert(*epochs == NULL, "out param");
johnc@2713 431
johnc@2713 432 size_t counts_size = n * sizeof(CardCountCacheEntry);
johnc@2713 433 size_t epochs_size = n * sizeof(CardEpochCacheEntry);
johnc@2713 434
johnc@2713 435 *counts = (CardCountCacheEntry*) os::malloc(counts_size);
johnc@2713 436 if (*counts == NULL) {
johnc@2713 437 // allocation was unsuccessful
johnc@2713 438 return false;
johnc@2713 439 }
johnc@2713 440
johnc@2713 441 *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size);
johnc@2713 442 if (*epochs == NULL) {
johnc@2713 443 // allocation was unsuccessful - free counts array
johnc@2713 444 assert(*counts != NULL, "must be");
johnc@2713 445 os::free(*counts);
johnc@2713 446 *counts = NULL;
johnc@2713 447 return false;
johnc@2713 448 }
johnc@2713 449
johnc@2713 450 // We successfully allocated both counts and epochs
johnc@2713 451 return true;
johnc@2713 452 }
johnc@2713 453
johnc@2713 454 // Returns true if the card counts/epochs cache was
johnc@2713 455 // successfully expanded; false otherwise.
johnc@2713 456 bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
johnc@2713 457 // Can we expand the card count and epoch tables?
johnc@1325 458 if (_n_card_counts < _max_n_card_counts) {
johnc@2713 459 assert(cache_size_idx >= 0 && cache_size_idx < MAX_CC_CACHE_INDEX, "oob");
johnc@1325 460
johnc@2713 461 size_t cache_size = _cc_cache_sizes[cache_size_idx];
johnc@1325 462 // Make sure we don't go bigger than we will ever need
johnc@2713 463 cache_size = MIN2(cache_size, _max_n_card_counts);
johnc@1325 464
johnc@2713 465 // Should we expand the card count and card epoch tables?
johnc@2713 466 if (cache_size > _n_card_counts) {
johnc@2713 467 // We have been asked to allocate new, larger, arrays for
johnc@2713 468 // the card counts and the epochs. Attempt the allocation
johnc@2713 469 // of both before we free the existing arrays in case
johnc@2713 470 // the allocation is unsuccessful...
johnc@2713 471 CardCountCacheEntry* counts = NULL;
johnc@2713 472 CardEpochCacheEntry* epochs = NULL;
johnc@2713 473
johnc@2713 474 if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
johnc@2713 475 // Allocation was successful.
johnc@2713 476 // We can just free the old arrays; we're
johnc@2713 477 // not interested in preserving the contents
johnc@2713 478 if (_card_counts != NULL) os::free(_card_counts);
johnc@2713 479 if (_card_epochs != NULL) os::free(_card_epochs);
johnc@2713 480
johnc@2713 481 // Cache the size of the arrays and the index that got us there.
johnc@2713 482 _n_card_counts = cache_size;
johnc@2713 483 _cache_size_index = cache_size_idx;
johnc@2713 484
johnc@2713 485 _card_counts = counts;
johnc@2713 486 _card_epochs = epochs;
johnc@2713 487
johnc@2713 488 // We successfully allocated/expanded the caches.
johnc@2713 489 return true;
johnc@2713 490 }
ysr@777 491 }
ysr@777 492 }
johnc@2713 493
johnc@2713 494 // We did not successfully expand the caches.
johnc@2713 495 return false;
ysr@777 496 }
ysr@777 497
johnc@1325 498 void ConcurrentG1Refine::clear_and_record_card_counts() {
johnc@1325 499 if (G1ConcRSLogCacheSize == 0) return;
johnc@1325 500
johnc@1325 501 #ifndef PRODUCT
johnc@1325 502 double start = os::elapsedTime();
johnc@1325 503 #endif
johnc@1325 504
johnc@1325 505 if (_expand_card_counts) {
johnc@2713 506 int new_idx = _cache_size_index + 1;
johnc@2713 507
johnc@2713 508 if (expand_card_count_cache(new_idx)) {
johnc@2713 509 // Allocation was successful and _n_card_counts has
johnc@2713 510 // been updated to the new size. We only need to clear
johnc@2713 511 // the epochs so we don't read a bogus epoch value
johnc@2713 512 // when inserting a card into the hot card cache.
johnc@2713 513 Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
johnc@2713 514 }
johnc@1325 515 _expand_card_counts = false;
ysr@777 516 }
ysr@777 517
johnc@1325 518 int this_epoch = (int) _n_periods;
johnc@1325 519 assert((this_epoch+1) <= max_jint, "too many periods");
johnc@1325 520 // Update epoch
johnc@1325 521 _n_periods++;
johnc@1325 522
johnc@1325 523 #ifndef PRODUCT
johnc@1325 524 double elapsed = os::elapsedTime() - start;
johnc@1325 525 _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
johnc@1325 526 #endif
ysr@777 527 }
tonyp@1454 528
tonyp@1454 529 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
tonyp@1454 530 for (int i = 0; i < _n_threads; ++i) {
tonyp@1454 531 _threads[i]->print_on(st);
tonyp@1454 532 st->cr();
tonyp@1454 533 }
tonyp@1454 534 }
