src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp

changeset 5078:194f52aa2f23
parent    3924:3a431b605145
child     5204:e72f7eecc96d
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu May 09 12:23:43 2013 +0200
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu May 09 11:16:39 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,40 +26,12 @@
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1CollectorPolicy.hpp"
-#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
-#include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
-#include "memory/space.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/java.hpp"
-#include "utilities/copy.hpp"
+#include "gc_implementation/g1/g1HotCardCache.hpp"
 
-// Possible sizes for the card counts cache: odd primes that roughly double in size.
-// (See jvmtiTagMap.cpp).
-
-#define MAX_SIZE ((size_t) -1)
-
-size_t ConcurrentG1Refine::_cc_cache_sizes[] = {
-          16381,    32771,    76831,    150001,   307261,
-         614563,  1228891,  2457733,   4915219,  9830479,
-       19660831, 39321619, 78643219, 157286461,  MAX_SIZE
-  };
-
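The table of odd primes removed above sized the card count cache. A minimal sketch of why prime sizes help, assuming the cache picks a bucket by reducing the card number modulo the current table size (the real hash lives in concurrentG1Refine.hpp); a prime modulus spreads card numbers that share low-order bit patterns across buckets:

#include <cstddef>

// Hypothetical illustration of a prime-modulus bucket hash.
unsigned hash(unsigned card_num, size_t table_size) {
  return (unsigned)(card_num % table_size);
}
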
-ConcurrentG1Refine::ConcurrentG1Refine() :
-  _card_counts(NULL), _card_epochs(NULL),
-  _n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
-  _cache_size_index(0), _expand_card_counts(false),
-  _hot_cache(NULL),
-  _def_use_cache(false), _use_cache(false),
-  // We initialize the epochs of the array to 0. By initializing
-  // _n_periods to 1 and not 0 we automatically invalidate all the
-  // entries on the array. Otherwise we might accidentally think that
-  // we claimed a card that was in fact never set (see CR7033292).
-  _n_periods(1),
-  _threads(NULL), _n_threads(0)
+ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
+  _threads(NULL), _n_threads(0),
+  _hot_card_cache(g1h)
 {
-
   // Ergonomically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
     FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
@@ -75,13 +47,17 @@
     FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
   }
   set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
+
   _n_worker_threads = thread_num();
   // We need one extra thread to do the young gen rset size sampling.
   _n_threads = _n_worker_threads + 1;
+
   reset_threshold_step();
 
   _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
+
   int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+
   ConcurrentG1RefineThread *next = NULL;
   for (int i = _n_threads - 1; i >= 0; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
@@ -100,74 +76,8 @@
   }
 }
 
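The zone ergonomics in the constructor pick defaults when the user leaves the flags unset: the green zone defaults to the number of parallel GC threads, and the red zone defaults to twice the yellow zone but never drops below it. A minimal standalone sketch of that relationship; the names are hypothetical, and the yellow-zone default is elided from this hunk, so yellow is taken as an input:

#include <algorithm>

struct Zones { int green, yellow, red; };

// green_dflt/red_dflt: the user left the corresponding flag at its default.
Zones pick_zones(int parallel_gc_threads, int green_flag, bool green_dflt,
                 int yellow, int red_flag, bool red_dflt) {
  Zones z;
  z.green = green_dflt ? std::max(parallel_gc_threads, 1) : green_flag;
  z.yellow = yellow;
  z.red = red_dflt ? 2 * z.yellow : red_flag;
  z.red = std::max(z.red, z.yellow);   // red never sits below yellow
  return z;
}
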
-int ConcurrentG1Refine::thread_num() {
-  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
-}
-
 void ConcurrentG1Refine::init() {
-  if (G1ConcRSLogCacheSize > 0) {
-    _g1h = G1CollectedHeap::heap();
-
-    _max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift;
-    _max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100;
-
-    size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
-    guarantee(_max_cards < max_card_num, "card_num representation");
-
-    // We need _n_card_counts to be less than _max_n_card_counts here
-    // so that the expansion call (below) actually allocates the
-    // _counts and _epochs arrays.
-    assert(_n_card_counts == 0, "pre-condition");
-    assert(_max_n_card_counts > 0, "pre-condition");
-
-    // Find the index into cache size array that is of a size that's
-    // large enough to hold desired_sz.
-    size_t desired_sz = _max_cards / InitialCacheFraction;
-    int desired_sz_index = 0;
-    while (_cc_cache_sizes[desired_sz_index] < desired_sz) {
-      desired_sz_index += 1;
-      assert(desired_sz_index <  MAX_CC_CACHE_INDEX, "invariant");
-    }
-    assert(desired_sz_index <  MAX_CC_CACHE_INDEX, "invariant");
-
-    // If the desired_sz value is between two sizes then
-    // _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index]
-    // we will start with the lower size in the optimistic expectation that
-    // we will not need to expand up. Note desired_sz_index could also be 0.
-    if (desired_sz_index > 0 &&
-        _cc_cache_sizes[desired_sz_index] > desired_sz) {
-      desired_sz_index -= 1;
-    }
-
-    if (!expand_card_count_cache(desired_sz_index)) {
-      // Allocation was unsuccessful - exit
-      vm_exit_during_initialization("Could not reserve enough space for card count cache");
-    }
-    assert(_n_card_counts > 0, "post-condition");
-    assert(_cache_size_index == desired_sz_index, "post-condition");
-
-    Copy::fill_to_bytes(&_card_counts[0],
-                        _n_card_counts * sizeof(CardCountCacheEntry));
-    Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
-
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ct_bs = (CardTableModRefBS*)bs;
-    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
-
-    _def_use_cache = true;
-    _use_cache = true;
-    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
-    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
-    _n_hot = 0;
-    _hot_cache_idx = 0;
-
-    // For refining the cards in the hot cache in parallel
-    int n_workers = (ParallelGCThreads > 0 ?
-                        _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
-    _hot_cache_par_claimed_idx = 0;
-  }
+  _hot_card_cache.initialize();
 }
 
 void ConcurrentG1Refine::stop() {
@@ -188,17 +98,6 @@
 }
 
 ConcurrentG1Refine::~ConcurrentG1Refine() {
-  if (G1ConcRSLogCacheSize > 0) {
-    // Please see the comment in allocate_card_count_cache
-    // for why we call os::malloc() and os::free() directly.
-    assert(_card_counts != NULL, "Logic");
-    os::free(_card_counts, mtGC);
-    assert(_card_epochs != NULL, "Logic");
-    os::free(_card_epochs, mtGC);
-
-    assert(_hot_cache != NULL, "Logic");
-    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
-  }
   if (_threads != NULL) {
     for (int i = 0; i < _n_threads; i++) {
       delete _threads[i];
@@ -215,317 +114,10 @@
   }
 }
 
-bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
-  HeapWord* start = _ct_bs->addr_for(card_ptr);
-  HeapRegion* r = _g1h->heap_region_containing(start);
-  if (r != NULL && r->is_young()) {
-    return true;
-  }
-  // This card is not associated with a heap region
-  // so can't be young.
-  return false;
-}
-
-jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
-  unsigned new_card_num = ptr_2_card_num(card_ptr);
-  unsigned bucket = hash(new_card_num);
-  assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
-
-  CardCountCacheEntry* count_ptr = &_card_counts[bucket];
-  CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
-
-  // We have to construct a new entry if we haven't updated the counts
-  // during the current period, or if the count was updated for a
-  // different card number.
-  unsigned int new_epoch = (unsigned int) _n_periods;
-  julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
-
-  while (true) {
-    // Fetch the previous epoch value
-    julong prev_epoch_entry = epoch_ptr->_value;
-    julong cas_res;
-
-    if (extract_epoch(prev_epoch_entry) != new_epoch) {
-      // This entry has not yet been updated during this period.
-      // Note: we update the epoch value atomically to ensure
-      // that there is only one winner that updates the cached
-      // card_ptr value even though all the refine threads share
-      // the same epoch value.
-
-      cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
-                                         (volatile jlong*)&epoch_ptr->_value,
-                                         (jlong) prev_epoch_entry);
-
-      if (cas_res == prev_epoch_entry) {
-        // We have successfully won the race to update the
-        // epoch and card_num value. Make it look like the
-        // count and eviction count were previously cleared.
-        count_ptr->_count = 1;
-        count_ptr->_evict_count = 0;
-        *count = 0;
-        // We can defer the processing of card_ptr
-        *defer = true;
-        return card_ptr;
-      }
-      // We did not win the race to update the epoch field, so some other
-      // thread must have done it. The value that gets returned by CAS
-      // should be the new epoch value.
-      assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
-      // We could 'continue' here or just re-read the previous epoch value
-      prev_epoch_entry = epoch_ptr->_value;
-    }
-
-    // The epoch entry for card_ptr has been updated during this period.
-    unsigned old_card_num = extract_card_num(prev_epoch_entry);
-
-    // The card count that will be returned to caller
-    *count = count_ptr->_count;
-
-    // Are we updating the count for the same card?
-    if (new_card_num == old_card_num) {
-      // Same card - just update the count. We could have more than one
-      // thread racing to update count for the current card. It should be
-      // OK not to use a CAS as the only penalty should be some missed
-      // increments of the count which delays identifying the card as "hot".
-
-      if (*count < max_jubyte) count_ptr->_count++;
-      // We can defer the processing of card_ptr
-      *defer = true;
-      return card_ptr;
-    }
-
-    // Different card - evict old card info
-    if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
-    if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
-      // Trigger a resize the next time we clear
-      _expand_card_counts = true;
-    }
-
-    cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
-                                       (volatile jlong*)&epoch_ptr->_value,
-                                       (jlong) prev_epoch_entry);
-
-    if (cas_res == prev_epoch_entry) {
-      // We successfully updated the card num value in the epoch entry
-      count_ptr->_count = 0; // initialize counter for new card num
-      jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
-
-      // Even though the region containing the card at old_card_num was not
-      // in the young list when old_card_num was recorded in the epoch
-      // cache it could have been added to the free list and subsequently
-      // added to the young list in the intervening time. See CR 6817995.
-      // We do not deal with this case here - it will be handled in
-      // HeapRegion::oops_on_card_seq_iterate_careful after it has been
-      // determined that the region containing the card has been allocated
-      // to, and it's safe to check the young type of the region.
-
-      // We do not want to defer processing of card_ptr in this case
-      // (we need to refine old_card_ptr and card_ptr)
-      *defer = false;
-      return old_card_ptr;
-    }
-    // Someone else beat us - try again.
-  }
-}
-
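The loop above keys everything on a single 64-bit epoch entry that packs a card number together with the period in which the bucket was last claimed, so one CAS both claims the bucket and records its owner. A minimal sketch of that scheme, assuming a card-number-high/epoch-low bit split (the real layout and helpers live in concurrentG1Refine.hpp):

#include <atomic>
#include <cstdint>

static inline uint64_t make_epoch_entry(uint32_t card_num, uint32_t epoch) {
  return ((uint64_t)card_num << 32) | epoch;          // assumed bit layout
}
static inline uint32_t extract_epoch(uint64_t e)    { return (uint32_t)e; }
static inline uint32_t extract_card_num(uint64_t e) { return (uint32_t)(e >> 32); }

// One CAS claims the bucket for this period and records which card owns it;
// of all racing refinement threads, exactly one wins.
bool try_claim(std::atomic<uint64_t>& slot, uint32_t card_num, uint32_t epoch) {
  uint64_t prev = slot.load();
  if (extract_epoch(prev) == epoch) {
    return false;  // bucket already claimed during this period
  }
  return slot.compare_exchange_strong(prev, make_epoch_entry(card_num, epoch));
}
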
-jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
-  int count;
-  jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
-  assert(cached_ptr != NULL, "bad cached card ptr");
-
-  // We've just inserted a card pointer into the card count cache
-  // and got back the card that we just inserted or (evicted) the
-  // previous contents of that count slot.
-
-  // The card we got back could be in a young region. When the
-  // returned card (if evicted) was originally inserted, we had
-  // determined that its containing region was not young. However
-  // it is possible for the region to be freed during a cleanup
-  // pause, then reallocated and tagged as young which will result
-  // in the returned card residing in a young region.
-  //
-  // We do not deal with this case here - the change from non-young
-  // to young could be observed at any time - it will be handled in
-  // HeapRegion::oops_on_card_seq_iterate_careful after it has been
-  // determined that the region containing the card has been allocated
-  // to.
-
-  // The card pointer we obtained from card count cache is not hot
-  // so do not store it in the cache; return it for immediate
-  // refining.
-  if (count < G1ConcRSHotCardLimit) {
-    return cached_ptr;
-  }
-
-  // Otherwise, the pointer we got from the _card_counts cache is hot.
-  jbyte* res = NULL;
-  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
-  if (_n_hot == _hot_cache_size) {
-    res = _hot_cache[_hot_cache_idx];
-    _n_hot--;
-  }
-  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
-  _hot_cache[_hot_cache_idx] = cached_ptr;
-  _hot_cache_idx++;
-  if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
-  _n_hot++;
-
-  // The card obtained from the hot card cache could be in a young
-  // region. See above on how this can happen.
-
-  return res;
-}
-
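The hot cache above is a fixed-size ring buffer: when it is full, inserting a newly hot card evicts the oldest entry, which the caller then refines immediately. The same shape in a minimal standalone sketch (hypothetical types; the real cache stores jbyte* card pointers and takes HotCardCache_lock around the insert):

struct HotCache {
  void** buf;          // fixed-capacity circular buffer
  int    size;         // capacity
  int    idx;          // next slot to write
  int    n;            // current occupancy

  // Insert a hot card; returns the evicted entry to refine now,
  // or nullptr if there was still room.
  void* insert(void* card) {
    void* evicted = nullptr;
    if (n == size) {   // full: slot idx currently holds the oldest entry
      evicted = buf[idx];
      n--;
    }
    buf[idx] = card;
    idx = (idx + 1) % size;
    n++;
    return evicted;
  }
};
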
-void ConcurrentG1Refine::clean_up_cache(int worker_i,
-                                        G1RemSet* g1rs,
-                                        DirtyCardQueue* into_cset_dcq) {
-  assert(!use_cache(), "cache should be disabled");
-  int start_idx;
-
-  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
-    int end_idx = start_idx + _hot_cache_par_chunk_size;
-
-    if (start_idx ==
-        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
-      // The current worker has successfully claimed the chunk [start_idx..end_idx)
-      end_idx = MIN2(end_idx, _n_hot);
-      for (int i = start_idx; i < end_idx; i++) {
-        jbyte* entry = _hot_cache[i];
-        if (entry != NULL) {
-          if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
-            // 'entry' contains references that point into the current
-            // collection set. We need to record 'entry' in the DCQS
-            // that's used for that purpose.
-            //
-            // The only time we care about recording cards that contain
-            // references that point into the collection set is during
-            // RSet updating while within an evacuation pause.
-            // In this case worker_i should be the id of a GC worker thread
-            assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
-            into_cset_dcq->enqueue(entry);
-          }
-        }
-      }
-    }
-  }
-}
-
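Workers drain the hot cache in parallel by racing a CAS on a shared claim index: each successful CAS hands one worker an exclusive chunk of entries. The claiming pattern in isolation, as a minimal sketch (standalone names; the real code uses Atomic::cmpxchg and refines jbyte* card entries):

#include <atomic>

// Each worker repeatedly tries to claim [start, start + chunk) by advancing
// the shared index; only the worker whose CAS succeeds processes the chunk.
void drain_in_chunks(std::atomic<int>& claimed_idx, int n_entries, int chunk,
                     void (*process_entry)(int)) {
  int start;
  while ((start = claimed_idx.load()) < n_entries) {
    int end = start + chunk;
    if (claimed_idx.compare_exchange_strong(start, end)) {
      if (end > n_entries) end = n_entries;  // clamp the final partial chunk
      for (int i = start; i < end; i++) {
        process_entry(i);
      }
    }
    // On CAS failure another worker claimed this chunk; re-read and retry.
  }
}
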
-// The arrays used to hold the card counts and the epochs must have
-// a 1:1 correspondence. Hence they are allocated and freed together.
-// Returns true if the allocations of both the counts and epochs
-// were successful; false otherwise.
-bool ConcurrentG1Refine::allocate_card_count_cache(size_t n,
-                                                   CardCountCacheEntry** counts,
-                                                   CardEpochCacheEntry** epochs) {
-  // We call the allocation/free routines directly for the counts
-  // and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY
-  // macros call AllocateHeap and FreeHeap respectively.
-  // AllocateHeap will call vm_exit_out_of_memory in the event
-  // of an allocation failure and abort the JVM. With the
-  // _counts/epochs arrays we only need to abort the JVM if the
-  // initial allocation of these arrays fails.
-  //
-  // Additionally AllocateHeap/FreeHeap do some tracing of
-  // allocate/free calls so calling one without calling the
-  // other can cause inconsistencies in the tracing. So we
-  // call neither.
-
-  assert(*counts == NULL, "out param");
-  assert(*epochs == NULL, "out param");
-
-  size_t counts_size = n * sizeof(CardCountCacheEntry);
-  size_t epochs_size = n * sizeof(CardEpochCacheEntry);
-
-  *counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC);
-  if (*counts == NULL) {
-    // allocation was unsuccessful
-    return false;
-  }
-
-  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC);
-  if (*epochs == NULL) {
-    // allocation was unsuccessful - free counts array
-    assert(*counts != NULL, "must be");
-    os::free(*counts, mtGC);
-    *counts = NULL;
-    return false;
-  }
-
-  // We successfully allocated both counts and epochs
-  return true;
-}
-
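The function above is an instance of a general pattern: allocate a pair of parallel arrays all-or-nothing, rolling back the first allocation if the second fails, so callers never observe a half-allocated pair. A minimal generic sketch, with plain malloc/free standing in for os::malloc/os::free:

#include <cstdlib>

template <typename A, typename B>
bool allocate_pair(size_t n, A** a, B** b) {
  *a = static_cast<A*>(malloc(n * sizeof(A)));
  if (*a == nullptr) {
    return false;                 // nothing to roll back
  }
  *b = static_cast<B*>(malloc(n * sizeof(B)));
  if (*b == nullptr) {
    free(*a);                     // roll back the first allocation
    *a = nullptr;
    return false;
  }
  return true;                    // both arrays live; 1:1 correspondence holds
}
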
-// Returns true if the card counts/epochs cache was
-// successfully expanded; false otherwise.
-bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
-  // Can we expand the card count and epoch tables?
-  if (_n_card_counts < _max_n_card_counts) {
-    assert(cache_size_idx >= 0 && cache_size_idx < MAX_CC_CACHE_INDEX, "oob");
-
-    size_t cache_size = _cc_cache_sizes[cache_size_idx];
-    // Make sure we don't go bigger than we will ever need
-    cache_size = MIN2(cache_size, _max_n_card_counts);
-
-    // Should we expand the card count and card epoch tables?
-    if (cache_size > _n_card_counts) {
-      // We have been asked to allocate new, larger, arrays for
-      // the card counts and the epochs. Attempt the allocation
-      // of both before we free the existing arrays in case
-      // the allocation is unsuccessful...
-      CardCountCacheEntry* counts = NULL;
-      CardEpochCacheEntry* epochs = NULL;
-
-      if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
-        // Allocation was successful.
-        // We can just free the old arrays; we're
-        // not interested in preserving the contents
-        if (_card_counts != NULL) os::free(_card_counts, mtGC);
-        if (_card_epochs != NULL) os::free(_card_epochs, mtGC);
-
-        // Cache the size of the arrays and the index that got us there.
-        _n_card_counts = cache_size;
-        _cache_size_index = cache_size_idx;
-
-        _card_counts = counts;
-        _card_epochs = epochs;
-
-        // We successfully allocated/expanded the caches.
-        return true;
-      }
-    }
-  }
-
-  // We did not successfully expand the caches.
-  return false;
-}
-
   1.455 -
   1.456 -void ConcurrentG1Refine::clear_and_record_card_counts() {
   1.457 -  if (G1ConcRSLogCacheSize == 0) {
   1.458 -    return;
   1.459 -  }
   1.460 -
   1.461 -  double start = os::elapsedTime();
   1.462 -
   1.463 -  if (_expand_card_counts) {
   1.464 -    int new_idx = _cache_size_index + 1;
   1.465 -
   1.466 -    if (expand_card_count_cache(new_idx)) {
   1.467 -      // Allocation was successful and  _n_card_counts has
   1.468 -      // been updated to the new size. We only need to clear
   1.469 -      // the epochs so we don't read a bogus epoch value
   1.470 -      // when inserting a card into the hot card cache.
   1.471 -      Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
   1.472 -    }
   1.473 -    _expand_card_counts = false;
   1.474 -  }
   1.475 -
   1.476 -  int this_epoch = (int) _n_periods;
   1.477 -  assert((this_epoch+1) <= max_jint, "to many periods");
   1.478 -  // Update epoch
   1.479 -  _n_periods++;
   1.480 -  double cc_clear_time_ms = (os::elapsedTime() - start) * 1000;
   1.481 -  _g1h->g1_policy()->phase_times()->record_cc_clear_time_ms(cc_clear_time_ms);
   1.482 +int ConcurrentG1Refine::thread_num() {
   1.483 +  int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
   1.484 +                                                : ParallelGCThreads;
   1.485 +  return MAX2<int>(n_threads, 1);
   1.486  }
   1.487  
   1.488  void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
