src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

changeset 2715:abdfc822206f
parent    2472:0fa27f37d4d4
child     2963:c3f1170908be
     1.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Mar 29 22:36:16 2011 -0400
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Mar 30 10:26:59 2011 -0400
     1.3 @@ -27,6 +27,7 @@
     1.4  
     1.5  #include "gc_implementation/g1/concurrentMark.hpp"
     1.6  #include "gc_implementation/g1/g1CollectedHeap.hpp"
     1.7 +#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
     1.8  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
     1.9  #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    1.10  #include "utilities/taskqueue.hpp"
    1.11 @@ -59,131 +60,23 @@
    1.12    return r != NULL && r->in_collection_set();
    1.13  }
    1.14  
    1.15 -// See the comment in the .hpp file about the locking protocol and
    1.16 -// assumptions of this method (and other related ones).
    1.17  inline HeapWord*
    1.18 -G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
    1.19 -                                                size_t word_size,
    1.20 -                                                bool with_heap_lock) {
    1.21 -  assert_not_at_safepoint();
    1.22 -  assert(with_heap_lock == Heap_lock->owned_by_self(),
    1.23 -         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
    1.24 -  assert(cur_alloc_region != NULL, "pre-condition of the method");
    1.25 -  assert(cur_alloc_region->is_young(),
    1.26 -         "we only support young current alloc regions");
    1.27 -  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
    1.28 -         "should not be used for humongous allocations");
    1.29 -  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
    1.30 +G1CollectedHeap::attempt_allocation(size_t word_size,
    1.31 +                                    unsigned int* gc_count_before_ret) {
    1.32 +  assert_heap_not_locked_and_not_at_safepoint();
    1.33 +  assert(!isHumongous(word_size), "attempt_allocation() should not "
    1.34 +         "be called for humongous allocation requests");
    1.35  
    1.36 -  assert(!cur_alloc_region->is_empty(),
    1.37 -         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
    1.38 -                 cur_alloc_region->bottom(), cur_alloc_region->end()));
    1.39 -  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
    1.40 +  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
    1.41 +                                                      false /* bot_updates */);
    1.42 +  if (result == NULL) {
    1.43 +    result = attempt_allocation_slow(word_size, gc_count_before_ret);
    1.44 +  }
    1.45 +  assert_heap_not_locked();
    1.46    if (result != NULL) {
    1.47 -    assert(is_in(result), "result should be in the heap");
    1.48 -
    1.49 -    if (with_heap_lock) {
    1.50 -      Heap_lock->unlock();
    1.51 -    }
    1.52 -    assert_heap_not_locked();
    1.53 -    // Do the dirtying after we release the Heap_lock.
    1.54      dirty_young_block(result, word_size);
    1.55 -    return result;
    1.56    }
    1.57 -
    1.58 -  if (with_heap_lock) {
    1.59 -    assert_heap_locked();
    1.60 -  } else {
    1.61 -    assert_heap_not_locked();
    1.62 -  }
    1.63 -  return NULL;
    1.64 -}
    1.65 -
    1.66 -// See the comment in the .hpp file about the locking protocol and
    1.67 -// assumptions of this method (and other related ones).
    1.68 -inline HeapWord*
    1.69 -G1CollectedHeap::attempt_allocation(size_t word_size) {
    1.70 -  assert_heap_not_locked_and_not_at_safepoint();
    1.71 -  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
    1.72 -         "for humongous allocation requests");
    1.73 -
    1.74 -  HeapRegion* cur_alloc_region = _cur_alloc_region;
    1.75 -  if (cur_alloc_region != NULL) {
    1.76 -    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
    1.77 -                                                   word_size,
    1.78 -                                                   false /* with_heap_lock */);
    1.79 -    assert_heap_not_locked();
    1.80 -    if (result != NULL) {
    1.81 -      return result;
    1.82 -    }
    1.83 -  }
    1.84 -
    1.85 -  // Our attempt to allocate lock-free failed as the current
    1.86 -  // allocation region is either NULL or full. So, we'll now take the
    1.87 -  // Heap_lock and retry.
    1.88 -  Heap_lock->lock();
    1.89 -
    1.90 -  HeapWord* result = attempt_allocation_locked(word_size);
    1.91 -  if (result != NULL) {
    1.92 -    assert_heap_not_locked();
    1.93 -    return result;
    1.94 -  }
    1.95 -
    1.96 -  assert_heap_locked();
    1.97 -  return NULL;
    1.98 -}
    1.99 -
   1.100 -inline void
   1.101 -G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
   1.102 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   1.103 -  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
   1.104 -         "pre-condition of the call");
   1.105 -  assert(cur_alloc_region->is_young(),
   1.106 -         "we only support young current alloc regions");
   1.107 -
   1.108 -  // The region is guaranteed to be young
   1.109 -  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
   1.110 -  _summary_bytes_used += cur_alloc_region->used();
   1.111 -  _cur_alloc_region = NULL;
   1.112 -}
   1.113 -
   1.114 -inline HeapWord*
   1.115 -G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
   1.116 -  assert_heap_locked_and_not_at_safepoint();
   1.117 -  assert(!isHumongous(word_size), "attempt_allocation_locked() "
   1.118 -         "should not be called for humongous allocation requests");
   1.119 -
   1.120 -  // First, reread the current alloc region and retry the allocation
   1.121 -  // in case somebody replaced it while we were waiting to get the
   1.122 -  // Heap_lock.
   1.123 -  HeapRegion* cur_alloc_region = _cur_alloc_region;
   1.124 -  if (cur_alloc_region != NULL) {
   1.125 -    HeapWord* result = allocate_from_cur_alloc_region(
   1.126 -                                                  cur_alloc_region, word_size,
   1.127 -                                                  true /* with_heap_lock */);
   1.128 -    if (result != NULL) {
   1.129 -      assert_heap_not_locked();
   1.130 -      return result;
   1.131 -    }
   1.132 -
   1.133 -    // We failed to allocate out of the current alloc region, so let's
   1.134 -    // retire it before getting a new one.
   1.135 -    retire_cur_alloc_region(cur_alloc_region);
   1.136 -  }
   1.137 -
   1.138 -  assert_heap_locked();
   1.139 -  // Try to get a new region and allocate out of it
   1.140 -  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
   1.141 -                                                     false, /* at_safepoint */
   1.142 -                                                     true,  /* do_dirtying */
   1.143 -                                                     false  /* can_expand */);
   1.144 -  if (result != NULL) {
   1.145 -    assert_heap_not_locked();
   1.146 -    return result;
   1.147 -  }
   1.148 -
   1.149 -  assert_heap_locked();
   1.150 -  return NULL;
   1.151 +  return result;
   1.152  }
   1.153  
    1.154 // It dirties the cards that cover the block so that the post
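
The hunk above replaces the old hand-rolled locking protocol (an inline fast path against _cur_alloc_region, explicit Heap_lock handling, and region retirement spread across attempt_allocation() and attempt_allocation_locked()) with a two-level scheme: an inlined, lock-free attempt against the current mutator alloc region, and an out-of-line attempt_allocation_slow() fallback, which is presumably where the locking, region replacement and GC retry logic now live (and presumably why g1AllocRegion.inline.hpp is newly included). The sketch below is a minimal, self-contained illustration of that pattern only, assuming a CAS bump-pointer fast path; AllocRegionSketch, HeapWordSketch and the mutex member are hypothetical names, not HotSpot identifiers.

#include <atomic>
#include <cstddef>
#include <mutex>

// Hypothetical stand-in for HotSpot's HeapWord; one element == one word.
typedef unsigned long HeapWordSketch;

class AllocRegionSketch {
  HeapWordSketch* _bottom;
  HeapWordSketch* _end;
  std::atomic<HeapWordSketch*> _top;
  std::mutex _heap_lock;              // crude stand-in for Heap_lock

public:
  AllocRegionSketch(HeapWordSketch* bottom, HeapWordSketch* end)
    : _bottom(bottom), _end(end), _top(bottom) {}

  // Lock-free fast path: CAS-bump _top within the current region,
  // in the spirit of par_allocate_no_bot_updates() in the removed code.
  HeapWordSketch* attempt_allocation(size_t word_size) {
    HeapWordSketch* old_top = _top.load(std::memory_order_relaxed);
    while (old_top + word_size <= _end) {
      if (_top.compare_exchange_weak(old_top, old_top + word_size,
                                     std::memory_order_relaxed)) {
        return old_top;               // allocated without taking any lock
      }
      // A failed CAS refreshed old_top with the current value; retry.
    }
    return NULL;                      // region full; caller takes the slow path
  }

  // Slow path: take the lock and retry. In the real code this is also where
  // the current region would be retired, a fresh mutator alloc region
  // requested, and a collection possibly triggered.
  HeapWordSketch* attempt_allocation_slow(size_t word_size) {
    std::lock_guard<std::mutex> lg(_heap_lock);
    HeapWordSketch* result = attempt_allocation(word_size);
    if (result != NULL) {
      return result;
    }
    return NULL;                      // sketch only: no region replacement here
  }
};

The apparent payoff of the changeset is that the common allocation path stays inlined and lock-free, while every locking and retirement decision is concentrated in a single non-inlined slow path instead of being duplicated across attempt_allocation() and attempt_allocation_locked().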
