src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

changeset 2715:abdfc822206f
parent    2714:455328d90876
child     2817:49a67202bc67
     1.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Mar 29 22:36:16 2011 -0400
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Mar 30 10:26:59 2011 -0400
     1.3 @@ -28,6 +28,7 @@
     1.4  #include "gc_implementation/g1/concurrentG1Refine.hpp"
     1.5  #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
     1.6  #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
     1.7 +#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
     1.8  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
     1.9  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    1.10  #include "gc_implementation/g1/g1MarkSweep.hpp"
    1.11 @@ -517,8 +518,7 @@
    1.12    return NULL;
    1.13  }
    1.14  
    1.15 -HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
    1.16 -                                             bool do_expand) {
    1.17 +HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
    1.18    assert(!isHumongous(word_size) ||
    1.19                                    word_size <= (size_t) HeapRegion::GrainWords,
    1.20           "the only time we use this to allocate a humongous region is "
    1.21 @@ -566,7 +566,7 @@
    1.22                                                   size_t word_size) {
    1.23    HeapRegion* alloc_region = NULL;
    1.24    if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
    1.25 -    alloc_region = new_region_work(word_size, true /* do_expand */);
    1.26 +    alloc_region = new_region(word_size, true /* do_expand */);
    1.27      if (purpose == GCAllocForSurvived && alloc_region != NULL) {
    1.28        alloc_region->set_survivor();
    1.29      }
    1.30 @@ -587,7 +587,7 @@
    1.31      // Only one region to allocate, no need to go through the slower
     1.32      // path. The caller will attempt the expansion if this fails, so
    1.33      // let's not try to expand here too.
    1.34 -    HeapRegion* hr = new_region_work(word_size, false /* do_expand */);
    1.35 +    HeapRegion* hr = new_region(word_size, false /* do_expand */);
    1.36      if (hr != NULL) {
    1.37        first = hr->hrs_index();
    1.38      } else {
    1.39 @@ -788,407 +788,12 @@
    1.40    return result;
    1.41  }
    1.42  
    1.43 -void
    1.44 -G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
    1.45 -  // Other threads might still be trying to allocate using CASes out
    1.46 -  // of the region we are retiring, as they can do so without holding
     1.47 -  // the Heap_lock. So we first have to make sure that no one else can
    1.48 -  // allocate in it by doing a maximal allocation. Even if our CAS
    1.49 -  // attempt fails a few times, we'll succeed sooner or later given
     1.50 -  // that a failed CAS attempt means that the region is getting close
    1.51 -  // to being full (someone else succeeded in allocating into it).
    1.52 -  size_t free_word_size = cur_alloc_region->free() / HeapWordSize;
    1.53 -
    1.54 -  // This is the minimum free chunk we can turn into a dummy
     1.55 -  // object. If the free space falls below this, then no one can
    1.56 -  // allocate in this region anyway (all allocation requests will be
    1.57 -  // of a size larger than this) so we won't have to perform the dummy
    1.58 -  // allocation.
    1.59 -  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
    1.60 -
    1.61 -  while (free_word_size >= min_word_size_to_fill) {
    1.62 -    HeapWord* dummy =
    1.63 -      cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
    1.64 -    if (dummy != NULL) {
    1.65 -      // If the allocation was successful we should fill in the space.
    1.66 -      CollectedHeap::fill_with_object(dummy, free_word_size);
    1.67 -      break;
    1.68 -    }
    1.69 -
    1.70 -    free_word_size = cur_alloc_region->free() / HeapWordSize;
    1.71 -    // It's also possible that someone else beats us to the
    1.72 -    // allocation and they fill up the region. In that case, we can
    1.73 -    // just get out of the loop
    1.74 -  }
    1.75 -  assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
    1.76 -         "sanity");
    1.77 -
    1.78 -  retire_cur_alloc_region_common(cur_alloc_region);
    1.79 -  assert(_cur_alloc_region == NULL, "post-condition");
    1.80 -}
    1.81 -
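
The retire_cur_alloc_region() code removed above retires a region by performing one maximal CAS allocation over the remaining free space, so concurrent lock-free allocators can no longer use the region, and then filling that space with a dummy object. Below is a minimal, self-contained sketch of that fill-to-retire idea, using std::atomic in place of the region's internal CAS; all names are illustrative, not HotSpot APIs.

    #include <atomic>
    #include <cstddef>
    #include <cstdio>

    // Toy region whose top is bumped with CAS by many threads, standing in
    // for what par_allocate_no_bot_updates() does conceptually.
    struct ToyRegion {
      std::atomic<size_t> top{0};        // words already allocated
      size_t end = 1024;                 // capacity in words

      bool par_allocate(size_t words) {
        size_t cur = top.load();
        while (cur + words <= end) {
          if (top.compare_exchange_weak(cur, cur + words)) return true;
        }
        return false;                    // not enough space left
      }
      size_t free_words() const { return end - top.load(); }
    };

    // Retire: grab *all* remaining space in one maximal allocation so nobody
    // else can allocate here, then stand in for fill_with_object().
    void retire(ToyRegion& r, size_t min_fill_words) {
      size_t free = r.free_words();
      while (free >= min_fill_words) {
        if (r.par_allocate(free)) {
          std::printf("filled %zu words with a dummy object\n", free);
          break;                         // region is now effectively full
        }
        free = r.free_words();           // lost the race; re-read and retry
      }
      // Whatever is left is smaller than any real allocation request.
    }

    int main() {
      ToyRegion r;
      r.par_allocate(1000);              // simulate earlier mutator allocations
      retire(r, 2 /* minimum fill size, in words */);
    }
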
    1.82 -// See the comment in the .hpp file about the locking protocol and
    1.83 -// assumptions of this method (and other related ones).
    1.84 -HeapWord*
    1.85 -G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
    1.86 -                                                       bool at_safepoint,
    1.87 -                                                       bool do_dirtying,
    1.88 -                                                       bool can_expand) {
    1.89 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
    1.90 -  assert(_cur_alloc_region == NULL,
    1.91 -         "replace_cur_alloc_region_and_allocate() should only be called "
    1.92 -         "after retiring the previous current alloc region");
    1.93 -  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
    1.94 -         "at_safepoint and is_at_safepoint() should be a tautology");
    1.95 -  assert(!can_expand || g1_policy()->can_expand_young_list(),
    1.96 -         "we should not call this method with can_expand == true if "
    1.97 -         "we are not allowed to expand the young gen");
    1.98 -
    1.99 -  if (can_expand || !g1_policy()->is_young_list_full()) {
   1.100 -    HeapRegion* new_cur_alloc_region = new_alloc_region(word_size);
   1.101 -    if (new_cur_alloc_region != NULL) {
   1.102 -      assert(new_cur_alloc_region->is_empty(),
   1.103 -             "the newly-allocated region should be empty, "
   1.104 -             "as right now we only allocate new regions out of the free list");
   1.105 -      g1_policy()->update_region_num(true /* next_is_young */);
   1.106 -      set_region_short_lived_locked(new_cur_alloc_region);
   1.107 -
   1.108 -      assert(!new_cur_alloc_region->isHumongous(),
   1.109 -             "Catch a regression of this bug.");
   1.110 -
   1.111 -      // We need to ensure that the stores to _cur_alloc_region and,
   1.112 -      // subsequently, to top do not float above the setting of the
   1.113 -      // young type.
   1.114 -      OrderAccess::storestore();
   1.115 -
   1.116 -      // Now, perform the allocation out of the region we just
    1.117 -      // allocated. Note that no one else can access that region at
   1.118 -      // this point (as _cur_alloc_region has not been updated yet),
   1.119 -      // so we can just go ahead and do the allocation without any
   1.120 -      // atomics (and we expect this allocation attempt to
    1.121 -      // succeed). Given that other threads can attempt an allocation
   1.122 -      // with a CAS and without needing the Heap_lock, if we assigned
   1.123 -      // the new region to _cur_alloc_region before first allocating
   1.124 -      // into it other threads might have filled up the new region
   1.125 -      // before we got a chance to do the allocation ourselves. In
   1.126 -      // that case, we would have needed to retire the region, grab a
   1.127 -      // new one, and go through all this again. Allocating out of the
   1.128 -      // new region before assigning it to _cur_alloc_region avoids
   1.129 -      // all this.
   1.130 -      HeapWord* result =
   1.131 -                     new_cur_alloc_region->allocate_no_bot_updates(word_size);
   1.132 -      assert(result != NULL, "we just allocate out of an empty region "
   1.133 -             "so allocation should have been successful");
   1.134 -      assert(is_in(result), "result should be in the heap");
   1.135 -
   1.136 -      // Now make sure that the store to _cur_alloc_region does not
   1.137 -      // float above the store to top.
   1.138 -      OrderAccess::storestore();
   1.139 -      _cur_alloc_region = new_cur_alloc_region;
   1.140 -
   1.141 -      if (!at_safepoint) {
   1.142 -        Heap_lock->unlock();
   1.143 -      }
   1.144 -
   1.145 -      // do the dirtying, if necessary, after we release the Heap_lock
   1.146 -      if (do_dirtying) {
   1.147 -        dirty_young_block(result, word_size);
   1.148 -      }
   1.149 -      return result;
   1.150 -    }
   1.151 -  }
   1.152 -
   1.153 -  assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
   1.154 -         "alloc region, it should still be NULL");
   1.155 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   1.156 -  return NULL;
   1.157 -}
   1.158 -
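
The ordering argument in the removed replace_cur_alloc_region_and_allocate() is: set the region's young type, allocate into the region privately, and only then publish it through _cur_alloc_region, with OrderAccess::storestore() between the steps so lock-free readers never observe a published region whose type or top is stale. A rough sketch of that publish-last pattern, with C++11 release/acquire standing in for the raw store-store barriers; the types and names below are hypothetical.

    #include <atomic>
    #include <cstddef>

    struct Region {
      bool is_young = false;
      size_t top = 0;
      size_t allocate(size_t words) { size_t r = top; top += words; return r; }
    };

    std::atomic<Region*> cur_alloc_region{nullptr};

    // Writer: initialize and allocate *before* publishing the pointer, so the
    // first allocation needs no CAS and readers never see a half-set-up region.
    size_t install_and_allocate(Region* r, size_t words) {
      r->is_young = true;                    // (a) set the region type
      size_t result = r->allocate(words);    // (b) private allocation, no races yet
      cur_alloc_region.store(r, std::memory_order_release);   // (c) publish last
      return result;
    }

    // Reader: the acquire load pairs with the release store above, so the
    // stores in (a) and (b) are visible once the pointer is seen.
    Region* current_region() {
      return cur_alloc_region.load(std::memory_order_acquire);
    }

    int main() {
      static Region r;
      return (int) install_and_allocate(&r, 8);   // first allocation is at offset 0
    }
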
   1.159 -// See the comment in the .hpp file about the locking protocol and
   1.160 -// assumptions of this method (and other related ones).
   1.161 -HeapWord*
   1.162 -G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   1.163 -  assert_heap_locked_and_not_at_safepoint();
   1.164 -  assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
   1.165 -         "used for humongous allocations");
   1.166 -
   1.167 -  // We should only reach here when we were unable to allocate
    1.168 -  // otherwise. So, we should have no active current alloc region.
   1.169 -  assert(_cur_alloc_region == NULL, "current alloc region should be NULL");
   1.170 -
   1.171 -  // We will loop while succeeded is false, which means that we tried
   1.172 -  // to do a collection, but the VM op did not succeed. So, when we
   1.173 -  // exit the loop, either one of the allocation attempts was
    1.174 -  // successful, or we succeeded in doing the VM op but it was
   1.175 -  // unable to allocate after the collection.
   1.176 -  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   1.177 -    bool succeeded = true;
   1.178 -
   1.179 -    // Every time we go round the loop we should be holding the Heap_lock.
   1.180 -    assert_heap_locked();
   1.181 -
   1.182 -    if (GC_locker::is_active_and_needs_gc()) {
   1.183 -      // We are locked out of GC because of the GC locker. We can
   1.184 -      // allocate a new region only if we can expand the young gen.
   1.185 -
   1.186 -      if (g1_policy()->can_expand_young_list()) {
   1.187 -        // Yes, we are allowed to expand the young gen. Let's try to
   1.188 -        // allocate a new current alloc region.
   1.189 -        HeapWord* result =
   1.190 -          replace_cur_alloc_region_and_allocate(word_size,
   1.191 -                                                false, /* at_safepoint */
   1.192 -                                                true,  /* do_dirtying */
   1.193 -                                                true   /* can_expand */);
   1.194 -        if (result != NULL) {
   1.195 -          assert_heap_not_locked();
   1.196 -          return result;
   1.197 -        }
   1.198 -      }
   1.199 -      // We could not expand the young gen further (or we could but we
   1.200 -      // failed to allocate a new region). We'll stall until the GC
   1.201 -      // locker forces a GC.
   1.202 -
   1.203 -      // If this thread is not in a jni critical section, we stall
   1.204 -      // the requestor until the critical section has cleared and
   1.205 -      // GC allowed. When the critical section clears, a GC is
   1.206 -      // initiated by the last thread exiting the critical section; so
   1.207 -      // we retry the allocation sequence from the beginning of the loop,
   1.208 -      // rather than causing more, now probably unnecessary, GC attempts.
   1.209 -      JavaThread* jthr = JavaThread::current();
   1.210 -      assert(jthr != NULL, "sanity");
   1.211 -      if (jthr->in_critical()) {
   1.212 -        if (CheckJNICalls) {
   1.213 -          fatal("Possible deadlock due to allocating while"
   1.214 -                " in jni critical section");
   1.215 -        }
   1.216 -        // We are returning NULL so the protocol is that we're still
   1.217 -        // holding the Heap_lock.
   1.218 -        assert_heap_locked();
   1.219 -        return NULL;
   1.220 -      }
   1.221 -
   1.222 -      Heap_lock->unlock();
   1.223 -      GC_locker::stall_until_clear();
   1.224 -
   1.225 -      // No need to relock the Heap_lock. We'll fall off to the code
   1.226 -      // below the else-statement which assumes that we are not
   1.227 -      // holding the Heap_lock.
   1.228 -    } else {
   1.229 -      // We are not locked out. So, let's try to do a GC. The VM op
   1.230 -      // will retry the allocation before it completes.
   1.231 -
   1.232 -      // Read the GC count while holding the Heap_lock
   1.233 -      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   1.234 -
   1.235 -      Heap_lock->unlock();
   1.236 -
   1.237 -      HeapWord* result =
   1.238 -        do_collection_pause(word_size, gc_count_before, &succeeded);
   1.239 -      assert_heap_not_locked();
   1.240 -      if (result != NULL) {
   1.241 -        assert(succeeded, "the VM op should have succeeded");
   1.242 -
   1.243 -        // Allocations that take place on VM operations do not do any
   1.244 -        // card dirtying and we have to do it here.
   1.245 -        dirty_young_block(result, word_size);
   1.246 -        return result;
   1.247 -      }
   1.248 -    }
   1.249 -
   1.250 -    // Both paths that get us here from above unlock the Heap_lock.
   1.251 -    assert_heap_not_locked();
   1.252 -
   1.253 -    // We can reach here when we were unsuccessful in doing a GC,
   1.254 -    // because another thread beat us to it, or because we were locked
   1.255 -    // out of GC due to the GC locker. In either case a new alloc
   1.256 -    // region might be available so we will retry the allocation.
   1.257 -    HeapWord* result = attempt_allocation(word_size);
   1.258 -    if (result != NULL) {
   1.259 -      assert_heap_not_locked();
   1.260 -      return result;
   1.261 -    }
   1.262 -
   1.263 -    // So far our attempts to allocate failed. The only time we'll go
   1.264 -    // around the loop and try again is if we tried to do a GC and the
   1.265 -    // VM op that we tried to schedule was not successful because
   1.266 -    // another thread beat us to it. If that happened it's possible
   1.267 -    // that by the time we grabbed the Heap_lock again and tried to
   1.268 -    // allocate other threads filled up the young generation, which
   1.269 -    // means that the allocation attempt after the GC also failed. So,
   1.270 -    // it's worth trying to schedule another GC pause.
   1.271 -    if (succeeded) {
   1.272 -      break;
   1.273 -    }
   1.274 -
   1.275 -    // Give a warning if we seem to be looping forever.
   1.276 -    if ((QueuedAllocationWarningCount > 0) &&
   1.277 -        (try_count % QueuedAllocationWarningCount == 0)) {
   1.278 -      warning("G1CollectedHeap::attempt_allocation_slow() "
   1.279 -              "retries %d times", try_count);
   1.280 -    }
   1.281 -  }
   1.282 -
   1.283 -  assert_heap_locked();
   1.284 -  return NULL;
   1.285 -}
   1.286 -
   1.287 -// See the comment in the .hpp file about the locking protocol and
   1.288 -// assumptions of this method (and other related ones).
   1.289 -HeapWord*
   1.290 -G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   1.291 -                                              bool at_safepoint) {
   1.292 -  // This is the method that will allocate a humongous object. All
   1.293 -  // allocation paths that attempt to allocate a humongous object
   1.294 -  // should eventually reach here. Currently, the only paths are from
   1.295 -  // mem_allocate() and attempt_allocation_at_safepoint().
   1.296 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   1.297 -  assert(isHumongous(word_size), "attempt_allocation_humongous() "
   1.298 -         "should only be used for humongous allocations");
   1.299 -  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
   1.300 -         "at_safepoint and is_at_safepoint() should be a tautology");
   1.301 -
   1.302 -  HeapWord* result = NULL;
   1.303 -
   1.304 -  // We will loop while succeeded is false, which means that we tried
   1.305 -  // to do a collection, but the VM op did not succeed. So, when we
   1.306 -  // exit the loop, either one of the allocation attempts was
    1.307 -  // successful, or we succeeded in doing the VM op but it was
   1.308 -  // unable to allocate after the collection.
   1.309 -  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   1.310 -    bool succeeded = true;
   1.311 -
   1.312 -    // Given that humongous objects are not allocated in young
   1.313 -    // regions, we'll first try to do the allocation without doing a
   1.314 -    // collection hoping that there's enough space in the heap.
   1.315 -    result = humongous_obj_allocate(word_size);
   1.316 -    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
   1.317 -           "catch a regression of this bug.");
   1.318 -    if (result != NULL) {
   1.319 -      if (!at_safepoint) {
   1.320 -        // If we're not at a safepoint, unlock the Heap_lock.
   1.321 -        Heap_lock->unlock();
   1.322 -      }
   1.323 -      return result;
   1.324 -    }
   1.325 -
   1.326 -    // If we failed to allocate the humongous object, we should try to
   1.327 -    // do a collection pause (if we're allowed) in case it reclaims
   1.328 -    // enough space for the allocation to succeed after the pause.
   1.329 -    if (!at_safepoint) {
   1.330 -      // Read the GC count while holding the Heap_lock
   1.331 -      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   1.332 -
   1.333 -      // If we're allowed to do a collection we're not at a
   1.334 -      // safepoint, so it is safe to unlock the Heap_lock.
   1.335 -      Heap_lock->unlock();
   1.336 -
   1.337 -      result = do_collection_pause(word_size, gc_count_before, &succeeded);
   1.338 -      assert_heap_not_locked();
   1.339 -      if (result != NULL) {
   1.340 -        assert(succeeded, "the VM op should have succeeded");
   1.341 -        return result;
   1.342 -      }
   1.343 -
   1.344 -      // If we get here, the VM operation either did not succeed
   1.345 -      // (i.e., another thread beat us to it) or it succeeded but
   1.346 -      // failed to allocate the object.
   1.347 -
   1.348 -      // If we're allowed to do a collection we're not at a
   1.349 -      // safepoint, so it is safe to lock the Heap_lock.
   1.350 -      Heap_lock->lock();
   1.351 -    }
   1.352 -
   1.353 -    assert(result == NULL, "otherwise we should have exited the loop earlier");
   1.354 -
   1.355 -    // So far our attempts to allocate failed. The only time we'll go
   1.356 -    // around the loop and try again is if we tried to do a GC and the
   1.357 -    // VM op that we tried to schedule was not successful because
   1.358 -    // another thread beat us to it. That way it's possible that some
   1.359 -    // space was freed up by the thread that successfully scheduled a
   1.360 -    // GC. So it's worth trying to allocate again.
   1.361 -    if (succeeded) {
   1.362 -      break;
   1.363 -    }
   1.364 -
   1.365 -    // Give a warning if we seem to be looping forever.
   1.366 -    if ((QueuedAllocationWarningCount > 0) &&
   1.367 -        (try_count % QueuedAllocationWarningCount == 0)) {
   1.368 -      warning("G1CollectedHeap::attempt_allocation_humongous "
   1.369 -              "retries %d times", try_count);
   1.370 -    }
   1.371 -  }
   1.372 -
   1.373 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   1.374 -  return NULL;
   1.375 -}
   1.376 -
   1.377 -HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
   1.378 -                                           bool expect_null_cur_alloc_region) {
   1.379 -  assert_at_safepoint(true /* should_be_vm_thread */);
   1.380 -  assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
   1.381 -         err_msg("the current alloc region was unexpectedly found "
   1.382 -                 "to be non-NULL, cur alloc region: "PTR_FORMAT" "
   1.383 -                 "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
   1.384 -                 _cur_alloc_region, expect_null_cur_alloc_region, word_size));
   1.385 -
   1.386 -  if (!isHumongous(word_size)) {
   1.387 -    if (!expect_null_cur_alloc_region) {
   1.388 -      HeapRegion* cur_alloc_region = _cur_alloc_region;
   1.389 -      if (cur_alloc_region != NULL) {
   1.390 -        // We are at a safepoint so no reason to use the MT-safe version.
   1.391 -        HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
   1.392 -        if (result != NULL) {
   1.393 -          assert(is_in(result), "result should be in the heap");
   1.394 -
   1.395 -          // We will not do any dirtying here. This is guaranteed to be
   1.396 -          // called during a safepoint and the thread that scheduled the
   1.397 -          // pause will do the dirtying if we return a non-NULL result.
   1.398 -          return result;
   1.399 -        }
   1.400 -
   1.401 -        retire_cur_alloc_region_common(cur_alloc_region);
   1.402 -      }
   1.403 -    }
   1.404 -
   1.405 -    assert(_cur_alloc_region == NULL,
   1.406 -           "at this point we should have no cur alloc region");
   1.407 -    return replace_cur_alloc_region_and_allocate(word_size,
   1.408 -                                                 true, /* at_safepoint */
   1.409 -                                                 false /* do_dirtying */,
   1.410 -                                                 false /* can_expand */);
   1.411 -  } else {
   1.412 -    return attempt_allocation_humongous(word_size,
   1.413 -                                        true /* at_safepoint */);
   1.414 -  }
   1.415 -
   1.416 -  ShouldNotReachHere();
   1.417 -}
   1.418 -
   1.419  HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   1.420    assert_heap_not_locked_and_not_at_safepoint();
   1.421 -  assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
   1.422 -
   1.423 -  // First attempt: Try allocating out of the current alloc region
   1.424 -  // using a CAS. If that fails, take the Heap_lock and retry the
   1.425 -  // allocation, potentially replacing the current alloc region.
   1.426 -  HeapWord* result = attempt_allocation(word_size);
   1.427 -  if (result != NULL) {
   1.428 -    assert_heap_not_locked();
   1.429 -    return result;
   1.430 -  }
   1.431 -
   1.432 -  // Second attempt: Go to the slower path where we might try to
   1.433 -  // schedule a collection.
   1.434 -  result = attempt_allocation_slow(word_size);
   1.435 -  if (result != NULL) {
   1.436 -    assert_heap_not_locked();
   1.437 -    return result;
   1.438 -  }
   1.439 -
   1.440 -  assert_heap_locked();
   1.441 -  // Need to unlock the Heap_lock before returning.
   1.442 -  Heap_lock->unlock();
   1.443 -  return NULL;
   1.444 +  assert(!isHumongous(word_size), "we do not allow humongous TLABs");
   1.445 +
   1.446 +  unsigned int dummy_gc_count_before;
   1.447 +  return attempt_allocation(word_size, &dummy_gc_count_before);
   1.448  }
   1.449  
   1.450  HeapWord*
   1.451 @@ -1200,48 +805,18 @@
   1.452    assert(!is_tlab, "mem_allocate() this should not be called directly "
   1.453           "to allocate TLABs");
   1.454  
    1.455 -  // Loop until the allocation is satisfied,
   1.456 -  // or unsatisfied after GC.
    1.457 +  // Loop until the allocation is satisfied, or unsatisfied after GC.
   1.458    for (int try_count = 1; /* we'll return */; try_count += 1) {
   1.459      unsigned int gc_count_before;
   1.460 -    {
   1.461 -      if (!isHumongous(word_size)) {
   1.462 -        // First attempt: Try allocating out of the current alloc region
   1.463 -        // using a CAS. If that fails, take the Heap_lock and retry the
   1.464 -        // allocation, potentially replacing the current alloc region.
   1.465 -        HeapWord* result = attempt_allocation(word_size);
   1.466 -        if (result != NULL) {
   1.467 -          assert_heap_not_locked();
   1.468 -          return result;
   1.469 -        }
   1.470 -
   1.471 -        assert_heap_locked();
   1.472 -
   1.473 -        // Second attempt: Go to the slower path where we might try to
   1.474 -        // schedule a collection.
   1.475 -        result = attempt_allocation_slow(word_size);
   1.476 -        if (result != NULL) {
   1.477 -          assert_heap_not_locked();
   1.478 -          return result;
   1.479 -        }
   1.480 -      } else {
   1.481 -        // attempt_allocation_humongous() requires the Heap_lock to be held.
   1.482 -        Heap_lock->lock();
   1.483 -
   1.484 -        HeapWord* result = attempt_allocation_humongous(word_size,
   1.485 -                                                     false /* at_safepoint */);
   1.486 -        if (result != NULL) {
   1.487 -          assert_heap_not_locked();
   1.488 -          return result;
   1.489 -        }
   1.490 -      }
   1.491 -
   1.492 -      assert_heap_locked();
   1.493 -      // Read the gc count while the heap lock is held.
   1.494 -      gc_count_before = SharedHeap::heap()->total_collections();
   1.495 -
   1.496 -      // Release the Heap_lock before attempting the collection.
   1.497 -      Heap_lock->unlock();
   1.498 +
   1.499 +    HeapWord* result = NULL;
   1.500 +    if (!isHumongous(word_size)) {
   1.501 +      result = attempt_allocation(word_size, &gc_count_before);
   1.502 +    } else {
   1.503 +      result = attempt_allocation_humongous(word_size, &gc_count_before);
   1.504 +    }
   1.505 +    if (result != NULL) {
   1.506 +      return result;
   1.507      }
   1.508  
   1.509      // Create the garbage collection operation...
   1.510 @@ -1249,7 +824,6 @@
   1.511      // ...and get the VM thread to execute it.
   1.512      VMThread::execute(&op);
   1.513  
   1.514 -    assert_heap_not_locked();
   1.515      if (op.prologue_succeeded() && op.pause_succeeded()) {
   1.516        // If the operation was successful we'll return the result even
   1.517        // if it is NULL. If the allocation attempt failed immediately
   1.518 @@ -1275,21 +849,207 @@
   1.519    }
   1.520  
   1.521    ShouldNotReachHere();
   1.522 +  return NULL;
   1.523  }
   1.524  
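
After this change mem_allocate() reduces to: run the appropriate first-level path, and if that fails, schedule a collection keyed on the GC count that was read under the Heap_lock, so the pause is skipped when another thread has already collected in the meantime. The toy model below illustrates that retry protocol; everything in it is made up for illustration, not the HotSpot API.

    #include <cstdio>

    // Toy state standing in for the heap and for total_collections().
    static unsigned int total_collections = 0;
    static int free_slots = 0;
    static int slot_storage[16];

    // First-level attempt: also reports the GC count it observed.
    static void* try_allocate(unsigned int* gc_count_before) {
      *gc_count_before = total_collections;
      if (free_slots > 0) return &slot_storage[--free_slots];
      return nullptr;
    }

    // "VM operation": only collects if no other collection happened since
    // gc_count_before was read (the prologue check); frees space, then retries.
    static bool schedule_collection(unsigned int gc_count_before, void** result) {
      if (gc_count_before != total_collections) return false;  // someone beat us
      ++total_collections;
      free_slots = 4;
      unsigned int ignored;
      *result = try_allocate(&ignored);
      return true;
    }

    static void* mem_allocate_sketch() {
      for (int try_count = 1; /* we'll return */; try_count += 1) {
        unsigned int gc_count_before;
        void* result = try_allocate(&gc_count_before);
        if (result != nullptr) return result;

        if (schedule_collection(gc_count_before, &result)) {
          return result;   // the pause ran: return its result, even if nullptr
        }
        // Otherwise another thread collected for us; loop and retry.
      }
    }

    int main() {
      std::printf("allocated %p after %u collection(s)\n",
                  mem_allocate_sketch(), total_collections);
    }
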
   1.525 -void G1CollectedHeap::abandon_cur_alloc_region() {
   1.526 +HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   1.527 +                                           unsigned int *gc_count_before_ret) {
   1.528 +  // Make sure you read the note in attempt_allocation_humongous().
   1.529 +
   1.530 +  assert_heap_not_locked_and_not_at_safepoint();
   1.531 +  assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
   1.532 +         "be called for humongous allocation requests");
   1.533 +
   1.534 +  // We should only get here after the first-level allocation attempt
   1.535 +  // (attempt_allocation()) failed to allocate.
   1.536 +
   1.537 +  // We will loop until a) we manage to successfully perform the
   1.538 +  // allocation or b) we successfully schedule a collection which
   1.539 +  // fails to perform the allocation. b) is the only case when we'll
   1.540 +  // return NULL.
   1.541 +  HeapWord* result = NULL;
   1.542 +  for (int try_count = 1; /* we'll return */; try_count += 1) {
   1.543 +    bool should_try_gc;
   1.544 +    unsigned int gc_count_before;
   1.545 +
   1.546 +    {
   1.547 +      MutexLockerEx x(Heap_lock);
   1.548 +
   1.549 +      result = _mutator_alloc_region.attempt_allocation_locked(word_size,
   1.550 +                                                      false /* bot_updates */);
   1.551 +      if (result != NULL) {
   1.552 +        return result;
   1.553 +      }
   1.554 +
   1.555 +      // If we reach here, attempt_allocation_locked() above failed to
   1.556 +      // allocate a new region. So the mutator alloc region should be NULL.
   1.557 +      assert(_mutator_alloc_region.get() == NULL, "only way to get here");
   1.558 +
   1.559 +      if (GC_locker::is_active_and_needs_gc()) {
   1.560 +        if (g1_policy()->can_expand_young_list()) {
   1.561 +          result = _mutator_alloc_region.attempt_allocation_force(word_size,
   1.562 +                                                      false /* bot_updates */);
   1.563 +          if (result != NULL) {
   1.564 +            return result;
   1.565 +          }
   1.566 +        }
   1.567 +        should_try_gc = false;
   1.568 +      } else {
   1.569 +        // Read the GC count while still holding the Heap_lock.
   1.570 +        gc_count_before = SharedHeap::heap()->total_collections();
   1.571 +        should_try_gc = true;
   1.572 +      }
   1.573 +    }
   1.574 +
   1.575 +    if (should_try_gc) {
   1.576 +      bool succeeded;
   1.577 +      result = do_collection_pause(word_size, gc_count_before, &succeeded);
   1.578 +      if (result != NULL) {
   1.579 +        assert(succeeded, "only way to get back a non-NULL result");
   1.580 +        return result;
   1.581 +      }
   1.582 +
   1.583 +      if (succeeded) {
   1.584 +        // If we get here we successfully scheduled a collection which
   1.585 +        // failed to allocate. No point in trying to allocate
   1.586 +        // further. We'll just return NULL.
   1.587 +        MutexLockerEx x(Heap_lock);
   1.588 +        *gc_count_before_ret = SharedHeap::heap()->total_collections();
   1.589 +        return NULL;
   1.590 +      }
   1.591 +    } else {
   1.592 +      GC_locker::stall_until_clear();
   1.593 +    }
   1.594 +
    1.595 +    // We can reach here if we were unsuccessful in scheduling a
    1.596 +    // collection (because another thread beat us to it) or if we were
    1.597 +    // stalled due to the GC locker. In either case we should retry the
   1.598 +    // allocation attempt in case another thread successfully
   1.599 +    // performed a collection and reclaimed enough space. We do the
   1.600 +    // first attempt (without holding the Heap_lock) here and the
   1.601 +    // follow-on attempt will be at the start of the next loop
   1.602 +    // iteration (after taking the Heap_lock).
   1.603 +    result = _mutator_alloc_region.attempt_allocation(word_size,
   1.604 +                                                      false /* bot_updates */);
    1.605 +    if (result != NULL) {
   1.606 +      return result;
   1.607 +    }
   1.608 +
   1.609 +    // Give a warning if we seem to be looping forever.
   1.610 +    if ((QueuedAllocationWarningCount > 0) &&
   1.611 +        (try_count % QueuedAllocationWarningCount == 0)) {
   1.612 +      warning("G1CollectedHeap::attempt_allocation_slow() "
   1.613 +              "retries %d times", try_count);
   1.614 +    }
   1.615 +  }
   1.616 +
   1.617 +  ShouldNotReachHere();
   1.618 +  return NULL;
   1.619 +}
   1.620 +
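
The new attempt_allocation_slow() above follows a decide-under-the-lock, act-after-releasing-it shape: the locked allocation attempt, the GC-locker check, and the GC count read all happen inside the Heap_lock scope, while the pause or the stall happens outside it. Below is a condensed model of that control flow using std::mutex; the member functions are stubs and none of this is the real MutexLockerEx/GC_locker interface.

    #include <cstddef>
    #include <mutex>

    struct SlowPathModel {
      std::mutex heap_lock;
      bool gc_locker_active = false;
      unsigned int collections = 0;

      // Stubs standing in for the G1 calls used by the real slow path.
      void* attempt_allocation_locked(size_t) { return nullptr; }
      void* attempt_allocation_force(size_t)  { return nullptr; }
      bool  can_expand_young_list()           { return false; }
      void* do_collection_pause(size_t, unsigned int, bool* ok) { *ok = true; return nullptr; }
      void  stall_until_clear()               {}

      void* allocate_slow(size_t words, unsigned int* gc_count_before_ret) {
        for (;;) {
          bool should_try_gc;
          unsigned int gc_count_before = 0;
          {
            std::lock_guard<std::mutex> x(heap_lock);          // ~ MutexLockerEx
            if (void* r = attempt_allocation_locked(words)) return r;
            if (gc_locker_active) {
              // Locked out of GC: only a forced young-list expansion can help.
              if (can_expand_young_list())
                if (void* r = attempt_allocation_force(words)) return r;
              should_try_gc = false;
            } else {
              gc_count_before = collections;   // read while still holding the lock
              should_try_gc = true;
            }
          }                                    // Heap_lock released here

          if (should_try_gc) {
            bool succeeded;
            if (void* r = do_collection_pause(words, gc_count_before, &succeeded)) return r;
            if (succeeded) {
              // The pause ran but could not allocate: report the new GC count
              // and give up, exactly as the patch does.
              std::lock_guard<std::mutex> x(heap_lock);
              *gc_count_before_ret = collections;
              return nullptr;
            }
          } else {
            stall_until_clear();               // wait for the GC locker instead
          }
          // The real code makes one more lock-free attempt here before looping.
        }
      }
    };

    int main() {
      SlowPathModel m;
      unsigned int count;
      return m.allocate_slow(8, &count) == nullptr ? 0 : 1;
    }
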
   1.621 +HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
    1.622 +                                          unsigned int *gc_count_before_ret) {
   1.623 +  // The structure of this method has a lot of similarities to
   1.624 +  // attempt_allocation_slow(). The reason these two were not merged
   1.625 +  // into a single one is that such a method would require several "if
   1.626 +  // allocation is not humongous do this, otherwise do that"
   1.627 +  // conditional paths which would obscure its flow. In fact, an early
   1.628 +  // version of this code did use a unified method which was harder to
   1.629 +  // follow and, as a result, it had subtle bugs that were hard to
   1.630 +  // track down. So keeping these two methods separate allows each to
   1.631 +  // be more readable. It will be good to keep these two in sync as
   1.632 +  // much as possible.
   1.633 +
   1.634 +  assert_heap_not_locked_and_not_at_safepoint();
   1.635 +  assert(isHumongous(word_size), "attempt_allocation_humongous() "
   1.636 +         "should only be called for humongous allocations");
   1.637 +
   1.638 +  // We will loop until a) we manage to successfully perform the
   1.639 +  // allocation or b) we successfully schedule a collection which
   1.640 +  // fails to perform the allocation. b) is the only case when we'll
   1.641 +  // return NULL.
   1.642 +  HeapWord* result = NULL;
   1.643 +  for (int try_count = 1; /* we'll return */; try_count += 1) {
   1.644 +    bool should_try_gc;
   1.645 +    unsigned int gc_count_before;
   1.646 +
   1.647 +    {
   1.648 +      MutexLockerEx x(Heap_lock);
   1.649 +
   1.650 +      // Given that humongous objects are not allocated in young
   1.651 +      // regions, we'll first try to do the allocation without doing a
   1.652 +      // collection hoping that there's enough space in the heap.
   1.653 +      result = humongous_obj_allocate(word_size);
   1.654 +      if (result != NULL) {
   1.655 +        return result;
   1.656 +      }
   1.657 +
   1.658 +      if (GC_locker::is_active_and_needs_gc()) {
   1.659 +        should_try_gc = false;
   1.660 +      } else {
   1.661 +        // Read the GC count while still holding the Heap_lock.
   1.662 +        gc_count_before = SharedHeap::heap()->total_collections();
   1.663 +        should_try_gc = true;
   1.664 +      }
   1.665 +    }
   1.666 +
   1.667 +    if (should_try_gc) {
   1.668 +      // If we failed to allocate the humongous object, we should try to
   1.669 +      // do a collection pause (if we're allowed) in case it reclaims
   1.670 +      // enough space for the allocation to succeed after the pause.
   1.671 +
   1.672 +      bool succeeded;
   1.673 +      result = do_collection_pause(word_size, gc_count_before, &succeeded);
   1.674 +      if (result != NULL) {
   1.675 +        assert(succeeded, "only way to get back a non-NULL result");
   1.676 +        return result;
   1.677 +      }
   1.678 +
   1.679 +      if (succeeded) {
   1.680 +        // If we get here we successfully scheduled a collection which
   1.681 +        // failed to allocate. No point in trying to allocate
   1.682 +        // further. We'll just return NULL.
   1.683 +        MutexLockerEx x(Heap_lock);
   1.684 +        *gc_count_before_ret = SharedHeap::heap()->total_collections();
   1.685 +        return NULL;
   1.686 +      }
   1.687 +    } else {
   1.688 +      GC_locker::stall_until_clear();
   1.689 +    }
   1.690 +
    1.691 +    // We can reach here if we were unsuccessful in scheduling a
    1.692 +    // collection (because another thread beat us to it) or if we were
    1.693 +    // stalled due to the GC locker. In either case we should retry the
   1.694 +    // allocation attempt in case another thread successfully
   1.695 +    // performed a collection and reclaimed enough space.  Give a
   1.696 +    // warning if we seem to be looping forever.
   1.697 +
   1.698 +    if ((QueuedAllocationWarningCount > 0) &&
   1.699 +        (try_count % QueuedAllocationWarningCount == 0)) {
   1.700 +      warning("G1CollectedHeap::attempt_allocation_humongous() "
   1.701 +              "retries %d times", try_count);
   1.702 +    }
   1.703 +  }
   1.704 +
   1.705 +  ShouldNotReachHere();
   1.706 +  return NULL;
   1.707 +}
   1.708 +
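
attempt_allocation_humongous() is only reached when isHumongous(word_size) holds, i.e. when the request is at least half a heap region and therefore bypasses the young mutator regions entirely. A small worked example of that threshold follows; the 1 MB region size is an assumption for illustration only (the real value depends on heap size and flags).

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t heap_word_size = sizeof(void*);              // 8 bytes on 64-bit
      const size_t region_bytes   = 1 * 1024 * 1024;            // assumed region size
      const size_t grain_words    = region_bytes / heap_word_size;
      const size_t humongous_threshold_words = grain_words / 2; // half a region

      const size_t request_words = 80 * 1024;                   // a 640 KB request
      bool humongous = request_words >= humongous_threshold_words;
      std::printf("threshold = %zu words, request = %zu words, humongous = %s\n",
                  humongous_threshold_words, request_words, humongous ? "yes" : "no");
    }
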
   1.709 +HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
   1.710 +                                       bool expect_null_mutator_alloc_region) {
   1.711    assert_at_safepoint(true /* should_be_vm_thread */);
   1.712 -
   1.713 -  HeapRegion* cur_alloc_region = _cur_alloc_region;
   1.714 -  if (cur_alloc_region != NULL) {
   1.715 -    assert(!cur_alloc_region->is_empty(),
   1.716 -           "the current alloc region can never be empty");
   1.717 -    assert(cur_alloc_region->is_young(),
   1.718 -           "the current alloc region should be young");
   1.719 -
   1.720 -    retire_cur_alloc_region_common(cur_alloc_region);
   1.721 -  }
   1.722 -  assert(_cur_alloc_region == NULL, "post-condition");
   1.723 +  assert(_mutator_alloc_region.get() == NULL ||
   1.724 +                                             !expect_null_mutator_alloc_region,
   1.725 +         "the current alloc region was unexpectedly found to be non-NULL");
   1.726 +
   1.727 +  if (!isHumongous(word_size)) {
   1.728 +    return _mutator_alloc_region.attempt_allocation_locked(word_size,
   1.729 +                                                      false /* bot_updates */);
   1.730 +  } else {
   1.731 +    return humongous_obj_allocate(word_size);
   1.732 +  }
   1.733 +
   1.734 +  ShouldNotReachHere();
   1.735  }
   1.736  
   1.737  void G1CollectedHeap::abandon_gc_alloc_regions() {
   1.738 @@ -1417,8 +1177,8 @@
   1.739  
   1.740      if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
   1.741        HandleMark hm;  // Discard invalid handles created during verification
   1.742 +      gclog_or_tty->print(" VerifyBeforeGC:");
   1.743        prepare_for_verify();
   1.744 -      gclog_or_tty->print(" VerifyBeforeGC:");
   1.745        Universe::verify(true);
   1.746      }
   1.747  
   1.748 @@ -1439,9 +1199,8 @@
   1.749      concurrent_mark()->abort();
   1.750  
   1.751      // Make sure we'll choose a new allocation region afterwards.
   1.752 -    abandon_cur_alloc_region();
   1.753 +    release_mutator_alloc_region();
   1.754      abandon_gc_alloc_regions();
   1.755 -    assert(_cur_alloc_region == NULL, "Invariant.");
   1.756      g1_rem_set()->cleanupHRRS();
   1.757      tear_down_region_lists();
   1.758  
   1.759 @@ -1547,6 +1306,8 @@
   1.760      // evacuation pause.
   1.761      clear_cset_fast_test();
   1.762  
   1.763 +    init_mutator_alloc_region();
   1.764 +
   1.765      double end = os::elapsedTime();
   1.766      g1_policy()->record_full_collection_end();
   1.767  
   1.768 @@ -1720,8 +1481,9 @@
   1.769  
   1.770    *succeeded = true;
   1.771    // Let's attempt the allocation first.
   1.772 -  HeapWord* result = attempt_allocation_at_safepoint(word_size,
   1.773 -                                     false /* expect_null_cur_alloc_region */);
   1.774 +  HeapWord* result =
   1.775 +    attempt_allocation_at_safepoint(word_size,
   1.776 +                                 false /* expect_null_mutator_alloc_region */);
   1.777    if (result != NULL) {
   1.778      assert(*succeeded, "sanity");
   1.779      return result;
   1.780 @@ -1748,7 +1510,7 @@
   1.781  
   1.782    // Retry the allocation
   1.783    result = attempt_allocation_at_safepoint(word_size,
   1.784 -                                      true /* expect_null_cur_alloc_region */);
   1.785 +                                  true /* expect_null_mutator_alloc_region */);
   1.786    if (result != NULL) {
   1.787      assert(*succeeded, "sanity");
   1.788      return result;
   1.789 @@ -1765,7 +1527,7 @@
   1.790  
   1.791    // Retry the allocation once more
   1.792    result = attempt_allocation_at_safepoint(word_size,
   1.793 -                                      true /* expect_null_cur_alloc_region */);
   1.794 +                                  true /* expect_null_mutator_alloc_region */);
   1.795    if (result != NULL) {
   1.796      assert(*succeeded, "sanity");
   1.797      return result;
   1.798 @@ -1796,7 +1558,7 @@
   1.799    if (expand(expand_bytes)) {
   1.800      verify_region_sets_optional();
   1.801      return attempt_allocation_at_safepoint(word_size,
   1.802 -                                          false /* expect_null_cur_alloc_region */);
   1.803 +                                 false /* expect_null_mutator_alloc_region */);
   1.804    }
   1.805    return NULL;
   1.806  }
   1.807 @@ -1940,7 +1702,6 @@
   1.808    _evac_failure_scan_stack(NULL) ,
   1.809    _mark_in_progress(false),
   1.810    _cg1r(NULL), _summary_bytes_used(0),
   1.811 -  _cur_alloc_region(NULL),
   1.812    _refine_cte_cl(NULL),
   1.813    _full_collection(false),
   1.814    _free_list("Master Free List"),
   1.815 @@ -2099,7 +1860,6 @@
   1.816    _g1_max_committed = _g1_committed;
   1.817    _hrs = new HeapRegionSeq(_expansion_regions);
   1.818    guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
   1.819 -  guarantee(_cur_alloc_region == NULL, "from constructor");
   1.820  
   1.821    // 6843694 - ensure that the maximum region index can fit
   1.822    // in the remembered set structures.
   1.823 @@ -2195,6 +1955,22 @@
   1.824    // Do later initialization work for concurrent refinement.
   1.825    _cg1r->init();
   1.826  
   1.827 +  // Here we allocate the dummy full region that is required by the
   1.828 +  // G1AllocRegion class. If we don't pass an address in the reserved
   1.829 +  // space here, lots of asserts fire.
   1.830 +  MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords);
   1.831 +  HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true);
   1.832 +  // We'll re-use the same region whether the alloc region will
   1.833 +  // require BOT updates or not and, if it doesn't, then a non-young
   1.834 +  // region will complain that it cannot support allocations without
   1.835 +  // BOT updates. So we'll tag the dummy region as young to avoid that.
   1.836 +  dummy_region->set_young();
   1.837 +  // Make sure it's full.
   1.838 +  dummy_region->set_top(dummy_region->end());
   1.839 +  G1AllocRegion::setup(this, dummy_region);
   1.840 +
   1.841 +  init_mutator_alloc_region();
   1.842 +
   1.843    return JNI_OK;
   1.844  }
   1.845  
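
The dummy full region installed above lets G1AllocRegion keep a non-NULL "active" region at all times: while no real region is in place, lock-free fast-path attempts race against a region that is already full and simply fail, falling through to the slow path without any NULL checks. A toy version of that sentinel idea is sketched below; the types and method names are made up, not the G1AllocRegion interface.

    #include <atomic>
    #include <cstddef>

    struct ToyRegion {
      std::atomic<size_t> top;
      size_t end;
      ToyRegion(size_t t, size_t e) : top(t), end(e) {}

      void* par_allocate(size_t words) {
        size_t cur = top.load();
        while (cur + words <= end) {
          if (top.compare_exchange_weak(cur, cur + words))
            return reinterpret_cast<void*>(cur + 1);   // toy "address"
        }
        return nullptr;                                // full: take the slow path
      }
    };

    // Sentinel whose top == end, so every fast-path attempt fails immediately.
    static ToyRegion dummy_full_region(/*top=*/1024, /*end=*/1024);

    struct ToyAllocRegion {
      std::atomic<ToyRegion*> active{&dummy_full_region};     // never NULL

      void* attempt_allocation(size_t words) {
        // No NULL check: at worst we CAS against the full dummy and fail.
        return active.load()->par_allocate(words);
      }
      void release() { active.store(&dummy_full_region); }    // back to the sentinel
    };

    int main() {
      ToyAllocRegion ar;
      return ar.attempt_allocation(16) == nullptr ? 0 : 1;     // 0: dummy is full
    }
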
   1.846 @@ -2261,7 +2037,7 @@
   1.847           "Should be owned on this thread's behalf.");
   1.848    size_t result = _summary_bytes_used;
   1.849    // Read only once in case it is set to NULL concurrently
   1.850 -  HeapRegion* hr = _cur_alloc_region;
   1.851 +  HeapRegion* hr = _mutator_alloc_region.get();
   1.852    if (hr != NULL)
   1.853      result += hr->used();
   1.854    return result;
   1.855 @@ -2324,13 +2100,11 @@
   1.856    // to free(), resulting in a SIGSEGV. Note that this doesn't appear
   1.857    // to be a problem in the optimized build, since the two loads of the
   1.858    // current allocation region field are optimized away.
   1.859 -  HeapRegion* car = _cur_alloc_region;
   1.860 -
   1.861 -  // FIXME: should iterate over all regions?
   1.862 -  if (car == NULL) {
   1.863 +  HeapRegion* hr = _mutator_alloc_region.get();
   1.864 +  if (hr == NULL) {
   1.865      return 0;
   1.866    }
   1.867 -  return car->free();
   1.868 +  return hr->free();
   1.869  }
   1.870  
   1.871  bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   1.872 @@ -2781,16 +2555,12 @@
   1.873    // since we can't allow tlabs to grow big enough to accomodate
   1.874    // humongous objects.
   1.875  
   1.876 -  // We need to store the cur alloc region locally, since it might change
   1.877 -  // between when we test for NULL and when we use it later.
   1.878 -  ContiguousSpace* cur_alloc_space = _cur_alloc_region;
   1.879 +  HeapRegion* hr = _mutator_alloc_region.get();
   1.880    size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
   1.881 -
   1.882 -  if (cur_alloc_space == NULL) {
   1.883 +  if (hr == NULL) {
   1.884      return max_tlab_size;
   1.885    } else {
   1.886 -    return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
   1.887 -                max_tlab_size);
   1.888 +    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
   1.889    }
   1.890  }
   1.891  
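
The simplified unsafe_max_tlab_alloc() above clamps the suggested TLAB size between MinTLABSize and the humongous threshold, using the free space of the current mutator region when one is installed. A small numeric illustration follows, with all sizes in words and the values chosen only for the example.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t max_tlab_words = 64 * 1024;   // assumed humongous threshold
      const size_t min_tlab_words = 2 * 1024;    // assumed MinTLABSize
      const size_t region_free    = 512;         // nearly-full current alloc region

      // MIN2(MAX2(hr->free(), MinTLABSize), max_tlab_size) from the patch:
      size_t suggested = std::min(std::max(region_free, min_tlab_words),
                                  max_tlab_words);
      std::printf("suggested TLAB size = %zu words\n", suggested);   // prints 2048
    }
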
   1.892 @@ -3364,6 +3134,7 @@
   1.893    }
   1.894  
   1.895    verify_region_sets_optional();
   1.896 +  verify_dirty_young_regions();
   1.897  
   1.898    {
   1.899      // This call will decide whether this pause is an initial-mark
   1.900 @@ -3425,8 +3196,8 @@
   1.901  
   1.902        if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
   1.903          HandleMark hm;  // Discard invalid handles created during verification
   1.904 +        gclog_or_tty->print(" VerifyBeforeGC:");
   1.905          prepare_for_verify();
   1.906 -        gclog_or_tty->print(" VerifyBeforeGC:");
   1.907          Universe::verify(false);
   1.908        }
   1.909  
   1.910 @@ -3442,7 +3213,7 @@
   1.911  
   1.912        // Forget the current alloc region (we might even choose it to be part
   1.913        // of the collection set!).
   1.914 -      abandon_cur_alloc_region();
   1.915 +      release_mutator_alloc_region();
   1.916  
   1.917        // The elapsed time induced by the start time below deliberately elides
   1.918        // the possible verification above.
   1.919 @@ -3573,6 +3344,8 @@
   1.920        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
   1.921  #endif // YOUNG_LIST_VERBOSE
   1.922  
   1.923 +      init_mutator_alloc_region();
   1.924 +
   1.925        double end_time_sec = os::elapsedTime();
   1.926        double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
   1.927        g1_policy()->record_pause_time_ms(pause_time_ms);
   1.928 @@ -3655,6 +3428,15 @@
   1.929    return gclab_word_size;
   1.930  }
   1.931  
   1.932 +void G1CollectedHeap::init_mutator_alloc_region() {
   1.933 +  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
   1.934 +  _mutator_alloc_region.init();
   1.935 +}
   1.936 +
   1.937 +void G1CollectedHeap::release_mutator_alloc_region() {
   1.938 +  _mutator_alloc_region.release();
   1.939 +  assert(_mutator_alloc_region.get() == NULL, "post-condition");
   1.940 +}
   1.941  
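
init_mutator_alloc_region() and release_mutator_alloc_region() now bracket every pause and full collection in this patch: the mutator region is released before regions are rearranged (it may even be chosen for the collection set) and re-initialized once the pause is over. The toy illustration below shows that pairing; ToyHeap is hypothetical, not the G1CollectedHeap interface.

    #include <cassert>
    #include <cstdio>

    struct ToyHeap {
      bool mutator_region_active = false;

      void init_mutator_alloc_region() {
        assert(!mutator_region_active);     // pre-condition, as in the patch
        mutator_region_active = true;
      }
      void release_mutator_alloc_region() {
        mutator_region_active = false;      // post-condition: no active region
      }
      void evacuate() { /* pause work; the old region may join the cset */ }
    };

    int main() {
      ToyHeap h;
      h.init_mutator_alloc_region();        // once, at heap initialization

      // Every evacuation pause / full GC releases first, re-inits at the end.
      h.release_mutator_alloc_region();
      h.evacuate();
      h.init_mutator_alloc_region();

      std::printf("mutator alloc region active again: %d\n",
                  h.mutator_region_active);
    }
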
   1.942  void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   1.943    assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
   1.944 @@ -5140,10 +4922,8 @@
   1.945    CardTableModRefBS* _ct_bs;
   1.946  public:
   1.947    G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
   1.948 -    : _ct_bs(ct_bs)
   1.949 -  { }
   1.950 -  virtual bool doHeapRegion(HeapRegion* r)
   1.951 -  {
   1.952 +    : _ct_bs(ct_bs) { }
   1.953 +  virtual bool doHeapRegion(HeapRegion* r) {
   1.954      MemRegion mr(r->bottom(), r->end());
   1.955      if (r->is_survivor()) {
   1.956        _ct_bs->verify_dirty_region(mr);
   1.957 @@ -5153,6 +4933,29 @@
   1.958      return false;
   1.959    }
   1.960  };
   1.961 +
   1.962 +void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
   1.963 +  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
   1.964 +  for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
   1.965 +    // We cannot guarantee that [bottom(),end()] is dirty.  Threads
   1.966 +    // dirty allocated blocks as they allocate them. The thread that
   1.967 +    // retires each region and replaces it with a new one will do a
   1.968 +    // maximal allocation to fill in [pre_dummy_top(),end()] but will
   1.969 +    // not dirty that area (one less thing to have to do while holding
   1.970 +    // a lock). So we can only verify that [bottom(),pre_dummy_top()]
   1.971 +    // is dirty. Also note that verify_dirty_region() requires
   1.972 +    // mr.start() and mr.end() to be card aligned and pre_dummy_top()
   1.973 +    // is not guaranteed to be.
   1.974 +    MemRegion mr(hr->bottom(),
   1.975 +                 ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
   1.976 +    ct_bs->verify_dirty_region(mr);
   1.977 +  }
   1.978 +}
   1.979 +
   1.980 +void G1CollectedHeap::verify_dirty_young_regions() {
   1.981 +  verify_dirty_young_list(_young_list->first_region());
   1.982 +  verify_dirty_young_list(_young_list->first_survivor_region());
   1.983 +}
   1.984  #endif
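
verify_dirty_young_list() can only check [bottom(), pre_dummy_top()), and must card-align the upper bound first because verify_dirty_region() requires both ends of the MemRegion to sit on card boundaries. A small example of that rounding is shown below; the 512-byte card size and the round-up direction are assumptions for illustration.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t card_size = 512;          // typical card size, in bytes
      uintptr_t pre_dummy_top   = 0x12345;      // arbitrary address, mid-card

      // Round up to the next card boundary (what align_to_card_boundary() is
      // used for here; the exact helper semantics are an assumption).
      uintptr_t aligned = (pre_dummy_top + card_size - 1) & ~(uintptr_t)(card_size - 1);

      std::printf("pre_dummy_top = 0x%llx, card-aligned end = 0x%llx\n",
                  (unsigned long long)pre_dummy_top, (unsigned long long)aligned);
    }
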
   1.985  
   1.986  void G1CollectedHeap::cleanUpCardTable() {
   1.987 @@ -5500,6 +5303,44 @@
   1.988    }
   1.989  }
   1.990  
   1.991 +HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
   1.992 +                                                      bool force) {
   1.993 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   1.994 +  assert(!force || g1_policy()->can_expand_young_list(),
   1.995 +         "if force is true we should be able to expand the young list");
   1.996 +  if (force || !g1_policy()->is_young_list_full()) {
   1.997 +    HeapRegion* new_alloc_region = new_region(word_size,
   1.998 +                                              false /* do_expand */);
   1.999 +    if (new_alloc_region != NULL) {
  1.1000 +      g1_policy()->update_region_num(true /* next_is_young */);
  1.1001 +      set_region_short_lived_locked(new_alloc_region);
  1.1002 +      return new_alloc_region;
  1.1003 +    }
  1.1004 +  }
  1.1005 +  return NULL;
  1.1006 +}
  1.1007 +
  1.1008 +void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
  1.1009 +                                                  size_t allocated_bytes) {
  1.1010 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  1.1011 +  assert(alloc_region->is_young(), "all mutator alloc regions should be young");
  1.1012 +
  1.1013 +  g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  1.1014 +  _summary_bytes_used += allocated_bytes;
  1.1015 +}
  1.1016 +
  1.1017 +HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
  1.1018 +                                                    bool force) {
  1.1019 +  return _g1h->new_mutator_alloc_region(word_size, force);
  1.1020 +}
  1.1021 +
  1.1022 +void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
  1.1023 +                                       size_t allocated_bytes) {
  1.1024 +  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
  1.1025 +}
  1.1026 +
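
MutatorAllocRegion plugs into the shared G1AllocRegion machinery through two virtual hooks: allocate_new_region() asks the heap for a fresh young region, and retire_region() hands the filled one back so it can be added to the incremental collection set and accounted in _summary_bytes_used. A toy rendering of that template-method split follows; the classes below are made up to show the shape, not the real interface.

    #include <cstddef>
    #include <cstdio>

    struct Region { size_t used_bytes = 0; };

    // The base class owns the active region and decides *when* to swap it;
    // subclasses decide *where* regions come from and where they go.
    class AllocRegionBase {
    protected:
      Region* _region = nullptr;
      virtual Region* allocate_new_region(size_t word_size) = 0;
      virtual void    retire_region(Region* r, size_t allocated_bytes) = 0;
    public:
      virtual ~AllocRegionBase() = default;
      Region* replace_region(size_t word_size) {
        if (_region != nullptr) retire_region(_region, _region->used_bytes);
        _region = allocate_new_region(word_size);
        return _region;
      }
      void release() {
        if (_region != nullptr) {
          retire_region(_region, _region->used_bytes);
          _region = nullptr;
        }
      }
    };

    // Mutator flavour: new regions are young; retired ones join the collection set.
    class ToyMutatorAllocRegion : public AllocRegionBase {
      Region* allocate_new_region(size_t) override { return new Region(); }
      void retire_region(Region* r, size_t bytes) override {
        std::printf("retiring a region with %zu bytes used\n", bytes);
        delete r;                            // the real code adds it to the cset instead
      }
    };

    int main() {
      ToyMutatorAllocRegion mr;
      mr.replace_region(128);
      mr.replace_region(128);                // retires the first, installs a second
      mr.release();                          // retires the second as well
    }
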
  1.1027 +// Heap region set verification
  1.1028 +
  1.1029  class VerifyRegionListsClosure : public HeapRegionClosure {
  1.1030  private:
  1.1031    HumongousRegionSet* _humongous_set;
