7023069: G1: Introduce symmetric locking in the slow allocation path

Wed, 30 Mar 2011 10:26:59 -0400

author:      tonyp
date:        Wed, 30 Mar 2011 10:26:59 -0400
changeset:   2715:abdfc822206f
parent:      2714:455328d90876
child:       2716:c84ee870e0b9

7023069: G1: Introduce symmetric locking in the slow allocation path
7023151: G1: refactor the code that operates on _cur_alloc_region to be re-used for allocs by the GC threads
7018286: G1: humongous allocation attempts should take the GC locker into account
Summary: First, this change replaces the asymmetric locking scheme in the G1 slow alloc path with a symmetric one. Second, it factors out the code that operates on _cur_alloc_region so that it can be re-used for allocations by the GC threads in the future.
Reviewed-by: stefank, brutisso, johnc
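
To make the new scheme concrete, below is a minimal stand-alone model of the symmetric protocol described above: every thread first tries a lock-free CAS allocation out of the active region, and the slow path is the same for everyone, namely take the lock, retry, and only then retire the active region and install a new one. This is an illustration only, not HotSpot code; the names SimpleRegion and SimpleAllocRegion are invented, and details such as BOT updates, the dummy-object fill on retire, and GC scheduling are omitted (the real implementation is the new G1AllocRegion class in the diff below).

#include <atomic>
#include <cstddef>
#include <mutex>

// A region with a CAS-based bump pointer, analogous to HeapRegion::par_allocate().
struct SimpleRegion {
  std::atomic<size_t> top;
  size_t end;
  explicit SimpleRegion(size_t capacity_words) : top(0), end(capacity_words) {}

  bool par_allocate(size_t word_size, size_t* offset_out) {
    size_t old_top = top.load();
    do {
      if (old_top + word_size > end) {
        return false;                                 // region is (now) full
      }
    } while (!top.compare_exchange_weak(old_top, old_top + word_size));
    *offset_out = old_top;
    return true;
  }
};

class SimpleAllocRegion {
  SimpleRegion _dummy;                                // permanently full sentinel
  std::atomic<SimpleRegion*> _alloc_region;           // never NULL: dummy when inactive
  std::mutex _lock;                                   // plays the role of Heap_lock

public:
  SimpleAllocRegion() : _dummy(0), _alloc_region(&_dummy) {}

  // First level: lock-free attempt out of the active region.
  bool attempt_allocation(size_t word_size, size_t* offset_out) {
    return _alloc_region.load(std::memory_order_acquire)
               ->par_allocate(word_size, offset_out);
  }

  // Second level: the symmetric slow path. Every thread takes the same lock,
  // retries (someone may have installed a fresh region while we waited), and
  // only then replaces the active region.
  bool attempt_allocation_locked(size_t word_size, size_t* offset_out) {
    std::lock_guard<std::mutex> x(_lock);
    if (attempt_allocation(word_size, offset_out)) {
      return true;
    }
    // Retire the old region (the toy model simply drops it) and install a new
    // one, allocating into it *before* publishing it so it is never empty.
    SimpleRegion* fresh = new SimpleRegion(1024);     // stand-in for allocate_new_region()
    bool ok = fresh->par_allocate(word_size, offset_out);
    _alloc_region.store(fresh, std::memory_order_release);
    return ok;
  }
};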

src/share/vm/gc_implementation/g1/g1AllocRegion.cpp
src/share/vm/gc_implementation/g1/g1AllocRegion.hpp
src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
src/share/vm/gc_implementation/g1/heapRegion.cpp
src/share/vm/gc_implementation/g1/heapRegion.hpp
src/share/vm/gc_implementation/g1/heapRegion.inline.hpp
src/share/vm/memory/cardTableModRefBS.hpp
src/share/vm/memory/space.cpp
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Wed Mar 30 10:26:59 2011 -0400
     1.3 @@ -0,0 +1,208 @@
     1.4 +/*
     1.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
    1.30 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    1.31 +
    1.32 +G1CollectedHeap* G1AllocRegion::_g1h = NULL;
    1.33 +HeapRegion* G1AllocRegion::_dummy_region = NULL;
    1.34 +
    1.35 +void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
    1.36 +  assert(_dummy_region == NULL, "should be set once");
    1.37 +  assert(dummy_region != NULL, "pre-condition");
    1.38 +  assert(dummy_region->free() == 0, "pre-condition");
    1.39 +
    1.40 +  // Make sure that any allocation attempt on this region will fail
    1.41 +  // and will not trigger any asserts.
    1.42 +  assert(allocate(dummy_region, 1, false) == NULL, "should fail");
    1.43 +  assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
    1.44 +  assert(allocate(dummy_region, 1, true) == NULL, "should fail");
    1.45 +  assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");
    1.46 +
    1.47 +  _g1h = g1h;
    1.48 +  _dummy_region = dummy_region;
    1.49 +}
    1.50 +
    1.51 +void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
    1.52 +                                            bool bot_updates) {
    1.53 +  assert(alloc_region != NULL && alloc_region != _dummy_region,
    1.54 +         "pre-condition");
    1.55 +
    1.56 +  // Other threads might still be trying to allocate using a CAS out
    1.57 +  // of the region we are trying to retire, as they can do so without
    1.58 +  // holding the lock. So, we first have to make sure that no one else
    1.59 +  // can allocate out of it by doing a maximal allocation. Even if our
    1.60 +  // CAS attempt fails a few times, we'll succeed sooner or later
    1.61 +  // given that failed CAS attempts mean that the region is getting
    1.62 +  // close to being full.
    1.63 +  size_t free_word_size = alloc_region->free() / HeapWordSize;
    1.64 +
    1.65 +  // This is the minimum free chunk we can turn into a dummy
    1.66 +  // object. If the free space falls below this, then no one can
    1.67 +  // allocate in this region anyway (all allocation requests will be
    1.68 +  // of a size larger than this) so we won't have to perform the dummy
    1.69 +  // allocation.
    1.70 +  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
    1.71 +
    1.72 +  while (free_word_size >= min_word_size_to_fill) {
    1.73 +    HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
    1.74 +    if (dummy != NULL) {
    1.75 +      // If the allocation was successful we should fill in the space.
    1.76 +      CollectedHeap::fill_with_object(dummy, free_word_size);
    1.77 +      alloc_region->set_pre_dummy_top(dummy);
    1.78 +      break;
    1.79 +    }
    1.80 +
    1.81 +    free_word_size = alloc_region->free() / HeapWordSize;
    1.82 +    // It's also possible that someone else beats us to the
    1.83 +    // allocation and they fill up the region. In that case, we can
    1.84 +    // just get out of the loop.
    1.85 +  }
    1.86 +  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
    1.87 +         "post-condition");
    1.88 +}
    1.89 +
    1.90 +void G1AllocRegion::retire(bool fill_up) {
    1.91 +  assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
    1.92 +
    1.93 +  trace("retiring");
    1.94 +  HeapRegion* alloc_region = _alloc_region;
    1.95 +  if (alloc_region != _dummy_region) {
    1.96 +    // We never have to check whether the active region is empty or not,
    1.97 +    // and potentially free it if it is, given that it's guaranteed that
    1.98 +    // it will never be empty.
    1.99 +    assert(!alloc_region->is_empty(),
   1.100 +           ar_ext_msg(this, "the alloc region should never be empty"));
   1.101 +
   1.102 +    if (fill_up) {
   1.103 +      fill_up_remaining_space(alloc_region, _bot_updates);
   1.104 +    }
   1.105 +
   1.106 +    assert(alloc_region->used() >= _used_bytes_before,
   1.107 +           ar_ext_msg(this, "invariant"));
   1.108 +    size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
   1.109 +    retire_region(alloc_region, allocated_bytes);
   1.110 +    _used_bytes_before = 0;
   1.111 +    _alloc_region = _dummy_region;
   1.112 +  }
   1.113 +  trace("retired");
   1.114 +}
   1.115 +
   1.116 +HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
   1.117 +                                                       bool force) {
   1.118 +  assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
   1.119 +  assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));
   1.120 +
   1.121 +  trace("attempting region allocation");
   1.122 +  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
   1.123 +  if (new_alloc_region != NULL) {
   1.124 +    new_alloc_region->reset_pre_dummy_top();
   1.125 +    // Need to do this before the allocation
   1.126 +    _used_bytes_before = new_alloc_region->used();
   1.127 +    HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
   1.128 +    assert(result != NULL, ar_ext_msg(this, "the allocation should succeed"));
   1.129 +
   1.130 +    OrderAccess::storestore();
   1.131 +    // Note that we first perform the allocation and then we store the
   1.132 +    // region in _alloc_region. This is the reason why an active region
   1.133 +    // can never be empty.
   1.134 +    _alloc_region = new_alloc_region;
   1.135 +    trace("region allocation successful");
   1.136 +    return result;
   1.137 +  } else {
   1.138 +    trace("region allocation failed");
   1.139 +    return NULL;
   1.140 +  }
   1.141 +  ShouldNotReachHere();
   1.142 +}
   1.143 +
   1.144 +void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
   1.145 +  msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
   1.146 +              _name, message, BOOL_TO_STR(_bot_updates),
   1.147 +              _alloc_region, _used_bytes_before);
   1.148 +}
   1.149 +
   1.150 +void G1AllocRegion::init() {
   1.151 +  trace("initializing");
   1.152 +  assert(_alloc_region == NULL && _used_bytes_before == 0,
   1.153 +         ar_ext_msg(this, "pre-condition"));
   1.154 +  assert(_dummy_region != NULL, "should have been set");
   1.155 +  _alloc_region = _dummy_region;
   1.156 +  trace("initialized");
   1.157 +}
   1.158 +
   1.159 +HeapRegion* G1AllocRegion::release() {
   1.160 +  trace("releasing");
   1.161 +  HeapRegion* alloc_region = _alloc_region;
   1.162 +  retire(false /* fill_up */);
   1.163 +  assert(_alloc_region == _dummy_region, "post-condition of retire()");
   1.164 +  _alloc_region = NULL;
   1.165 +  trace("released");
   1.166 +  return (alloc_region == _dummy_region) ? NULL : alloc_region;
   1.167 +}
   1.168 +
   1.169 +#if G1_ALLOC_REGION_TRACING
   1.170 +void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
   1.171 +  // All the calls to trace that set either just the size or the size
   1.172 +  // and the result are considered part of level 2 tracing and are
   1.173 +  // skipped during level 1 tracing.
   1.174 +  if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) {
   1.175 +    const size_t buffer_length = 128;
   1.176 +    char hr_buffer[buffer_length];
   1.177 +    char rest_buffer[buffer_length];
   1.178 +
   1.179 +    HeapRegion* alloc_region = _alloc_region;
   1.180 +    if (alloc_region == NULL) {
   1.181 +      jio_snprintf(hr_buffer, buffer_length, "NULL");
   1.182 +    } else if (alloc_region == _dummy_region) {
   1.183 +      jio_snprintf(hr_buffer, buffer_length, "DUMMY");
   1.184 +    } else {
   1.185 +      jio_snprintf(hr_buffer, buffer_length,
   1.186 +                   HR_FORMAT, HR_FORMAT_PARAMS(alloc_region));
   1.187 +    }
   1.188 +
   1.189 +    if (G1_ALLOC_REGION_TRACING > 1) {
   1.190 +      if (result != NULL) {
   1.191 +        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT" "PTR_FORMAT,
   1.192 +                     word_size, result);
   1.193 +      } else if (word_size != 0) {
   1.194 +        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size);
   1.195 +      } else {
   1.196 +        jio_snprintf(rest_buffer, buffer_length, "");
   1.197 +      }
   1.198 +    } else {
   1.199 +      jio_snprintf(rest_buffer, buffer_length, "");
   1.200 +    }
   1.201 +
   1.202 +    tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer);
   1.203 +  }
   1.204 +}
   1.205 +#endif // G1_ALLOC_REGION_TRACING
   1.206 +
   1.207 +G1AllocRegion::G1AllocRegion(const char* name,
   1.208 +                             bool bot_updates)
   1.209 +  : _name(name), _bot_updates(bot_updates),
   1.210 +    _alloc_region(NULL), _used_bytes_before(0) { }
   1.211 +
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Mar 30 10:26:59 2011 -0400
     2.3 @@ -0,0 +1,174 @@
     2.4 +/*
     2.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
     2.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.7 + *
     2.8 + * This code is free software; you can redistribute it and/or modify it
     2.9 + * under the terms of the GNU General Public License version 2 only, as
    2.10 + * published by the Free Software Foundation.
    2.11 + *
    2.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    2.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    2.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    2.15 + * version 2 for more details (a copy is included in the LICENSE file that
    2.16 + * accompanied this code).
    2.17 + *
    2.18 + * You should have received a copy of the GNU General Public License version
    2.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    2.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    2.21 + *
    2.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    2.23 + * or visit www.oracle.com if you need additional information or have any
    2.24 + * questions.
    2.25 + *
    2.26 + */
    2.27 +
    2.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
    2.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
    2.30 +
    2.31 +#include "gc_implementation/g1/heapRegion.hpp"
    2.32 +
    2.33 +class G1CollectedHeap;
    2.34 +
    2.35 +// 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing
    2.36 +#define G1_ALLOC_REGION_TRACING 0
    2.37 +
    2.38 +class ar_ext_msg;
    2.39 +
    2.40 +// A class that holds a region that is active in satisfying allocation
    2.41 +// requests, potentially issued in parallel. When the active region is
    2.42 +// full it will be retired and replaced with a new one. The
    2.43 +// implementation assumes that fast-path allocations will be lock-free
    2.44 +// and a lock will need to be taken when the active region needs to be
    2.45 +// replaced.
    2.46 +
    2.47 +class G1AllocRegion VALUE_OBJ_CLASS_SPEC {
    2.48 +  friend class ar_ext_msg;
    2.49 +
    2.50 +private:
    2.51 +  // The active allocating region we are currently allocating out
    2.52 +  // of. The invariant is that if this object is initialized (i.e.,
    2.53 +  // init() has been called and release() has not) then _alloc_region
    2.54 +  // is either an active allocating region or the dummy region (i.e.,
    2.55 +  // it can never be NULL) and this object can be used to satisfy
    2.56 +  // allocation requests. If this object is not initialized
    2.57 +  // (i.e. init() has not been called or release() has been called)
    2.58 +  // then _alloc_region is NULL and this object should not be used to
    2.59 +  // satisfy allocation requests (it was done this way to force the
    2.60 +  // correct use of init() and release()).
    2.61 +  HeapRegion* _alloc_region;
    2.62 +
    2.63 +  // When we set up a new active region we save its used bytes in this
    2.64 +  // field so that, when we retire it, we can calculate how much space
    2.65 +  // we allocated in it.
    2.66 +  size_t _used_bytes_before;
    2.67 +
    2.68 +  // Specifies whether the allocate calls will do BOT updates or not.
    2.69 +  bool _bot_updates;
    2.70 +
    2.71 +  // Useful for debugging and tracing.
    2.72 +  const char* _name;
    2.73 +
    2.74 +  // A dummy region (i.e., it's been allocated specially for this
    2.75 +  // purpose and it is not part of the heap) that is full (i.e., top()
    2.76 +  // == end()). When we don't have a valid active region we make
    2.77 +  // _alloc_region point to this. This allows us to skip checking
    2.78 +  // whether the _alloc_region is NULL or not.
    2.79 +  static HeapRegion* _dummy_region;
    2.80 +
    2.81 +  // Some of the methods below take a bot_updates parameter. Its value
    2.82 +  // should be the same as the _bot_updates field. The idea is that
    2.83 +  // the parameter will be a constant for a particular alloc region
    2.84 +  // and, given that these methods will be hopefully inlined, the
    2.85 +  // compiler should compile out the test.
    2.86 +
    2.87 +  // Perform a non-MT-safe allocation out of the given region.
    2.88 +  static inline HeapWord* allocate(HeapRegion* alloc_region,
    2.89 +                                   size_t word_size,
    2.90 +                                   bool bot_updates);
    2.91 +
    2.92 +  // Perform a MT-safe allocation out of the given region.
    2.93 +  static inline HeapWord* par_allocate(HeapRegion* alloc_region,
    2.94 +                                       size_t word_size,
    2.95 +                                       bool bot_updates);
    2.96 +
    2.97 +  // Ensure that the region passed as a parameter has been filled up
    2.98 +  // so that no one else can allocate out of it any more.
    2.99 +  static void fill_up_remaining_space(HeapRegion* alloc_region,
   2.100 +                                      bool bot_updates);
   2.101 +
   2.102 +  // Retire the active allocating region. If fill_up is true then make
   2.103 +  // sure that the region is full before we retire it so that no one
   2.104 +  // else can allocate out of it.
   2.105 +  void retire(bool fill_up);
   2.106 +
   2.107 +  // Allocate a new active region and use it to perform a word_size
   2.108 +  // allocation. The force parameter will be passed on to
   2.109 +  // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
   2.110 +  // to allocate a new region even if the max has been reached.
   2.111 +  HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);
   2.112 +
   2.113 +  void fill_in_ext_msg(ar_ext_msg* msg, const char* message);
   2.114 +
   2.115 +protected:
   2.116 +  // For convenience as subclasses use it.
   2.117 +  static G1CollectedHeap* _g1h;
   2.118 +
   2.119 +  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0;
   2.120 +  virtual void retire_region(HeapRegion* alloc_region,
   2.121 +                             size_t allocated_bytes) = 0;
   2.122 +
   2.123 +  G1AllocRegion(const char* name, bool bot_updates);
   2.124 +
   2.125 +public:
   2.126 +  static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);
   2.127 +
   2.128 +  HeapRegion* get() const {
   2.129 +    // Make sure that the dummy region does not escape this class.
   2.130 +    return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
   2.131 +  }
   2.132 +
   2.133 +  // The following two are the building blocks for the allocation method.
   2.134 +
   2.135 +  // First-level allocation: Should be called without holding a
   2.136 +  // lock. It will try to allocate lock-free out of the active region,
   2.137 +  // or return NULL if it was unable to.
   2.138 +  inline HeapWord* attempt_allocation(size_t word_size, bool bot_updates);
   2.139 +
   2.140 +  // Second-level allocation: Should be called while holding a
   2.141 +  // lock. It will try to first allocate lock-free out of the active
   2.142 +  // region or, if it's unable to, it will try to replace the active
   2.143 +  // alloc region with a new one. We require that the caller takes the
   2.144 +  // appropriate lock before calling this so that it is easier to make
   2.145 +  // it conform to its locking protocol.
   2.146 +  inline HeapWord* attempt_allocation_locked(size_t word_size,
   2.147 +                                             bool bot_updates);
   2.148 +
   2.149 +  // Should be called to allocate a new region even if the max of this
   2.150 +  // type of regions has been reached. Should only be called if other
   2.151 +  // allocation attempts have failed and we are not holding a valid
   2.152 +  // active region.
   2.153 +  inline HeapWord* attempt_allocation_force(size_t word_size,
   2.154 +                                            bool bot_updates);
   2.155 +
   2.156 +  // Should be called before we start using this object.
   2.157 +  void init();
   2.158 +
   2.159 +  // Should be called when we want to release the active region which
   2.160 +  // is returned after it's been retired.
   2.161 +  HeapRegion* release();
   2.162 +
   2.163 +#if G1_ALLOC_REGION_TRACING
   2.164 +  void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
   2.165 +#else // G1_ALLOC_REGION_TRACING
   2.166 +  void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL) { }
   2.167 +#endif // G1_ALLOC_REGION_TRACING
   2.168 +};
   2.169 +
   2.170 +class ar_ext_msg : public err_msg {
   2.171 +public:
   2.172 +  ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("") {
   2.173 +    alloc_region->fill_in_ext_msg(this, message);
   2.174 +  }
   2.175 +};
   2.176 +
   2.177 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
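
For reference, a concrete alloc region is expected to supply the two pure virtuals declared above. The fragment below is a hypothetical sketch, not part of this changeset: it assumes the G1AllocRegion declarations in this header, and the two G1CollectedHeap helpers it calls (new_mutator_alloc_region() and retire_mutator_alloc_region()) are invented names standing for whatever region-acquisition and accounting code the heap provides.

// Hypothetical subclass sketch (illustration only).
class ExampleMutatorAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) {
    // Ask the heap for a fresh region; young-list limits and the force flag
    // are the heap's business, not this class's.
    return _g1h->new_mutator_alloc_region(word_size, force);          // invented helper
  }

  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes) {
    // Hand the retired region back to the heap and record how much was
    // allocated in it while it was active.
    _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes); // invented helper
  }

public:
  ExampleMutatorAllocRegion()
    : G1AllocRegion("Example Mutator Alloc Region", false /* bot_updates */) { }
};
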
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Wed Mar 30 10:26:59 2011 -0400
     3.3 @@ -0,0 +1,106 @@
     3.4 +/*
     3.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
     3.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.7 + *
     3.8 + * This code is free software; you can redistribute it and/or modify it
     3.9 + * under the terms of the GNU General Public License version 2 only, as
    3.10 + * published by the Free Software Foundation.
    3.11 + *
    3.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    3.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    3.15 + * version 2 for more details (a copy is included in the LICENSE file that
    3.16 + * accompanied this code).
    3.17 + *
    3.18 + * You should have received a copy of the GNU General Public License version
    3.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    3.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    3.21 + *
    3.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    3.23 + * or visit www.oracle.com if you need additional information or have any
    3.24 + * questions.
    3.25 + *
    3.26 + */
    3.27 +
    3.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
    3.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
    3.30 +
    3.31 +#include "gc_implementation/g1/g1AllocRegion.hpp"
    3.32 +
    3.33 +inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
    3.34 +                                         size_t word_size,
    3.35 +                                         bool bot_updates) {
    3.36 +  assert(alloc_region != NULL, err_msg("pre-condition"));
    3.37 +
    3.38 +  if (!bot_updates) {
    3.39 +    return alloc_region->allocate_no_bot_updates(word_size);
    3.40 +  } else {
    3.41 +    return alloc_region->allocate(word_size);
    3.42 +  }
    3.43 +}
    3.44 +
    3.45 +inline HeapWord* G1AllocRegion::par_allocate(HeapRegion* alloc_region,
    3.46 +                                             size_t word_size,
    3.47 +                                             bool bot_updates) {
    3.48 +  assert(alloc_region != NULL, err_msg("pre-condition"));
    3.49 +  assert(!alloc_region->is_empty(), err_msg("pre-condition"));
    3.50 +
    3.51 +  if (!bot_updates) {
    3.52 +    return alloc_region->par_allocate_no_bot_updates(word_size);
    3.53 +  } else {
    3.54 +    return alloc_region->par_allocate(word_size);
    3.55 +  }
    3.56 +}
    3.57 +
    3.58 +inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size,
    3.59 +                                                   bool bot_updates) {
    3.60 +  assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition"));
    3.61 +
    3.62 +  HeapRegion* alloc_region = _alloc_region;
    3.63 +  assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
    3.64 +
    3.65 +  HeapWord* result = par_allocate(alloc_region, word_size, bot_updates);
    3.66 +  if (result != NULL) {
    3.67 +    trace("alloc", word_size, result);
    3.68 +    return result;
    3.69 +  }
    3.70 +  trace("alloc failed", word_size);
    3.71 +  return NULL;
    3.72 +}
    3.73 +
    3.74 +inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
    3.75 +                                                          bool bot_updates) {
    3.76 +  // First we have to redo the allocation, assuming we're holding the
    3.77 +  // appropriate lock, in case another thread changed the region while
    3.78 +  // we were waiting to get the lock.
    3.79 +  HeapWord* result = attempt_allocation(word_size, bot_updates);
    3.80 +  if (result != NULL) {
    3.81 +    return result;
    3.82 +  }
    3.83 +
    3.84 +  retire(true /* fill_up */);
    3.85 +  result = new_alloc_region_and_allocate(word_size, false /* force */);
    3.86 +  if (result != NULL) {
    3.87 +    trace("alloc locked (second attempt)", word_size, result);
    3.88 +    return result;
    3.89 +  }
    3.90 +  trace("alloc locked failed", word_size);
    3.91 +  return NULL;
    3.92 +}
    3.93 +
    3.94 +inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size,
    3.95 +                                                         bool bot_updates) {
    3.96 +  assert(bot_updates == _bot_updates, ar_ext_msg(this, "pre-condition"));
    3.97 +  assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
    3.98 +
    3.99 +  trace("forcing alloc");
   3.100 +  HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */);
   3.101 +  if (result != NULL) {
   3.102 +    trace("alloc forced", word_size, result);
   3.103 +    return result;
   3.104 +  }
   3.105 +  trace("alloc forced failed", word_size);
   3.106 +  return NULL;
   3.107 +}
   3.108 +
   3.109 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
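
Taken together, the first- and second-level entry points above pair up on the caller side roughly as follows. This is a condensed sketch of the pattern the new G1CollectedHeap code in this changeset uses (GC retries and failure handling are omitted); _mutator_alloc_region is the heap field the mutator slow path allocates from.

// Condensed caller-side pattern (sketch only).
HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                            false /* bot_updates */);
if (result == NULL) {
  // Symmetric slow path: take the lock, then retry and, if needed, replace
  // the active region under that same lock.
  MutexLockerEx x(Heap_lock);
  result = _mutator_alloc_region.attempt_allocation_locked(word_size,
                                                           false /* bot_updates */);
}
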
     4.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Mar 29 22:36:16 2011 -0400
     4.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Mar 30 10:26:59 2011 -0400
     4.3 @@ -28,6 +28,7 @@
     4.4  #include "gc_implementation/g1/concurrentG1Refine.hpp"
     4.5  #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
     4.6  #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
     4.7 +#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
     4.8  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
     4.9  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    4.10  #include "gc_implementation/g1/g1MarkSweep.hpp"
    4.11 @@ -517,8 +518,7 @@
    4.12    return NULL;
    4.13  }
    4.14  
    4.15 -HeapRegion* G1CollectedHeap::new_region_work(size_t word_size,
    4.16 -                                             bool do_expand) {
    4.17 +HeapRegion* G1CollectedHeap::new_region(size_t word_size, bool do_expand) {
    4.18    assert(!isHumongous(word_size) ||
    4.19                                    word_size <= (size_t) HeapRegion::GrainWords,
    4.20           "the only time we use this to allocate a humongous region is "
    4.21 @@ -566,7 +566,7 @@
    4.22                                                   size_t word_size) {
    4.23    HeapRegion* alloc_region = NULL;
    4.24    if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
    4.25 -    alloc_region = new_region_work(word_size, true /* do_expand */);
    4.26 +    alloc_region = new_region(word_size, true /* do_expand */);
    4.27      if (purpose == GCAllocForSurvived && alloc_region != NULL) {
    4.28        alloc_region->set_survivor();
    4.29      }
    4.30 @@ -587,7 +587,7 @@
    4.31      // Only one region to allocate, no need to go through the slower
    4.32      // path. The caller will attempt the expasion if this fails, so
    4.33      // let's not try to expand here too.
    4.34 -    HeapRegion* hr = new_region_work(word_size, false /* do_expand */);
    4.35 +    HeapRegion* hr = new_region(word_size, false /* do_expand */);
    4.36      if (hr != NULL) {
    4.37        first = hr->hrs_index();
    4.38      } else {
    4.39 @@ -788,407 +788,12 @@
    4.40    return result;
    4.41  }
    4.42  
    4.43 -void
    4.44 -G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
    4.45 -  // Other threads might still be trying to allocate using CASes out
    4.46 -  // of the region we are retiring, as they can do so without holding
    4.47 -  // the Heap_lock. So we first have to make sure that noone else can
    4.48 -  // allocate in it by doing a maximal allocation. Even if our CAS
    4.49 -  // attempt fails a few times, we'll succeed sooner or later given
    4.50 -  // that a failed CAS attempt mean that the region is getting closed
    4.51 -  // to being full (someone else succeeded in allocating into it).
    4.52 -  size_t free_word_size = cur_alloc_region->free() / HeapWordSize;
    4.53 -
    4.54 -  // This is the minimum free chunk we can turn into a dummy
    4.55 -  // object. If the free space falls below this, then noone can
    4.56 -  // allocate in this region anyway (all allocation requests will be
    4.57 -  // of a size larger than this) so we won't have to perform the dummy
    4.58 -  // allocation.
    4.59 -  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
    4.60 -
    4.61 -  while (free_word_size >= min_word_size_to_fill) {
    4.62 -    HeapWord* dummy =
    4.63 -      cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
    4.64 -    if (dummy != NULL) {
    4.65 -      // If the allocation was successful we should fill in the space.
    4.66 -      CollectedHeap::fill_with_object(dummy, free_word_size);
    4.67 -      break;
    4.68 -    }
    4.69 -
    4.70 -    free_word_size = cur_alloc_region->free() / HeapWordSize;
    4.71 -    // It's also possible that someone else beats us to the
    4.72 -    // allocation and they fill up the region. In that case, we can
    4.73 -    // just get out of the loop
    4.74 -  }
    4.75 -  assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
    4.76 -         "sanity");
    4.77 -
    4.78 -  retire_cur_alloc_region_common(cur_alloc_region);
    4.79 -  assert(_cur_alloc_region == NULL, "post-condition");
    4.80 -}
    4.81 -
    4.82 -// See the comment in the .hpp file about the locking protocol and
    4.83 -// assumptions of this method (and other related ones).
    4.84 -HeapWord*
    4.85 -G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
    4.86 -                                                       bool at_safepoint,
    4.87 -                                                       bool do_dirtying,
    4.88 -                                                       bool can_expand) {
    4.89 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
    4.90 -  assert(_cur_alloc_region == NULL,
    4.91 -         "replace_cur_alloc_region_and_allocate() should only be called "
    4.92 -         "after retiring the previous current alloc region");
    4.93 -  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
    4.94 -         "at_safepoint and is_at_safepoint() should be a tautology");
    4.95 -  assert(!can_expand || g1_policy()->can_expand_young_list(),
    4.96 -         "we should not call this method with can_expand == true if "
    4.97 -         "we are not allowed to expand the young gen");
    4.98 -
    4.99 -  if (can_expand || !g1_policy()->is_young_list_full()) {
   4.100 -    HeapRegion* new_cur_alloc_region = new_alloc_region(word_size);
   4.101 -    if (new_cur_alloc_region != NULL) {
   4.102 -      assert(new_cur_alloc_region->is_empty(),
   4.103 -             "the newly-allocated region should be empty, "
   4.104 -             "as right now we only allocate new regions out of the free list");
   4.105 -      g1_policy()->update_region_num(true /* next_is_young */);
   4.106 -      set_region_short_lived_locked(new_cur_alloc_region);
   4.107 -
   4.108 -      assert(!new_cur_alloc_region->isHumongous(),
   4.109 -             "Catch a regression of this bug.");
   4.110 -
   4.111 -      // We need to ensure that the stores to _cur_alloc_region and,
   4.112 -      // subsequently, to top do not float above the setting of the
   4.113 -      // young type.
   4.114 -      OrderAccess::storestore();
   4.115 -
   4.116 -      // Now, perform the allocation out of the region we just
   4.117 -      // allocated. Note that noone else can access that region at
   4.118 -      // this point (as _cur_alloc_region has not been updated yet),
   4.119 -      // so we can just go ahead and do the allocation without any
   4.120 -      // atomics (and we expect this allocation attempt to
   4.121 -      // suceeded). Given that other threads can attempt an allocation
   4.122 -      // with a CAS and without needing the Heap_lock, if we assigned
   4.123 -      // the new region to _cur_alloc_region before first allocating
   4.124 -      // into it other threads might have filled up the new region
   4.125 -      // before we got a chance to do the allocation ourselves. In
   4.126 -      // that case, we would have needed to retire the region, grab a
   4.127 -      // new one, and go through all this again. Allocating out of the
   4.128 -      // new region before assigning it to _cur_alloc_region avoids
   4.129 -      // all this.
   4.130 -      HeapWord* result =
   4.131 -                     new_cur_alloc_region->allocate_no_bot_updates(word_size);
   4.132 -      assert(result != NULL, "we just allocate out of an empty region "
   4.133 -             "so allocation should have been successful");
   4.134 -      assert(is_in(result), "result should be in the heap");
   4.135 -
   4.136 -      // Now make sure that the store to _cur_alloc_region does not
   4.137 -      // float above the store to top.
   4.138 -      OrderAccess::storestore();
   4.139 -      _cur_alloc_region = new_cur_alloc_region;
   4.140 -
   4.141 -      if (!at_safepoint) {
   4.142 -        Heap_lock->unlock();
   4.143 -      }
   4.144 -
   4.145 -      // do the dirtying, if necessary, after we release the Heap_lock
   4.146 -      if (do_dirtying) {
   4.147 -        dirty_young_block(result, word_size);
   4.148 -      }
   4.149 -      return result;
   4.150 -    }
   4.151 -  }
   4.152 -
   4.153 -  assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
   4.154 -         "alloc region, it should still be NULL");
   4.155 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   4.156 -  return NULL;
   4.157 -}
   4.158 -
   4.159 -// See the comment in the .hpp file about the locking protocol and
   4.160 -// assumptions of this method (and other related ones).
   4.161 -HeapWord*
   4.162 -G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   4.163 -  assert_heap_locked_and_not_at_safepoint();
   4.164 -  assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
   4.165 -         "used for humongous allocations");
   4.166 -
   4.167 -  // We should only reach here when we were unable to allocate
   4.168 -  // otherwise. So, we should have not active current alloc region.
   4.169 -  assert(_cur_alloc_region == NULL, "current alloc region should be NULL");
   4.170 -
   4.171 -  // We will loop while succeeded is false, which means that we tried
   4.172 -  // to do a collection, but the VM op did not succeed. So, when we
   4.173 -  // exit the loop, either one of the allocation attempts was
   4.174 -  // successful, or we succeeded in doing the VM op but which was
   4.175 -  // unable to allocate after the collection.
   4.176 -  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   4.177 -    bool succeeded = true;
   4.178 -
   4.179 -    // Every time we go round the loop we should be holding the Heap_lock.
   4.180 -    assert_heap_locked();
   4.181 -
   4.182 -    if (GC_locker::is_active_and_needs_gc()) {
   4.183 -      // We are locked out of GC because of the GC locker. We can
   4.184 -      // allocate a new region only if we can expand the young gen.
   4.185 -
   4.186 -      if (g1_policy()->can_expand_young_list()) {
   4.187 -        // Yes, we are allowed to expand the young gen. Let's try to
   4.188 -        // allocate a new current alloc region.
   4.189 -        HeapWord* result =
   4.190 -          replace_cur_alloc_region_and_allocate(word_size,
   4.191 -                                                false, /* at_safepoint */
   4.192 -                                                true,  /* do_dirtying */
   4.193 -                                                true   /* can_expand */);
   4.194 -        if (result != NULL) {
   4.195 -          assert_heap_not_locked();
   4.196 -          return result;
   4.197 -        }
   4.198 -      }
   4.199 -      // We could not expand the young gen further (or we could but we
   4.200 -      // failed to allocate a new region). We'll stall until the GC
   4.201 -      // locker forces a GC.
   4.202 -
   4.203 -      // If this thread is not in a jni critical section, we stall
   4.204 -      // the requestor until the critical section has cleared and
   4.205 -      // GC allowed. When the critical section clears, a GC is
   4.206 -      // initiated by the last thread exiting the critical section; so
   4.207 -      // we retry the allocation sequence from the beginning of the loop,
   4.208 -      // rather than causing more, now probably unnecessary, GC attempts.
   4.209 -      JavaThread* jthr = JavaThread::current();
   4.210 -      assert(jthr != NULL, "sanity");
   4.211 -      if (jthr->in_critical()) {
   4.212 -        if (CheckJNICalls) {
   4.213 -          fatal("Possible deadlock due to allocating while"
   4.214 -                " in jni critical section");
   4.215 -        }
   4.216 -        // We are returning NULL so the protocol is that we're still
   4.217 -        // holding the Heap_lock.
   4.218 -        assert_heap_locked();
   4.219 -        return NULL;
   4.220 -      }
   4.221 -
   4.222 -      Heap_lock->unlock();
   4.223 -      GC_locker::stall_until_clear();
   4.224 -
   4.225 -      // No need to relock the Heap_lock. We'll fall off to the code
   4.226 -      // below the else-statement which assumes that we are not
   4.227 -      // holding the Heap_lock.
   4.228 -    } else {
   4.229 -      // We are not locked out. So, let's try to do a GC. The VM op
   4.230 -      // will retry the allocation before it completes.
   4.231 -
   4.232 -      // Read the GC count while holding the Heap_lock
   4.233 -      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   4.234 -
   4.235 -      Heap_lock->unlock();
   4.236 -
   4.237 -      HeapWord* result =
   4.238 -        do_collection_pause(word_size, gc_count_before, &succeeded);
   4.239 -      assert_heap_not_locked();
   4.240 -      if (result != NULL) {
   4.241 -        assert(succeeded, "the VM op should have succeeded");
   4.242 -
   4.243 -        // Allocations that take place on VM operations do not do any
   4.244 -        // card dirtying and we have to do it here.
   4.245 -        dirty_young_block(result, word_size);
   4.246 -        return result;
   4.247 -      }
   4.248 -    }
   4.249 -
   4.250 -    // Both paths that get us here from above unlock the Heap_lock.
   4.251 -    assert_heap_not_locked();
   4.252 -
   4.253 -    // We can reach here when we were unsuccessful in doing a GC,
   4.254 -    // because another thread beat us to it, or because we were locked
   4.255 -    // out of GC due to the GC locker. In either case a new alloc
   4.256 -    // region might be available so we will retry the allocation.
   4.257 -    HeapWord* result = attempt_allocation(word_size);
   4.258 -    if (result != NULL) {
   4.259 -      assert_heap_not_locked();
   4.260 -      return result;
   4.261 -    }
   4.262 -
   4.263 -    // So far our attempts to allocate failed. The only time we'll go
   4.264 -    // around the loop and try again is if we tried to do a GC and the
   4.265 -    // VM op that we tried to schedule was not successful because
   4.266 -    // another thread beat us to it. If that happened it's possible
   4.267 -    // that by the time we grabbed the Heap_lock again and tried to
   4.268 -    // allocate other threads filled up the young generation, which
   4.269 -    // means that the allocation attempt after the GC also failed. So,
   4.270 -    // it's worth trying to schedule another GC pause.
   4.271 -    if (succeeded) {
   4.272 -      break;
   4.273 -    }
   4.274 -
   4.275 -    // Give a warning if we seem to be looping forever.
   4.276 -    if ((QueuedAllocationWarningCount > 0) &&
   4.277 -        (try_count % QueuedAllocationWarningCount == 0)) {
   4.278 -      warning("G1CollectedHeap::attempt_allocation_slow() "
   4.279 -              "retries %d times", try_count);
   4.280 -    }
   4.281 -  }
   4.282 -
   4.283 -  assert_heap_locked();
   4.284 -  return NULL;
   4.285 -}
   4.286 -
   4.287 -// See the comment in the .hpp file about the locking protocol and
   4.288 -// assumptions of this method (and other related ones).
   4.289 -HeapWord*
   4.290 -G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   4.291 -                                              bool at_safepoint) {
   4.292 -  // This is the method that will allocate a humongous object. All
   4.293 -  // allocation paths that attempt to allocate a humongous object
   4.294 -  // should eventually reach here. Currently, the only paths are from
   4.295 -  // mem_allocate() and attempt_allocation_at_safepoint().
   4.296 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   4.297 -  assert(isHumongous(word_size), "attempt_allocation_humongous() "
   4.298 -         "should only be used for humongous allocations");
   4.299 -  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
   4.300 -         "at_safepoint and is_at_safepoint() should be a tautology");
   4.301 -
   4.302 -  HeapWord* result = NULL;
   4.303 -
   4.304 -  // We will loop while succeeded is false, which means that we tried
   4.305 -  // to do a collection, but the VM op did not succeed. So, when we
   4.306 -  // exit the loop, either one of the allocation attempts was
   4.307 -  // successful, or we succeeded in doing the VM op but which was
   4.308 -  // unable to allocate after the collection.
   4.309 -  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
   4.310 -    bool succeeded = true;
   4.311 -
   4.312 -    // Given that humongous objects are not allocated in young
   4.313 -    // regions, we'll first try to do the allocation without doing a
   4.314 -    // collection hoping that there's enough space in the heap.
   4.315 -    result = humongous_obj_allocate(word_size);
   4.316 -    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
   4.317 -           "catch a regression of this bug.");
   4.318 -    if (result != NULL) {
   4.319 -      if (!at_safepoint) {
   4.320 -        // If we're not at a safepoint, unlock the Heap_lock.
   4.321 -        Heap_lock->unlock();
   4.322 -      }
   4.323 -      return result;
   4.324 -    }
   4.325 -
   4.326 -    // If we failed to allocate the humongous object, we should try to
   4.327 -    // do a collection pause (if we're allowed) in case it reclaims
   4.328 -    // enough space for the allocation to succeed after the pause.
   4.329 -    if (!at_safepoint) {
   4.330 -      // Read the GC count while holding the Heap_lock
   4.331 -      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
   4.332 -
   4.333 -      // If we're allowed to do a collection we're not at a
   4.334 -      // safepoint, so it is safe to unlock the Heap_lock.
   4.335 -      Heap_lock->unlock();
   4.336 -
   4.337 -      result = do_collection_pause(word_size, gc_count_before, &succeeded);
   4.338 -      assert_heap_not_locked();
   4.339 -      if (result != NULL) {
   4.340 -        assert(succeeded, "the VM op should have succeeded");
   4.341 -        return result;
   4.342 -      }
   4.343 -
   4.344 -      // If we get here, the VM operation either did not succeed
   4.345 -      // (i.e., another thread beat us to it) or it succeeded but
   4.346 -      // failed to allocate the object.
   4.347 -
   4.348 -      // If we're allowed to do a collection we're not at a
   4.349 -      // safepoint, so it is safe to lock the Heap_lock.
   4.350 -      Heap_lock->lock();
   4.351 -    }
   4.352 -
   4.353 -    assert(result == NULL, "otherwise we should have exited the loop earlier");
   4.354 -
   4.355 -    // So far our attempts to allocate failed. The only time we'll go
   4.356 -    // around the loop and try again is if we tried to do a GC and the
   4.357 -    // VM op that we tried to schedule was not successful because
   4.358 -    // another thread beat us to it. That way it's possible that some
   4.359 -    // space was freed up by the thread that successfully scheduled a
   4.360 -    // GC. So it's worth trying to allocate again.
   4.361 -    if (succeeded) {
   4.362 -      break;
   4.363 -    }
   4.364 -
   4.365 -    // Give a warning if we seem to be looping forever.
   4.366 -    if ((QueuedAllocationWarningCount > 0) &&
   4.367 -        (try_count % QueuedAllocationWarningCount == 0)) {
   4.368 -      warning("G1CollectedHeap::attempt_allocation_humongous "
   4.369 -              "retries %d times", try_count);
   4.370 -    }
   4.371 -  }
   4.372 -
   4.373 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   4.374 -  return NULL;
   4.375 -}
   4.376 -
   4.377 -HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
   4.378 -                                           bool expect_null_cur_alloc_region) {
   4.379 -  assert_at_safepoint(true /* should_be_vm_thread */);
   4.380 -  assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
   4.381 -         err_msg("the current alloc region was unexpectedly found "
   4.382 -                 "to be non-NULL, cur alloc region: "PTR_FORMAT" "
   4.383 -                 "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
   4.384 -                 _cur_alloc_region, expect_null_cur_alloc_region, word_size));
   4.385 -
   4.386 -  if (!isHumongous(word_size)) {
   4.387 -    if (!expect_null_cur_alloc_region) {
   4.388 -      HeapRegion* cur_alloc_region = _cur_alloc_region;
   4.389 -      if (cur_alloc_region != NULL) {
   4.390 -        // We are at a safepoint so no reason to use the MT-safe version.
   4.391 -        HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
   4.392 -        if (result != NULL) {
   4.393 -          assert(is_in(result), "result should be in the heap");
   4.394 -
   4.395 -          // We will not do any dirtying here. This is guaranteed to be
   4.396 -          // called during a safepoint and the thread that scheduled the
   4.397 -          // pause will do the dirtying if we return a non-NULL result.
   4.398 -          return result;
   4.399 -        }
   4.400 -
   4.401 -        retire_cur_alloc_region_common(cur_alloc_region);
   4.402 -      }
   4.403 -    }
   4.404 -
   4.405 -    assert(_cur_alloc_region == NULL,
   4.406 -           "at this point we should have no cur alloc region");
   4.407 -    return replace_cur_alloc_region_and_allocate(word_size,
   4.408 -                                                 true, /* at_safepoint */
   4.409 -                                                 false /* do_dirtying */,
   4.410 -                                                 false /* can_expand */);
   4.411 -  } else {
   4.412 -    return attempt_allocation_humongous(word_size,
   4.413 -                                        true /* at_safepoint */);
   4.414 -  }
   4.415 -
   4.416 -  ShouldNotReachHere();
   4.417 -}
   4.418 -
   4.419  HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   4.420    assert_heap_not_locked_and_not_at_safepoint();
   4.421 -  assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
   4.422 -
   4.423 -  // First attempt: Try allocating out of the current alloc region
   4.424 -  // using a CAS. If that fails, take the Heap_lock and retry the
   4.425 -  // allocation, potentially replacing the current alloc region.
   4.426 -  HeapWord* result = attempt_allocation(word_size);
   4.427 -  if (result != NULL) {
   4.428 -    assert_heap_not_locked();
   4.429 -    return result;
   4.430 -  }
   4.431 -
   4.432 -  // Second attempt: Go to the slower path where we might try to
   4.433 -  // schedule a collection.
   4.434 -  result = attempt_allocation_slow(word_size);
   4.435 -  if (result != NULL) {
   4.436 -    assert_heap_not_locked();
   4.437 -    return result;
   4.438 -  }
   4.439 -
   4.440 -  assert_heap_locked();
   4.441 -  // Need to unlock the Heap_lock before returning.
   4.442 -  Heap_lock->unlock();
   4.443 -  return NULL;
   4.444 +  assert(!isHumongous(word_size), "we do not allow humongous TLABs");
   4.445 +
   4.446 +  unsigned int dummy_gc_count_before;
   4.447 +  return attempt_allocation(word_size, &dummy_gc_count_before);
   4.448  }
   4.449  
   4.450  HeapWord*
   4.451 @@ -1200,48 +805,18 @@
   4.452    assert(!is_tlab, "mem_allocate() this should not be called directly "
   4.453           "to allocate TLABs");
   4.454  
   4.455 -  // Loop until the allocation is satisified,
   4.456 -  // or unsatisfied after GC.
   4.457 +  // Loop until the allocation is satisfied, or unsatisfied after GC.
   4.458    for (int try_count = 1; /* we'll return */; try_count += 1) {
   4.459      unsigned int gc_count_before;
   4.460 -    {
   4.461 -      if (!isHumongous(word_size)) {
   4.462 -        // First attempt: Try allocating out of the current alloc region
   4.463 -        // using a CAS. If that fails, take the Heap_lock and retry the
   4.464 -        // allocation, potentially replacing the current alloc region.
   4.465 -        HeapWord* result = attempt_allocation(word_size);
   4.466 -        if (result != NULL) {
   4.467 -          assert_heap_not_locked();
   4.468 -          return result;
   4.469 -        }
   4.470 -
   4.471 -        assert_heap_locked();
   4.472 -
   4.473 -        // Second attempt: Go to the slower path where we might try to
   4.474 -        // schedule a collection.
   4.475 -        result = attempt_allocation_slow(word_size);
   4.476 -        if (result != NULL) {
   4.477 -          assert_heap_not_locked();
   4.478 -          return result;
   4.479 -        }
   4.480 -      } else {
   4.481 -        // attempt_allocation_humongous() requires the Heap_lock to be held.
   4.482 -        Heap_lock->lock();
   4.483 -
   4.484 -        HeapWord* result = attempt_allocation_humongous(word_size,
   4.485 -                                                     false /* at_safepoint */);
   4.486 -        if (result != NULL) {
   4.487 -          assert_heap_not_locked();
   4.488 -          return result;
   4.489 -        }
   4.490 -      }
   4.491 -
   4.492 -      assert_heap_locked();
   4.493 -      // Read the gc count while the heap lock is held.
   4.494 -      gc_count_before = SharedHeap::heap()->total_collections();
   4.495 -
   4.496 -      // Release the Heap_lock before attempting the collection.
   4.497 -      Heap_lock->unlock();
   4.498 +
   4.499 +    HeapWord* result = NULL;
   4.500 +    if (!isHumongous(word_size)) {
   4.501 +      result = attempt_allocation(word_size, &gc_count_before);
   4.502 +    } else {
   4.503 +      result = attempt_allocation_humongous(word_size, &gc_count_before);
   4.504 +    }
   4.505 +    if (result != NULL) {
   4.506 +      return result;
   4.507      }
   4.508  
   4.509      // Create the garbage collection operation...
   4.510 @@ -1249,7 +824,6 @@
   4.511      // ...and get the VM thread to execute it.
   4.512      VMThread::execute(&op);
   4.513  
   4.514 -    assert_heap_not_locked();
   4.515      if (op.prologue_succeeded() && op.pause_succeeded()) {
   4.516        // If the operation was successful we'll return the result even
   4.517        // if it is NULL. If the allocation attempt failed immediately
   4.518 @@ -1275,21 +849,207 @@
   4.519    }
   4.520  
   4.521    ShouldNotReachHere();
   4.522 +  return NULL;
   4.523  }
   4.524  
   4.525 -void G1CollectedHeap::abandon_cur_alloc_region() {
   4.526 +HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   4.527 +                                           unsigned int *gc_count_before_ret) {
   4.528 +  // Make sure you read the note in attempt_allocation_humongous().
   4.529 +
   4.530 +  assert_heap_not_locked_and_not_at_safepoint();
   4.531 +  assert(!isHumongous(word_size), "attempt_allocation_slow() should not "
   4.532 +         "be called for humongous allocation requests");
   4.533 +
   4.534 +  // We should only get here after the first-level allocation attempt
   4.535 +  // (attempt_allocation()) failed to allocate.
   4.536 +
   4.537 +  // We will loop until a) we manage to successfully perform the
   4.538 +  // allocation or b) we successfully schedule a collection which
   4.539 +  // fails to perform the allocation. b) is the only case when we'll
   4.540 +  // return NULL.
   4.541 +  HeapWord* result = NULL;
   4.542 +  for (int try_count = 1; /* we'll return */; try_count += 1) {
   4.543 +    bool should_try_gc;
   4.544 +    unsigned int gc_count_before;
   4.545 +
   4.546 +    {
   4.547 +      MutexLockerEx x(Heap_lock);
   4.548 +
   4.549 +      result = _mutator_alloc_region.attempt_allocation_locked(word_size,
   4.550 +                                                      false /* bot_updates */);
   4.551 +      if (result != NULL) {
   4.552 +        return result;
   4.553 +      }
   4.554 +
   4.555 +      // If we reach here, attempt_allocation_locked() above failed to
   4.556 +      // allocate a new region. So the mutator alloc region should be NULL.
   4.557 +      assert(_mutator_alloc_region.get() == NULL, "only way to get here");
   4.558 +
   4.559 +      if (GC_locker::is_active_and_needs_gc()) {
   4.560 +        if (g1_policy()->can_expand_young_list()) {
   4.561 +          result = _mutator_alloc_region.attempt_allocation_force(word_size,
   4.562 +                                                      false /* bot_updates */);
   4.563 +          if (result != NULL) {
   4.564 +            return result;
   4.565 +          }
   4.566 +        }
   4.567 +        should_try_gc = false;
   4.568 +      } else {
   4.569 +        // Read the GC count while still holding the Heap_lock.
   4.570 +        gc_count_before = SharedHeap::heap()->total_collections();
   4.571 +        should_try_gc = true;
   4.572 +      }
   4.573 +    }
   4.574 +
   4.575 +    if (should_try_gc) {
   4.576 +      bool succeeded;
   4.577 +      result = do_collection_pause(word_size, gc_count_before, &succeeded);
   4.578 +      if (result != NULL) {
   4.579 +        assert(succeeded, "only way to get back a non-NULL result");
   4.580 +        return result;
   4.581 +      }
   4.582 +
   4.583 +      if (succeeded) {
   4.584 +        // If we get here we successfully scheduled a collection which
   4.585 +        // failed to allocate. No point in trying to allocate
   4.586 +        // further. We'll just return NULL.
   4.587 +        MutexLockerEx x(Heap_lock);
   4.588 +        *gc_count_before_ret = SharedHeap::heap()->total_collections();
   4.589 +        return NULL;
   4.590 +      }
   4.591 +    } else {
   4.592 +      GC_locker::stall_until_clear();
   4.593 +    }
   4.594 +
   4.595 +    // We can reach here if we were unsuccessful in scheduling a
   4.596 +    // collection (because another thread beat us to it) or if we were
   4.597 +    // stalled due to the GC locker. In either case we should retry the
   4.598 +    // allocation attempt in case another thread successfully
   4.599 +    // performed a collection and reclaimed enough space. We do the
   4.600 +    // first attempt (without holding the Heap_lock) here and the
   4.601 +    // follow-on attempt will be at the start of the next loop
   4.602 +    // iteration (after taking the Heap_lock).
   4.603 +    result = _mutator_alloc_region.attempt_allocation(word_size,
   4.604 +                                                      false /* bot_updates */);
   4.605 +    if (result != NULL) {
   4.606 +      return result;
   4.607 +    }
   4.608 +
   4.609 +    // Give a warning if we seem to be looping forever.
   4.610 +    if ((QueuedAllocationWarningCount > 0) &&
   4.611 +        (try_count % QueuedAllocationWarningCount == 0)) {
   4.612 +      warning("G1CollectedHeap::attempt_allocation_slow() "
   4.613 +              "retries %d times", try_count);
   4.614 +    }
   4.615 +  }
   4.616 +
   4.617 +  ShouldNotReachHere();
   4.618 +  return NULL;
   4.619 +}
   4.620 +
   4.621 +HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   4.622 +                                           unsigned int* gc_count_before_ret) {
   4.623 +  // The structure of this method has a lot of similarities to
   4.624 +  // attempt_allocation_slow(). The reason these two were not merged
   4.625 +  // into a single one is that such a method would require several "if
   4.626 +  // allocation is not humongous do this, otherwise do that"
   4.627 +  // conditional paths which would obscure its flow. In fact, an early
   4.628 +  // version of this code did use a unified method which was harder to
   4.629 +  // follow and, as a result, it had subtle bugs that were hard to
   4.630 +  // track down. So keeping these two methods separate allows each to
   4.631 +  // be more readable. It will be good to keep these two in sync as
   4.632 +  // much as possible.
   4.633 +
   4.634 +  assert_heap_not_locked_and_not_at_safepoint();
   4.635 +  assert(isHumongous(word_size), "attempt_allocation_humongous() "
   4.636 +         "should only be called for humongous allocations");
   4.637 +
   4.638 +  // We will loop until a) we manage to successfully perform the
   4.639 +  // allocation or b) we successfully schedule a collection which
   4.640 +  // fails to perform the allocation. b) is the only case when we'll
   4.641 +  // return NULL.
   4.642 +  HeapWord* result = NULL;
   4.643 +  for (int try_count = 1; /* we'll return */; try_count += 1) {
   4.644 +    bool should_try_gc;
   4.645 +    unsigned int gc_count_before;
   4.646 +
   4.647 +    {
   4.648 +      MutexLockerEx x(Heap_lock);
   4.649 +
   4.650 +      // Given that humongous objects are not allocated in young
   4.651 +      // regions, we'll first try to do the allocation without doing a
   4.652 +      // collection hoping that there's enough space in the heap.
   4.653 +      result = humongous_obj_allocate(word_size);
   4.654 +      if (result != NULL) {
   4.655 +        return result;
   4.656 +      }
   4.657 +
   4.658 +      if (GC_locker::is_active_and_needs_gc()) {
   4.659 +        should_try_gc = false;
   4.660 +      } else {
   4.661 +        // Read the GC count while still holding the Heap_lock.
   4.662 +        gc_count_before = SharedHeap::heap()->total_collections();
   4.663 +        should_try_gc = true;
   4.664 +      }
   4.665 +    }
   4.666 +
   4.667 +    if (should_try_gc) {
   4.668 +      // If we failed to allocate the humongous object, we should try to
   4.669 +      // do a collection pause (if we're allowed) in case it reclaims
   4.670 +      // enough space for the allocation to succeed after the pause.
   4.671 +
   4.672 +      bool succeeded;
   4.673 +      result = do_collection_pause(word_size, gc_count_before, &succeeded);
   4.674 +      if (result != NULL) {
   4.675 +        assert(succeeded, "only way to get back a non-NULL result");
   4.676 +        return result;
   4.677 +      }
   4.678 +
   4.679 +      if (succeeded) {
   4.680 +        // If we get here we successfully scheduled a collection which
   4.681 +        // failed to allocate. No point in trying to allocate
   4.682 +        // further. We'll just return NULL.
   4.683 +        MutexLockerEx x(Heap_lock);
   4.684 +        *gc_count_before_ret = SharedHeap::heap()->total_collections();
   4.685 +        return NULL;
   4.686 +      }
   4.687 +    } else {
   4.688 +      GC_locker::stall_until_clear();
   4.689 +    }
   4.690 +
   4.691 +    // We can reach here if we were unsuccessful in scheduling a
   4.692 +    // collection (because another thread beat us to it) or if we were
   4.693 +    // stalled due to the GC locker. In either case we should retry the
   4.694 +    // allocation attempt in case another thread successfully
   4.695 +    // performed a collection and reclaimed enough space.  Give a
   4.696 +    // warning if we seem to be looping forever.
   4.697 +
   4.698 +    if ((QueuedAllocationWarningCount > 0) &&
   4.699 +        (try_count % QueuedAllocationWarningCount == 0)) {
   4.700 +      warning("G1CollectedHeap::attempt_allocation_humongous() "
   4.701 +              "retries %d times", try_count);
   4.702 +    }
   4.703 +  }
   4.704 +
   4.705 +  ShouldNotReachHere();
   4.706 +  return NULL;
   4.707 +}
   4.708 +
   4.709 +HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
   4.710 +                                       bool expect_null_mutator_alloc_region) {
   4.711    assert_at_safepoint(true /* should_be_vm_thread */);
   4.712 -
   4.713 -  HeapRegion* cur_alloc_region = _cur_alloc_region;
   4.714 -  if (cur_alloc_region != NULL) {
   4.715 -    assert(!cur_alloc_region->is_empty(),
   4.716 -           "the current alloc region can never be empty");
   4.717 -    assert(cur_alloc_region->is_young(),
   4.718 -           "the current alloc region should be young");
   4.719 -
   4.720 -    retire_cur_alloc_region_common(cur_alloc_region);
   4.721 -  }
   4.722 -  assert(_cur_alloc_region == NULL, "post-condition");
   4.723 +  assert(_mutator_alloc_region.get() == NULL ||
   4.724 +                                             !expect_null_mutator_alloc_region,
   4.725 +         "the current alloc region was unexpectedly found to be non-NULL");
   4.726 +
   4.727 +  if (!isHumongous(word_size)) {
   4.728 +    return _mutator_alloc_region.attempt_allocation_locked(word_size,
   4.729 +                                                      false /* bot_updates */);
   4.730 +  } else {
   4.731 +    return humongous_obj_allocate(word_size);
   4.732 +  }
   4.733 +
   4.734 +  ShouldNotReachHere();
   4.735  }
   4.736  
   4.737  void G1CollectedHeap::abandon_gc_alloc_regions() {
   4.738 @@ -1417,8 +1177,8 @@
   4.739  
   4.740      if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
   4.741        HandleMark hm;  // Discard invalid handles created during verification
   4.742 +      gclog_or_tty->print(" VerifyBeforeGC:");
   4.743        prepare_for_verify();
   4.744 -      gclog_or_tty->print(" VerifyBeforeGC:");
   4.745        Universe::verify(true);
   4.746      }
   4.747  
   4.748 @@ -1439,9 +1199,8 @@
   4.749      concurrent_mark()->abort();
   4.750  
   4.751      // Make sure we'll choose a new allocation region afterwards.
   4.752 -    abandon_cur_alloc_region();
   4.753 +    release_mutator_alloc_region();
   4.754      abandon_gc_alloc_regions();
   4.755 -    assert(_cur_alloc_region == NULL, "Invariant.");
   4.756      g1_rem_set()->cleanupHRRS();
   4.757      tear_down_region_lists();
   4.758  
   4.759 @@ -1547,6 +1306,8 @@
   4.760      // evacuation pause.
   4.761      clear_cset_fast_test();
   4.762  
   4.763 +    init_mutator_alloc_region();
   4.764 +
   4.765      double end = os::elapsedTime();
   4.766      g1_policy()->record_full_collection_end();
   4.767  
   4.768 @@ -1720,8 +1481,9 @@
   4.769  
   4.770    *succeeded = true;
   4.771    // Let's attempt the allocation first.
   4.772 -  HeapWord* result = attempt_allocation_at_safepoint(word_size,
   4.773 -                                     false /* expect_null_cur_alloc_region */);
   4.774 +  HeapWord* result =
   4.775 +    attempt_allocation_at_safepoint(word_size,
   4.776 +                                 false /* expect_null_mutator_alloc_region */);
   4.777    if (result != NULL) {
   4.778      assert(*succeeded, "sanity");
   4.779      return result;
   4.780 @@ -1748,7 +1510,7 @@
   4.781  
   4.782    // Retry the allocation
   4.783    result = attempt_allocation_at_safepoint(word_size,
   4.784 -                                      true /* expect_null_cur_alloc_region */);
   4.785 +                                  true /* expect_null_mutator_alloc_region */);
   4.786    if (result != NULL) {
   4.787      assert(*succeeded, "sanity");
   4.788      return result;
   4.789 @@ -1765,7 +1527,7 @@
   4.790  
   4.791    // Retry the allocation once more
   4.792    result = attempt_allocation_at_safepoint(word_size,
   4.793 -                                      true /* expect_null_cur_alloc_region */);
   4.794 +                                  true /* expect_null_mutator_alloc_region */);
   4.795    if (result != NULL) {
   4.796      assert(*succeeded, "sanity");
   4.797      return result;
   4.798 @@ -1796,7 +1558,7 @@
   4.799    if (expand(expand_bytes)) {
   4.800      verify_region_sets_optional();
   4.801      return attempt_allocation_at_safepoint(word_size,
   4.802 -                                          false /* expect_null_cur_alloc_region */);
   4.803 +                                 false /* expect_null_mutator_alloc_region */);
   4.804    }
   4.805    return NULL;
   4.806  }
   4.807 @@ -1940,7 +1702,6 @@
   4.808    _evac_failure_scan_stack(NULL) ,
   4.809    _mark_in_progress(false),
   4.810    _cg1r(NULL), _summary_bytes_used(0),
   4.811 -  _cur_alloc_region(NULL),
   4.812    _refine_cte_cl(NULL),
   4.813    _full_collection(false),
   4.814    _free_list("Master Free List"),
   4.815 @@ -2099,7 +1860,6 @@
   4.816    _g1_max_committed = _g1_committed;
   4.817    _hrs = new HeapRegionSeq(_expansion_regions);
   4.818    guarantee(_hrs != NULL, "Couldn't allocate HeapRegionSeq");
   4.819 -  guarantee(_cur_alloc_region == NULL, "from constructor");
   4.820  
   4.821    // 6843694 - ensure that the maximum region index can fit
   4.822    // in the remembered set structures.
   4.823 @@ -2195,6 +1955,22 @@
   4.824    // Do later initialization work for concurrent refinement.
   4.825    _cg1r->init();
   4.826  
   4.827 +  // Here we allocate the dummy full region that is required by the
   4.828 +  // G1AllocRegion class. If we don't pass an address in the reserved
   4.829 +  // space here, lots of asserts fire.
   4.830 +  MemRegion mr(_g1_reserved.start(), HeapRegion::GrainWords);
   4.831 +  HeapRegion* dummy_region = new HeapRegion(_bot_shared, mr, true);
   4.832 +  // We'll re-use the same region whether the alloc region requires
   4.833 +  // BOT updates or not; if it doesn't require them, a non-young
   4.834 +  // region would complain that it cannot support allocations without
   4.835 +  // BOT updates. So we'll tag the dummy region as young to avoid that.
   4.836 +  dummy_region->set_young();
   4.837 +  // Make sure it's full.
   4.838 +  dummy_region->set_top(dummy_region->end());
   4.839 +  G1AllocRegion::setup(this, dummy_region);
   4.840 +
   4.841 +  init_mutator_alloc_region();
   4.842 +
   4.843    return JNI_OK;
   4.844  }
   4.845  
   4.846 @@ -2261,7 +2037,7 @@
   4.847           "Should be owned on this thread's behalf.");
   4.848    size_t result = _summary_bytes_used;
   4.849    // Read only once in case it is set to NULL concurrently
   4.850 -  HeapRegion* hr = _cur_alloc_region;
   4.851 +  HeapRegion* hr = _mutator_alloc_region.get();
   4.852    if (hr != NULL)
   4.853      result += hr->used();
   4.854    return result;
   4.855 @@ -2324,13 +2100,11 @@
   4.856    // to free(), resulting in a SIGSEGV. Note that this doesn't appear
   4.857    // to be a problem in the optimized build, since the two loads of the
   4.858    // current allocation region field are optimized away.
   4.859 -  HeapRegion* car = _cur_alloc_region;
   4.860 -
   4.861 -  // FIXME: should iterate over all regions?
   4.862 -  if (car == NULL) {
   4.863 +  HeapRegion* hr = _mutator_alloc_region.get();
   4.864 +  if (hr == NULL) {
   4.865      return 0;
   4.866    }
   4.867 -  return car->free();
   4.868 +  return hr->free();
   4.869  }
   4.870  
   4.871  bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   4.872 @@ -2781,16 +2555,12 @@
   4.873    // since we can't allow tlabs to grow big enough to accommodate
   4.874    // humongous objects.
   4.875  
   4.876 -  // We need to store the cur alloc region locally, since it might change
   4.877 -  // between when we test for NULL and when we use it later.
   4.878 -  ContiguousSpace* cur_alloc_space = _cur_alloc_region;
   4.879 +  HeapRegion* hr = _mutator_alloc_region.get();
   4.880    size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
   4.881 -
   4.882 -  if (cur_alloc_space == NULL) {
   4.883 +  if (hr == NULL) {
   4.884      return max_tlab_size;
   4.885    } else {
   4.886 -    return MIN2(MAX2(cur_alloc_space->free(), (size_t)MinTLABSize),
   4.887 -                max_tlab_size);
   4.888 +    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
   4.889    }
   4.890  }
   4.891  
   4.892 @@ -3364,6 +3134,7 @@
   4.893    }
   4.894  
   4.895    verify_region_sets_optional();
   4.896 +  verify_dirty_young_regions();
   4.897  
   4.898    {
   4.899      // This call will decide whether this pause is an initial-mark
   4.900 @@ -3425,8 +3196,8 @@
   4.901  
   4.902        if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
   4.903          HandleMark hm;  // Discard invalid handles created during verification
   4.904 +        gclog_or_tty->print(" VerifyBeforeGC:");
   4.905          prepare_for_verify();
   4.906 -        gclog_or_tty->print(" VerifyBeforeGC:");
   4.907          Universe::verify(false);
   4.908        }
   4.909  
   4.910 @@ -3442,7 +3213,7 @@
   4.911  
   4.912        // Forget the current alloc region (we might even choose it to be part
   4.913        // of the collection set!).
   4.914 -      abandon_cur_alloc_region();
   4.915 +      release_mutator_alloc_region();
   4.916  
   4.917        // The elapsed time induced by the start time below deliberately elides
   4.918        // the possible verification above.
   4.919 @@ -3573,6 +3344,8 @@
   4.920        g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
   4.921  #endif // YOUNG_LIST_VERBOSE
   4.922  
   4.923 +      init_mutator_alloc_region();
   4.924 +
   4.925        double end_time_sec = os::elapsedTime();
   4.926        double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
   4.927        g1_policy()->record_pause_time_ms(pause_time_ms);
   4.928 @@ -3655,6 +3428,15 @@
   4.929    return gclab_word_size;
   4.930  }
   4.931  
   4.932 +void G1CollectedHeap::init_mutator_alloc_region() {
   4.933 +  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
   4.934 +  _mutator_alloc_region.init();
   4.935 +}
   4.936 +
   4.937 +void G1CollectedHeap::release_mutator_alloc_region() {
   4.938 +  _mutator_alloc_region.release();
   4.939 +  assert(_mutator_alloc_region.get() == NULL, "post-condition");
   4.940 +}
   4.941  
   4.942  void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
   4.943    assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
   4.944 @@ -5140,10 +4922,8 @@
   4.945    CardTableModRefBS* _ct_bs;
   4.946  public:
   4.947    G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
   4.948 -    : _ct_bs(ct_bs)
   4.949 -  { }
   4.950 -  virtual bool doHeapRegion(HeapRegion* r)
   4.951 -  {
   4.952 +    : _ct_bs(ct_bs) { }
   4.953 +  virtual bool doHeapRegion(HeapRegion* r) {
   4.954      MemRegion mr(r->bottom(), r->end());
   4.955      if (r->is_survivor()) {
   4.956        _ct_bs->verify_dirty_region(mr);
   4.957 @@ -5153,6 +4933,29 @@
   4.958      return false;
   4.959    }
   4.960  };
   4.961 +
   4.962 +void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
   4.963 +  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
   4.964 +  for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
   4.965 +    // We cannot guarantee that [bottom(),end()] is dirty.  Threads
   4.966 +    // dirty allocated blocks as they allocate them. The thread that
   4.967 +    // retires each region and replaces it with a new one will do a
   4.968 +    // maximal allocation to fill in [pre_dummy_top(),end()] but will
   4.969 +    // not dirty that area (one less thing to have to do while holding
   4.970 +    // a lock). So we can only verify that [bottom(),pre_dummy_top()]
   4.971 +    // is dirty. Also note that verify_dirty_region() requires
   4.972 +    // mr.start() and mr.end() to be card aligned and pre_dummy_top()
   4.973 +    // is not guaranteed to be.
   4.974 +    MemRegion mr(hr->bottom(),
   4.975 +                 ct_bs->align_to_card_boundary(hr->pre_dummy_top()));
   4.976 +    ct_bs->verify_dirty_region(mr);
   4.977 +  }
   4.978 +}
   4.979 +
   4.980 +void G1CollectedHeap::verify_dirty_young_regions() {
   4.981 +  verify_dirty_young_list(_young_list->first_region());
   4.982 +  verify_dirty_young_list(_young_list->first_survivor_region());
   4.983 +}
   4.984  #endif
   4.985  
   4.986  void G1CollectedHeap::cleanUpCardTable() {
   4.987 @@ -5500,6 +5303,44 @@
   4.988    }
   4.989  }
   4.990  
   4.991 +HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
   4.992 +                                                      bool force) {
   4.993 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   4.994 +  assert(!force || g1_policy()->can_expand_young_list(),
   4.995 +         "if force is true we should be able to expand the young list");
   4.996 +  if (force || !g1_policy()->is_young_list_full()) {
   4.997 +    HeapRegion* new_alloc_region = new_region(word_size,
   4.998 +                                              false /* do_expand */);
   4.999 +    if (new_alloc_region != NULL) {
  4.1000 +      g1_policy()->update_region_num(true /* next_is_young */);
  4.1001 +      set_region_short_lived_locked(new_alloc_region);
  4.1002 +      return new_alloc_region;
  4.1003 +    }
  4.1004 +  }
  4.1005 +  return NULL;
  4.1006 +}
  4.1007 +
  4.1008 +void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
  4.1009 +                                                  size_t allocated_bytes) {
  4.1010 +  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  4.1011 +  assert(alloc_region->is_young(), "all mutator alloc regions should be young");
  4.1012 +
  4.1013 +  g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  4.1014 +  _summary_bytes_used += allocated_bytes;
  4.1015 +}
  4.1016 +
  4.1017 +HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
  4.1018 +                                                    bool force) {
  4.1019 +  return _g1h->new_mutator_alloc_region(word_size, force);
  4.1020 +}
  4.1021 +
  4.1022 +void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
  4.1023 +                                       size_t allocated_bytes) {
  4.1024 +  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
  4.1025 +}
  4.1026 +
  4.1027 +// Heap region set verification
  4.1028 +
  4.1029  class VerifyRegionListsClosure : public HeapRegionClosure {
  4.1030  private:
  4.1031    HumongousRegionSet* _humongous_set;
     5.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Mar 29 22:36:16 2011 -0400
     5.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Mar 30 10:26:59 2011 -0400
     5.3 @@ -26,6 +26,7 @@
     5.4  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
     5.5  
     5.6  #include "gc_implementation/g1/concurrentMark.hpp"
     5.7 +#include "gc_implementation/g1/g1AllocRegion.hpp"
     5.8  #include "gc_implementation/g1/g1RemSet.hpp"
     5.9  #include "gc_implementation/g1/heapRegionSets.hpp"
    5.10  #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
    5.11 @@ -128,6 +129,15 @@
    5.12    void          print();
    5.13  };
    5.14  
    5.15 +class MutatorAllocRegion : public G1AllocRegion {
    5.16 +protected:
    5.17 +  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
    5.18 +  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
    5.19 +public:
    5.20 +  MutatorAllocRegion()
    5.21 +    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
    5.22 +};
    5.23 +
    5.24  class RefineCardTableEntryClosure;
    5.25  class G1CollectedHeap : public SharedHeap {
    5.26    friend class VM_G1CollectForAllocation;
    5.27 @@ -135,6 +145,7 @@
    5.28    friend class VM_G1CollectFull;
    5.29    friend class VM_G1IncCollectionPause;
    5.30    friend class VMStructs;
    5.31 +  friend class MutatorAllocRegion;
    5.32  
    5.33    // Closures used in implementation.
    5.34    friend class G1ParCopyHelper;
    5.35 @@ -197,12 +208,15 @@
    5.36    // The sequence of all heap regions in the heap.
    5.37    HeapRegionSeq* _hrs;
    5.38  
    5.39 -  // The region from which normal-sized objects are currently being
    5.40 -  // allocated.  May be NULL.
    5.41 -  HeapRegion* _cur_alloc_region;
    5.42 +  // Alloc region used to satisfy mutator allocation requests.
    5.43 +  MutatorAllocRegion _mutator_alloc_region;
    5.44  
    5.45 -  // Postcondition: cur_alloc_region == NULL.
    5.46 -  void abandon_cur_alloc_region();
    5.47 +  // It resets the mutator alloc region before new allocations can take place.
    5.48 +  void init_mutator_alloc_region();
    5.49 +
    5.50 +  // It releases the mutator alloc region.
    5.51 +  void release_mutator_alloc_region();
    5.52 +
    5.53    void abandon_gc_alloc_regions();
    5.54  
    5.55    // The to-space memory regions into which objects are being copied during
    5.56 @@ -360,27 +374,21 @@
    5.57    G1CollectorPolicy* _g1_policy;
    5.58  
    5.59    // This is the second level of trying to allocate a new region. If
    5.60 -  // new_region_work didn't find a region in the free_list, this call
    5.61 -  // will check whether there's anything available in the
    5.62 -  // secondary_free_list and/or wait for more regions to appear in that
    5.63 -  // list, if _free_regions_coming is set.
    5.64 +  // new_region() didn't find a region on the free_list, this call will
    5.65 +  // check whether there's anything available on the
    5.66 +  // secondary_free_list and/or wait for more regions to appear on
    5.67 +  // that list, if _free_regions_coming is set.
    5.68    HeapRegion* new_region_try_secondary_free_list();
    5.69  
    5.70    // Try to allocate a single non-humongous HeapRegion sufficient for
    5.71    // an allocation of the given word_size. If do_expand is true,
    5.72    // attempt to expand the heap if necessary to satisfy the allocation
    5.73    // request.
    5.74 -  HeapRegion* new_region_work(size_t word_size, bool do_expand);
    5.75 +  HeapRegion* new_region(size_t word_size, bool do_expand);
    5.76  
    5.77 -  // Try to allocate a new region to be used for allocation by a
    5.78 -  // mutator thread. Attempt to expand the heap if no region is
    5.79 +  // Try to allocate a new region to be used for allocation by
    5.80 +  // a GC thread. It will try to expand the heap if no region is
    5.81    // available.
    5.82 -  HeapRegion* new_alloc_region(size_t word_size) {
    5.83 -    return new_region_work(word_size, false /* do_expand */);
    5.84 -  }
    5.85 -
    5.86 -  // Try to allocate a new region to be used for allocation by a GC
    5.87 -  // thread. Attempt to expand the heap if no region is available.
    5.88    HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
    5.89  
    5.90    // Attempt to satisfy a humongous allocation request of the given
    5.91 @@ -415,10 +423,6 @@
    5.92    // * All non-TLAB allocation requests should go to mem_allocate()
    5.93    //   and mem_allocate() should never be called with is_tlab == true.
    5.94    //
    5.95 -  // * If the GC locker is active we currently stall until we can
    5.96 -  //   allocate a new young region. This will be changed in the
    5.97 -  //   near future (see CR 6994056).
    5.98 -  //
    5.99    // * If either call cannot satisfy the allocation request using the
   5.100    //   current allocating region, they will try to get a new one. If
   5.101    //   this fails, they will attempt to do an evacuation pause and
   5.102 @@ -441,122 +445,38 @@
   5.103                                   bool   is_tlab, /* expected to be false */
   5.104                                   bool*  gc_overhead_limit_was_exceeded);
   5.105  
   5.106 -  // The following methods, allocate_from_cur_allocation_region(),
   5.107 -  // attempt_allocation(), attempt_allocation_locked(),
   5.108 -  // replace_cur_alloc_region_and_allocate(),
   5.109 -  // attempt_allocation_slow(), and attempt_allocation_humongous()
   5.110 -  // have very awkward pre- and post-conditions with respect to
   5.111 -  // locking:
   5.112 -  //
   5.113 -  // If they are called outside a safepoint they assume the caller
   5.114 -  // holds the Heap_lock when it calls them. However, on exit they
   5.115 -  // will release the Heap_lock if they return a non-NULL result, but
   5.116 -  // keep holding the Heap_lock if they return a NULL result. The
   5.117 -  // reason for this is that we need to dirty the cards that span
   5.118 -  // allocated blocks on young regions to avoid having to take the
   5.119 -  // slow path of the write barrier (for performance reasons we don't
   5.120 -  // update RSets for references whose source is a young region, so we
   5.121 -  // don't need to look at dirty cards on young regions). But, doing
   5.122 -  // this card dirtying while holding the Heap_lock can be a
   5.123 -  // scalability bottleneck, especially given that some allocation
   5.124 -  // requests might be of non-trivial size (and the larger the region
   5.125 -  // size is, the fewer allocations requests will be considered
   5.126 -  // humongous, as the humongous size limit is a fraction of the
   5.127 -  // region size). So, when one of these calls succeeds in allocating
   5.128 -  // a block it does the card dirtying after it releases the Heap_lock
   5.129 -  // which is why it will return without holding it.
   5.130 -  //
   5.131 -  // The above assymetry is the reason why locking / unlocking is done
   5.132 -  // explicitly (i.e., with Heap_lock->lock() and
   5.133 -  // Heap_lock->unlocked()) instead of using MutexLocker and
   5.134 -  // MutexUnlocker objects. The latter would ensure that the lock is
   5.135 -  // unlocked / re-locked at every possible exit out of the basic
   5.136 -  // block. However, we only want that action to happen in selected
   5.137 -  // places.
   5.138 -  //
   5.139 -  // Further, if the above methods are called during a safepoint, then
   5.140 -  // naturally there's no assumption about the Heap_lock being held or
   5.141 -  // there's no attempt to unlock it. The parameter at_safepoint
   5.142 -  // indicates whether the call is made during a safepoint or not (as
   5.143 -  // an optimization, to avoid reading the global flag with
   5.144 -  // SafepointSynchronize::is_at_safepoint()).
   5.145 -  //
   5.146 -  // The methods share these parameters:
   5.147 -  //
   5.148 -  // * word_size     : the size of the allocation request in words
   5.149 -  // * at_safepoint  : whether the call is done at a safepoint; this
   5.150 -  //                   also determines whether a GC is permitted
   5.151 -  //                   (at_safepoint == false) or not (at_safepoint == true)
   5.152 -  // * do_dirtying   : whether the method should dirty the allocated
   5.153 -  //                   block before returning
   5.154 -  //
   5.155 -  // They all return either the address of the block, if they
   5.156 -  // successfully manage to allocate it, or NULL.
   5.157 +  // The following three methods take a gc_count_before_ret
   5.158 +  // parameter which is used to return the GC count if the method
   5.159 +  // returns NULL. Given that we are required to read the GC count
   5.160 +  // while holding the Heap_lock, and these paths will take the
   5.161 +  // Heap_lock at some point, it's easier to get them to read the GC
   5.162 +  // count while holding the Heap_lock before they return NULL instead
   5.163 +  // of the caller (namely: mem_allocate()) having to also take the
   5.164 +  // Heap_lock just to read the GC count.
   5.165  
   5.166 -  // It tries to satisfy an allocation request out of the current
   5.167 -  // alloc region, which is passed as a parameter. It assumes that the
   5.168 -  // caller has checked that the current alloc region is not NULL.
   5.169 -  // Given that the caller has to check the current alloc region for
   5.170 -  // at least NULL, it might as well pass it as the first parameter so
   5.171 -  // that the method doesn't have to read it from the
   5.172 -  // _cur_alloc_region field again. It is called from both
   5.173 -  // attempt_allocation() and attempt_allocation_locked() and the
   5.174 -  // with_heap_lock parameter indicates whether the caller was holding
   5.175 -  // the heap lock when it called it or not.
   5.176 -  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
   5.177 -                                                  size_t word_size,
   5.178 -                                                  bool with_heap_lock);
   5.179 +  // First-level mutator allocation attempt: try to allocate out of
   5.180 +  // the mutator alloc region without taking the Heap_lock. This
   5.181 +  // should only be used for non-humongous allocations.
   5.182 +  inline HeapWord* attempt_allocation(size_t word_size,
   5.183 +                                      unsigned int* gc_count_before_ret);
   5.184  
   5.185 -  // First-level of allocation slow path: it attempts to allocate out
   5.186 -  // of the current alloc region in a lock-free manner using a CAS. If
   5.187 -  // that fails it takes the Heap_lock and calls
   5.188 -  // attempt_allocation_locked() for the second-level slow path.
   5.189 -  inline HeapWord* attempt_allocation(size_t word_size);
   5.190 +  // Second-level mutator allocation attempt: take the Heap_lock and
   5.191 +  // retry the allocation attempt, potentially scheduling a GC
   5.192 +  // pause. This should only be used for non-humongous allocations.
   5.193 +  HeapWord* attempt_allocation_slow(size_t word_size,
   5.194 +                                    unsigned int* gc_count_before_ret);
   5.195  
   5.196 -  // Second-level of allocation slow path: while holding the Heap_lock
   5.197 -  // it tries to allocate out of the current alloc region and, if that
   5.198 -  // fails, tries to allocate out of a new current alloc region.
   5.199 -  inline HeapWord* attempt_allocation_locked(size_t word_size);
   5.200 +  // Takes the Heap_lock and attempts a humongous allocation. It can
   5.201 +  // potentially schedule a GC pause.
   5.202 +  HeapWord* attempt_allocation_humongous(size_t word_size,
   5.203 +                                         unsigned int* gc_count_before_ret);
   5.204  
   5.205 -  // It assumes that the current alloc region has been retired and
   5.206 -  // tries to allocate a new one. If it's successful, it performs the
   5.207 -  // allocation out of the new current alloc region and updates
   5.208 -  // _cur_alloc_region. Normally, it would try to allocate a new
   5.209 -  // region if the young gen is not full, unless can_expand is true in
   5.210 -  // which case it would always try to allocate a new region.
   5.211 -  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
   5.212 -                                                  bool at_safepoint,
   5.213 -                                                  bool do_dirtying,
   5.214 -                                                  bool can_expand);
   5.215 -
   5.216 -  // Third-level of allocation slow path: when we are unable to
   5.217 -  // allocate a new current alloc region to satisfy an allocation
   5.218 -  // request (i.e., when attempt_allocation_locked() fails). It will
   5.219 -  // try to do an evacuation pause, which might stall due to the GC
   5.220 -  // locker, and retry the allocation attempt when appropriate.
   5.221 -  HeapWord* attempt_allocation_slow(size_t word_size);
   5.222 -
   5.223 -  // The method that tries to satisfy a humongous allocation
   5.224 -  // request. If it cannot satisfy it it will try to do an evacuation
   5.225 -  // pause to perhaps reclaim enough space to be able to satisfy the
   5.226 -  // allocation request afterwards.
   5.227 -  HeapWord* attempt_allocation_humongous(size_t word_size,
   5.228 -                                         bool at_safepoint);
   5.229 -
   5.230 -  // It does the common work when we are retiring the current alloc region.
   5.231 -  inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
   5.232 -
   5.233 -  // It retires the current alloc region, which is passed as a
   5.234 -  // parameter (since, typically, the caller is already holding on to
   5.235 -  // it). It sets _cur_alloc_region to NULL.
   5.236 -  void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
   5.237 -
   5.238 -  // It attempts to do an allocation immediately before or after an
   5.239 -  // evacuation pause and can only be called by the VM thread. It has
   5.240 -  // slightly different assumptions that the ones before (i.e.,
   5.241 -  // assumes that the current alloc region has been retired).
   5.242 +  // Allocation attempt that should be called during safepoints (e.g.,
   5.243 +  // at the end of a successful GC). expect_null_mutator_alloc_region
   5.244 +  // specifies whether the mutator alloc region is expected to be NULL
   5.245 +  // or not.
   5.246    HeapWord* attempt_allocation_at_safepoint(size_t word_size,
   5.247 -                                            bool expect_null_cur_alloc_region);
   5.248 +                                       bool expect_null_mutator_alloc_region);
   5.249  
   5.250    // It dirties the cards that cover the block so that the post
   5.251    // write barrier never queues anything when updating objects on this
   5.252 @@ -583,6 +503,12 @@
   5.253    // GC pause.
   5.254    void  retire_alloc_region(HeapRegion* alloc_region, bool par);
   5.255  
   5.256 +  // These two methods are the "callbacks" from the G1AllocRegion class.
   5.257 +
   5.258 +  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
   5.259 +  void retire_mutator_alloc_region(HeapRegion* alloc_region,
   5.260 +                                   size_t allocated_bytes);
   5.261 +
   5.262    // - if explicit_gc is true, the GC is for a System.gc() or a heap
   5.263    //   inspection request and should collect the entire heap
   5.264    // - if clear_all_soft_refs is true, all soft references should be
   5.265 @@ -1027,6 +953,9 @@
   5.266    // The number of regions available for "regular" expansion.
   5.267    size_t expansion_regions() { return _expansion_regions; }
   5.268  
   5.269 +  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
   5.270 +  void verify_dirty_young_regions() PRODUCT_RETURN;
   5.271 +
   5.272    // verify_region_sets() performs verification over the region
   5.273    // lists. It will be compiled in the product code to be used when
   5.274    // necessary (i.e., during heap verification).
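The new MutatorAllocRegion declaration above is deliberately thin: it only supplies the two callbacks, while the region caching and the bump allocation live in the G1AllocRegion base class. A minimal stand-alone sketch of that split follows; Region, AllocRegionBase and MutatorAllocRegionSketch are simplified, illustrative types, not the real classes.

// Illustrative sketch (not HotSpot code) of the G1AllocRegion callback split.
#include <cstddef>

struct Region {
  char* top;
  char* end;
  char* allocate(size_t bytes) {                   // serial bump allocation
    if (static_cast<size_t>(end - top) < bytes) return nullptr;
    char* res = top;
    top += bytes;
    return res;
  }
};

class AllocRegionBase {
protected:
  Region* _cur = nullptr;
  // The two "callbacks" every concrete alloc region must provide.
  virtual Region* allocate_new_region(size_t bytes, bool force) = 0;
  virtual void    retire_region(Region* r, size_t allocated_bytes) = 0;
public:
  virtual ~AllocRegionBase() { }
  char* attempt_allocation_locked(size_t bytes) {
    if (_cur != nullptr) {
      if (char* res = _cur->allocate(bytes)) return res;
      retire_region(_cur, /* allocated_bytes */ 0); // byte accounting elided
      _cur = nullptr;
    }
    _cur = allocate_new_region(bytes, /* force */ false);
    return _cur != nullptr ? _cur->allocate(bytes) : nullptr;
  }
};

// The mutator flavour only forwards to the heap, mirroring
// MutatorAllocRegion::allocate_new_region() / retire_region() in the patch.
class MutatorAllocRegionSketch : public AllocRegionBase {
protected:
  virtual Region* allocate_new_region(size_t, bool) { return nullptr; }
  virtual void    retire_region(Region*, size_t)    { }
};

Keeping the policy decisions (young-list limits, incremental collection-set updates) behind these two virtuals is what allows GC threads to later reuse the same base class with a different subclass, as the changeset summary notes.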
     6.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Mar 29 22:36:16 2011 -0400
     6.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Mar 30 10:26:59 2011 -0400
     6.3 @@ -27,6 +27,7 @@
     6.4  
     6.5  #include "gc_implementation/g1/concurrentMark.hpp"
     6.6  #include "gc_implementation/g1/g1CollectedHeap.hpp"
     6.7 +#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
     6.8  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
     6.9  #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    6.10  #include "utilities/taskqueue.hpp"
    6.11 @@ -59,131 +60,23 @@
    6.12    return r != NULL && r->in_collection_set();
    6.13  }
    6.14  
    6.15 -// See the comment in the .hpp file about the locking protocol and
    6.16 -// assumptions of this method (and other related ones).
    6.17  inline HeapWord*
    6.18 -G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
    6.19 -                                                size_t word_size,
    6.20 -                                                bool with_heap_lock) {
    6.21 -  assert_not_at_safepoint();
    6.22 -  assert(with_heap_lock == Heap_lock->owned_by_self(),
    6.23 -         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
    6.24 -  assert(cur_alloc_region != NULL, "pre-condition of the method");
    6.25 -  assert(cur_alloc_region->is_young(),
    6.26 -         "we only support young current alloc regions");
    6.27 -  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
    6.28 -         "should not be used for humongous allocations");
    6.29 -  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
    6.30 +G1CollectedHeap::attempt_allocation(size_t word_size,
    6.31 +                                    unsigned int* gc_count_before_ret) {
    6.32 +  assert_heap_not_locked_and_not_at_safepoint();
    6.33 +  assert(!isHumongous(word_size), "attempt_allocation() should not "
    6.34 +         "be called for humongous allocation requests");
    6.35  
    6.36 -  assert(!cur_alloc_region->is_empty(),
    6.37 -         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
    6.38 -                 cur_alloc_region->bottom(), cur_alloc_region->end()));
    6.39 -  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
    6.40 +  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
    6.41 +                                                      false /* bot_updates */);
    6.42 +  if (result == NULL) {
    6.43 +    result = attempt_allocation_slow(word_size, gc_count_before_ret);
    6.44 +  }
    6.45 +  assert_heap_not_locked();
    6.46    if (result != NULL) {
    6.47 -    assert(is_in(result), "result should be in the heap");
    6.48 -
    6.49 -    if (with_heap_lock) {
    6.50 -      Heap_lock->unlock();
    6.51 -    }
    6.52 -    assert_heap_not_locked();
    6.53 -    // Do the dirtying after we release the Heap_lock.
    6.54      dirty_young_block(result, word_size);
    6.55 -    return result;
    6.56    }
    6.57 -
    6.58 -  if (with_heap_lock) {
    6.59 -    assert_heap_locked();
    6.60 -  } else {
    6.61 -    assert_heap_not_locked();
    6.62 -  }
    6.63 -  return NULL;
    6.64 -}
    6.65 -
    6.66 -// See the comment in the .hpp file about the locking protocol and
    6.67 -// assumptions of this method (and other related ones).
    6.68 -inline HeapWord*
    6.69 -G1CollectedHeap::attempt_allocation(size_t word_size) {
    6.70 -  assert_heap_not_locked_and_not_at_safepoint();
    6.71 -  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
    6.72 -         "for humongous allocation requests");
    6.73 -
    6.74 -  HeapRegion* cur_alloc_region = _cur_alloc_region;
    6.75 -  if (cur_alloc_region != NULL) {
    6.76 -    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
    6.77 -                                                   word_size,
    6.78 -                                                   false /* with_heap_lock */);
    6.79 -    assert_heap_not_locked();
    6.80 -    if (result != NULL) {
    6.81 -      return result;
    6.82 -    }
    6.83 -  }
    6.84 -
    6.85 -  // Our attempt to allocate lock-free failed as the current
    6.86 -  // allocation region is either NULL or full. So, we'll now take the
    6.87 -  // Heap_lock and retry.
    6.88 -  Heap_lock->lock();
    6.89 -
    6.90 -  HeapWord* result = attempt_allocation_locked(word_size);
    6.91 -  if (result != NULL) {
    6.92 -    assert_heap_not_locked();
    6.93 -    return result;
    6.94 -  }
    6.95 -
    6.96 -  assert_heap_locked();
    6.97 -  return NULL;
    6.98 -}
    6.99 -
   6.100 -inline void
   6.101 -G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
   6.102 -  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   6.103 -  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
   6.104 -         "pre-condition of the call");
   6.105 -  assert(cur_alloc_region->is_young(),
   6.106 -         "we only support young current alloc regions");
   6.107 -
   6.108 -  // The region is guaranteed to be young
   6.109 -  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
   6.110 -  _summary_bytes_used += cur_alloc_region->used();
   6.111 -  _cur_alloc_region = NULL;
   6.112 -}
   6.113 -
   6.114 -inline HeapWord*
   6.115 -G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
   6.116 -  assert_heap_locked_and_not_at_safepoint();
   6.117 -  assert(!isHumongous(word_size), "attempt_allocation_locked() "
   6.118 -         "should not be called for humongous allocation requests");
   6.119 -
   6.120 -  // First, reread the current alloc region and retry the allocation
   6.121 -  // in case somebody replaced it while we were waiting to get the
   6.122 -  // Heap_lock.
   6.123 -  HeapRegion* cur_alloc_region = _cur_alloc_region;
   6.124 -  if (cur_alloc_region != NULL) {
   6.125 -    HeapWord* result = allocate_from_cur_alloc_region(
   6.126 -                                                  cur_alloc_region, word_size,
   6.127 -                                                  true /* with_heap_lock */);
   6.128 -    if (result != NULL) {
   6.129 -      assert_heap_not_locked();
   6.130 -      return result;
   6.131 -    }
   6.132 -
   6.133 -    // We failed to allocate out of the current alloc region, so let's
   6.134 -    // retire it before getting a new one.
   6.135 -    retire_cur_alloc_region(cur_alloc_region);
   6.136 -  }
   6.137 -
   6.138 -  assert_heap_locked();
   6.139 -  // Try to get a new region and allocate out of it
   6.140 -  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
   6.141 -                                                     false, /* at_safepoint */
   6.142 -                                                     true,  /* do_dirtying */
   6.143 -                                                     false  /* can_expand */);
   6.144 -  if (result != NULL) {
   6.145 -    assert_heap_not_locked();
   6.146 -    return result;
   6.147 -  }
   6.148 -
   6.149 -  assert_heap_locked();
   6.150 -  return NULL;
   6.151 +  return result;
   6.152  }
   6.153  
   6.154  // It dirties the cards that cover the block so that the post
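The rewritten first-level path above reduces to three steps: a lock-free allocation out of the current mutator region, a fall-back into the locked slow path, and card dirtying for the young block done only after every lock has been dropped. A stand-alone sketch of that shape follows; FastRegion, slow_path() and dirty_young_block() are illustrative placeholders.

// Illustrative sketch (not HotSpot code) of the first-level allocation path.
#include <atomic>
#include <cstddef>

struct FastRegion {
  std::atomic<char*> top;
  char*              end;
  char* par_allocate(size_t bytes) {           // lock-free CAS bump pointer
    char* cur = top.load(std::memory_order_relaxed);
    do {
      if (static_cast<size_t>(end - cur) < bytes) return nullptr;
    } while (!top.compare_exchange_weak(cur, cur + bytes));
    return cur;
  }
};

// Placeholders for the locked slow path and for the card dirtying.
static char* slow_path(size_t)                { return nullptr; }
static void  dirty_young_block(char*, size_t) { }

char* attempt_allocation_sketch(FastRegion& r, size_t bytes) {
  char* result = r.par_allocate(bytes);        // no lock taken here
  if (result == nullptr) {
    result = slow_path(bytes);                 // may take Heap_lock, may GC
  }
  if (result != nullptr) {
    dirty_young_block(result, bytes);          // always after locks are released
  }
  return result;
}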
     7.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Mar 29 22:36:16 2011 -0400
     7.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Mar 30 10:26:59 2011 -0400
     7.3 @@ -360,6 +360,7 @@
     7.4    set_young_index_in_cset(-1);
     7.5    uninstall_surv_rate_group();
     7.6    set_young_type(NotYoung);
     7.7 +  reset_pre_dummy_top();
     7.8  
     7.9    if (!par) {
    7.10      // If this is parallel, this will be done later.
    7.11 @@ -923,11 +924,11 @@
    7.12      ContiguousSpace::set_saved_mark();
    7.13      OrderAccess::storestore();
    7.14      _gc_time_stamp = curr_gc_time_stamp;
    7.15 -    // The following fence is to force a flush of the writes above, but
    7.16 -    // is strictly not needed because when an allocating worker thread
    7.17 -    // calls set_saved_mark() it does so under the ParGCRareEvent_lock;
    7.18 -    // when the lock is released, the write will be flushed.
    7.19 -    // OrderAccess::fence();
    7.20 +    // No need to do another barrier to flush the writes above. If
    7.21 +    // this is called in parallel with other threads trying to
    7.22 +    // allocate into the region, the caller should call this while
    7.23 +    // holding a lock and when the lock is released the writes will be
    7.24 +    // flushed.
    7.25    }
    7.26  }
    7.27  
     8.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Mar 29 22:36:16 2011 -0400
     8.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Mar 30 10:26:59 2011 -0400
     8.3 @@ -149,6 +149,13 @@
     8.4    G1BlockOffsetArrayContigSpace _offsets;
     8.5    Mutex _par_alloc_lock;
     8.6    volatile unsigned _gc_time_stamp;
     8.7 +  // When we need to retire an allocation region, while other threads
     8.8 +  // are also concurrently trying to allocate into it, we typically
     8.9 +  // allocate a dummy object at the end of the region to ensure that
    8.10 +  // no more allocations can take place in it. However, sometimes we
    8.11 +  // want to know where the end of the last "real" object we allocated
    8.12 +  // into the region was, and that is what this field keeps track of.
    8.13 +  HeapWord* _pre_dummy_top;
    8.14  
    8.15   public:
    8.16    // Constructor.  If "is_zeroed" is true, the MemRegion "mr" may be
    8.17 @@ -163,6 +170,17 @@
    8.18    virtual void set_saved_mark();
    8.19    void reset_gc_time_stamp() { _gc_time_stamp = 0; }
    8.20  
    8.21 +  // See the comment above in the declaration of _pre_dummy_top for an
    8.22 +  // explanation of what it is.
    8.23 +  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
    8.24 +    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
    8.25 +    _pre_dummy_top = pre_dummy_top;
    8.26 +  }
    8.27 +  HeapWord* pre_dummy_top() {
    8.28 +    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
    8.29 +  }
    8.30 +  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
    8.31 +
    8.32    virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
    8.33    virtual void clear(bool mangle_space);
    8.34  
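The _pre_dummy_top bookkeeping introduced above can be pictured with a short stand-alone sketch: when a region is retired while other threads may still be allocating into it, the retiring thread claims all remaining space as a dummy block so no further allocation can succeed, but first records where "real" allocation ended, which is the only part the card-table verification later inspects. RetirableRegion below is an illustrative type, not the HeapRegion class.

// Illustrative sketch (not HotSpot code) of the pre_dummy_top idea.
#include <atomic>
#include <cstddef>

struct RetirableRegion {
  char*              bottom;
  std::atomic<char*> top;
  char*              end;
  char*              pre_dummy_top = nullptr;

  // Fill the region so that no concurrent allocation can succeed any more,
  // remembering where "real" allocation ended first.
  void retire_with_dummy() {
    char* cur = top.load();
    while (cur < end && !top.compare_exchange_weak(cur, end)) {
      // cur is refreshed by compare_exchange_weak on failure
    }
    pre_dummy_top = cur;   // last address actually handed out to callers
    // The dummy block [cur, end) is never dirtied in the card table, which
    // is why verification only walks up to pre_dummy_top.
  }

  char* verified_dirty_limit() {
    return pre_dummy_top != nullptr ? pre_dummy_top : top.load();
  }
};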
     9.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Tue Mar 29 22:36:16 2011 -0400
     9.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Wed Mar 30 10:26:59 2011 -0400
     9.3 @@ -38,15 +38,8 @@
     9.4  // this is used for larger LAB allocations only.
     9.5  inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
     9.6    MutexLocker x(&_par_alloc_lock);
     9.7 -  // This ought to be just "allocate", because of the lock above, but that
     9.8 -  // ContiguousSpace::allocate asserts that either the allocating thread
     9.9 -  // holds the heap lock or it is the VM thread and we're at a safepoint.
    9.10 -  // The best I (dld) could figure was to put a field in ContiguousSpace
    9.11 -  // meaning "locking at safepoint taken care of", and set/reset that
    9.12 -  // here.  But this will do for now, especially in light of the comment
    9.13 -  // above.  Perhaps in the future some lock-free manner of keeping the
    9.14 -  // coordination.
    9.15 -  HeapWord* res = ContiguousSpace::par_allocate(size);
    9.16 +  // Given that we take the lock, there is no need to use par_allocate() here.
    9.17 +  HeapWord* res = ContiguousSpace::allocate(size);
    9.18    if (res != NULL) {
    9.19      _offsets.alloc_block(res, size);
    9.20    }
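The change to G1OffsetTableContigSpace::par_allocate() above replaces the CAS-based allocation with the plain serial one because the per-region lock already serializes callers. A stand-alone sketch of that pattern, with illustrative names:

// Illustrative sketch (not HotSpot code): once the lock is held, serial
// bump allocation suffices and a CAS would only add cost.
#include <cstddef>
#include <mutex>

struct LockedRegion {
  std::mutex par_alloc_lock;    // stands in for _par_alloc_lock
  char* top;
  char* end;

  char* allocate(size_t bytes) {               // serial, no atomics
    if (static_cast<size_t>(end - top) < bytes) return nullptr;
    char* res = top;
    top += bytes;
    return res;
  }

  char* par_allocate(size_t bytes) {
    std::lock_guard<std::mutex> x(par_alloc_lock);
    // The block-offset-table update that follows in the real code is elided.
    return allocate(bytes);                    // the lock makes a CAS redundant
  }
};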
    10.1 --- a/src/share/vm/memory/cardTableModRefBS.hpp	Tue Mar 29 22:36:16 2011 -0400
    10.2 +++ b/src/share/vm/memory/cardTableModRefBS.hpp	Wed Mar 30 10:26:59 2011 -0400
    10.3 @@ -382,6 +382,11 @@
    10.4      return (addr_for(pcard) == p);
    10.5    }
    10.6  
    10.7 +  HeapWord* align_to_card_boundary(HeapWord* p) {
    10.8 +    jbyte* pcard = byte_for(p + card_size_in_words - 1);
    10.9 +    return addr_for(pcard);
   10.10 +  }
   10.11 +
   10.12    // The kinds of precision a CardTableModRefBS may offer.
   10.13    enum PrecisionStyle {
   10.14      Precise,
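The new align_to_card_boundary() above rounds an address up to the next card boundary, leaving an already aligned address unchanged: adding card_size_in_words - 1 before truncating to a card start is the usual round-up trick. A small self-contained check of that arithmetic follows, expressed on word indices rather than HeapWord pointers; the card size used is an assumption for illustration only.

// Illustrative arithmetic check (not HotSpot code) for the rounding done
// by align_to_card_boundary().
#include <cassert>
#include <cstddef>

static const size_t card_size_in_words = 64;   // assumed, for illustration

size_t align_up_to_card(size_t word_index) {
  // Mirrors addr_for(byte_for(p + card_size_in_words - 1)) from the patch.
  return ((word_index + card_size_in_words - 1) / card_size_in_words)
         * card_size_in_words;
}

int main() {
  assert(align_up_to_card(0)  == 0);      // already aligned: unchanged
  assert(align_up_to_card(1)  == 64);     // rounds up to the next card
  assert(align_up_to_card(64) == 64);
  assert(align_up_to_card(65) == 128);
  return 0;
}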
    11.1 --- a/src/share/vm/memory/space.cpp	Tue Mar 29 22:36:16 2011 -0400
    11.2 +++ b/src/share/vm/memory/space.cpp	Wed Mar 30 10:26:59 2011 -0400
    11.3 @@ -818,9 +818,14 @@
    11.4  // This version requires locking.
    11.5  inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
    11.6                                                  HeapWord* const end_value) {
    11.7 +  // In G1 there are places where a GC worker can allocate into a
    11.8 +  // region using this serial allocation code without being prone to a
    11.9 +  // race with other GC workers (we ensure that no other GC worker can
   11.10 +  // access the same region at the same time). So the assert below is
   11.11 +  // too strong in the case of G1.
   11.12    assert(Heap_lock->owned_by_self() ||
   11.13           (SafepointSynchronize::is_at_safepoint() &&
   11.14 -          Thread::current()->is_VM_thread()),
   11.15 +                               (Thread::current()->is_VM_thread() || UseG1GC)),
   11.16           "not locked");
   11.17    HeapWord* obj = top();
   11.18    if (pointer_delta(end_value, obj) >= size) {
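The relaxed assert above admits a caller besides the heap-lock holder and the VM thread at a safepoint: a G1 GC worker that has exclusive use of the region it is allocating into. A stand-alone sketch of that relaxed precondition, with illustrative names:

// Illustrative sketch (not HotSpot code) of the relaxed precondition: the
// serial bump allocation is safe whenever the caller has exclusive access
// to the space, whether via the heap lock or because, as in G1, each GC
// worker owns its allocation region outright.
#include <cassert>
#include <cstddef>

struct ExclusiveSpace {
  char* top;
  char* end;

  char* allocate_impl(size_t bytes, bool heap_locked, bool caller_owns_space) {
    assert((heap_locked || caller_owns_space) && "not locked");
    if (static_cast<size_t>(end - top) < bytes) return nullptr;
    char* res = top;
    top += bytes;
    return res;
  }
};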
