src/share/vm/gc_implementation/g1/g1AllocRegion.cpp

changeset 0: f90c822e73f8
child 6876: 710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,228 @@
     1.4 +/*
     1.5 + * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
    1.30 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    1.31 +
    1.32 +G1CollectedHeap* G1AllocRegion::_g1h = NULL;
    1.33 +HeapRegion* G1AllocRegion::_dummy_region = NULL;
    1.34 +
    1.35 +void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
    1.36 +  assert(_dummy_region == NULL, "should be set once");
    1.37 +  assert(dummy_region != NULL, "pre-condition");
    1.38 +  assert(dummy_region->free() == 0, "pre-condition");
    1.39 +
    1.40 +  // Make sure that any allocation attempt on this region will fail
    1.41 +  // and will not trigger any asserts.
    1.42 +  assert(allocate(dummy_region, 1, false) == NULL, "should fail");
    1.43 +  assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
    1.44 +  assert(allocate(dummy_region, 1, true) == NULL, "should fail");
    1.45 +  assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");
    1.46 +
    1.47 +  _g1h = g1h;
    1.48 +  _dummy_region = dummy_region;
    1.49 +}
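
The asserts above rely on one property of the dummy region: its free() is zero, so
any bump-pointer allocation request of one word or more must fail. A minimal
standalone sketch of that check (plain C++ with made-up field names, not the actual
HeapRegion code):

    #include <cstddef>

    // Toy stand-in for a region with a bump pointer.
    struct ToyRegion {
      char* _top;   // next free byte
      char* _end;   // one past the last usable byte

      // Serial bump-pointer allocation: returns NULL when the request
      // does not fit in the remaining space.
      void* allocate(size_t byte_size) {
        if (static_cast<size_t>(_end - _top) < byte_size) {
          return NULL;  // a region with _top == _end (free() == 0) always takes this path
        }
        void* result = _top;
        _top += byte_size;
        return result;
      }
    };

With _top == _end, allocate() can only return NULL, which is exactly what the four
asserts check for the dummy region.
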
    1.50 +
    1.51 +void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
    1.52 +                                            bool bot_updates) {
    1.53 +  assert(alloc_region != NULL && alloc_region != _dummy_region,
    1.54 +         "pre-condition");
    1.55 +
    1.56 +  // Other threads might still be trying to allocate using a CAS out
    1.57 +  // of the region we are trying to retire, as they can do so without
     1.58 +  // holding the lock. So, we first have to make sure that no one else
    1.59 +  // can allocate out of it by doing a maximal allocation. Even if our
    1.60 +  // CAS attempt fails a few times, we'll succeed sooner or later
    1.61 +  // given that failed CAS attempts mean that the region is getting
     1.62 +  // closer to being full.
    1.63 +  size_t free_word_size = alloc_region->free() / HeapWordSize;
    1.64 +
    1.65 +  // This is the minimum free chunk we can turn into a dummy
     1.66 +  // object. If the free space falls below this, then no one can
    1.67 +  // allocate in this region anyway (all allocation requests will be
    1.68 +  // of a size larger than this) so we won't have to perform the dummy
    1.69 +  // allocation.
    1.70 +  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
    1.71 +
    1.72 +  while (free_word_size >= min_word_size_to_fill) {
    1.73 +    HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
    1.74 +    if (dummy != NULL) {
    1.75 +      // If the allocation was successful we should fill in the space.
    1.76 +      CollectedHeap::fill_with_object(dummy, free_word_size);
    1.77 +      alloc_region->set_pre_dummy_top(dummy);
    1.78 +      break;
    1.79 +    }
    1.80 +
    1.81 +    free_word_size = alloc_region->free() / HeapWordSize;
    1.82 +    // It's also possible that someone else beats us to the
    1.83 +    // allocation and they fill up the region. In that case, we can
    1.84 +    // just get out of the loop.
    1.85 +  }
    1.86 +  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
    1.87 +         "post-condition");
    1.88 +}
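
The comment at the top of this function leans on a property of CAS-based
bump-pointer allocation: a compare-and-swap on top can only fail because some other
thread advanced top first, so every failed attempt means the remaining free space
shrank. A standalone sketch of such a par_allocate-style loop, using std::atomic
rather than the real HeapRegion/Atomic code:

    #include <atomic>
    #include <cstddef>

    // Simplified concurrent bump-pointer allocation over [top, end).
    // Each failed compare_exchange means another thread moved top forward,
    // i.e. the region got closer to being full.
    void* par_allocate_sketch(std::atomic<char*>& top, char* end, size_t byte_size) {
      char* old_top = top.load();
      do {
        if (static_cast<size_t>(end - old_top) < byte_size) {
          return NULL;  // does not fit; the caller re-reads free() and retries with a smaller fill
        }
      } while (!top.compare_exchange_weak(old_top, old_top + byte_size));
      return old_top;
    }

In fill_up_remaining_space() the request is always the entire remaining free space,
so either the CAS succeeds and the region is full, or someone else allocated and the
loop retries with the new, smaller free() value.
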
    1.89 +
    1.90 +void G1AllocRegion::retire(bool fill_up) {
    1.91 +  assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
    1.92 +
    1.93 +  trace("retiring");
    1.94 +  HeapRegion* alloc_region = _alloc_region;
    1.95 +  if (alloc_region != _dummy_region) {
    1.96 +    // We never have to check whether the active region is empty or not,
    1.97 +    // and potentially free it if it is, given that it's guaranteed that
    1.98 +    // it will never be empty.
    1.99 +    assert(!alloc_region->is_empty(),
   1.100 +           ar_ext_msg(this, "the alloc region should never be empty"));
   1.101 +
   1.102 +    if (fill_up) {
   1.103 +      fill_up_remaining_space(alloc_region, _bot_updates);
   1.104 +    }
   1.105 +
   1.106 +    assert(alloc_region->used() >= _used_bytes_before,
   1.107 +           ar_ext_msg(this, "invariant"));
   1.108 +    size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
   1.109 +    retire_region(alloc_region, allocated_bytes);
   1.110 +    _used_bytes_before = 0;
   1.111 +    _alloc_region = _dummy_region;
   1.112 +  }
   1.113 +  trace("retired");
   1.114 +}
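
Note that retire_region() is credited only with the bytes allocated while the region
was active: used() at retirement minus the _used_bytes_before snapshot taken when
the region was installed. A tiny worked example with invented numbers, just to
illustrate the arithmetic:

    #include <cstddef>

    int main() {
      size_t used_bytes_before  = 64 * 1024;     // used() snapshot when the region was installed via set()
      size_t used_at_retirement = 1024 * 1024;   // used() reported by the region when retire() runs
      size_t allocated_bytes    = used_at_retirement - used_bytes_before;
      return allocated_bytes == 983040 ? 0 : 1;  // 983040 bytes would be passed to retire_region()
    }
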
   1.115 +
   1.116 +HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
   1.117 +                                                       bool force) {
   1.118 +  assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
   1.119 +  assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));
   1.120 +
   1.121 +  trace("attempting region allocation");
   1.122 +  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
   1.123 +  if (new_alloc_region != NULL) {
   1.124 +    new_alloc_region->reset_pre_dummy_top();
   1.125 +    // Need to do this before the allocation
   1.126 +    _used_bytes_before = new_alloc_region->used();
   1.127 +    HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
    1.128 +    assert(result != NULL, ar_ext_msg(this, "the allocation should succeed"));
   1.129 +
   1.130 +    OrderAccess::storestore();
   1.131 +    // Note that we first perform the allocation and then we store the
   1.132 +    // region in _alloc_region. This is the reason why an active region
   1.133 +    // can never be empty.
   1.134 +    _alloc_region = new_alloc_region;
   1.135 +    _count += 1;
   1.136 +    trace("region allocation successful");
   1.137 +    return result;
   1.138 +  } else {
   1.139 +    trace("region allocation failed");
   1.140 +    return NULL;
   1.141 +  }
   1.142 +  ShouldNotReachHere();
   1.143 +}
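
The storestore barrier above is what makes the "an active region is never empty"
claim hold for lock-free readers: the first allocation (and the _used_bytes_before
update) must become visible before the region pointer is published in
_alloc_region. A standalone sketch of the same publish pattern using std::atomic;
release/acquire here stands in for the writer-side storestore plus the reader's
load, whereas the real code only issues the writer-side fence:

    #include <atomic>
    #include <cstddef>

    struct ToyRegion {
      size_t used_bytes;  // stands in for everything written into the region before publication
    };

    std::atomic<ToyRegion*> g_alloc_region(NULL);

    // Writer: allocate into the region first, then publish the pointer.
    void publish(ToyRegion* r) {
      r->used_bytes = 64;                                  // "first allocation" happens before publication
      g_alloc_region.store(r, std::memory_order_release);  // ordered after the write above
    }

    // Reader: may still see NULL, but a non-NULL pointer always refers to a
    // region that already holds the first allocation, i.e. it is never empty.
    size_t observe() {
      ToyRegion* r = g_alloc_region.load(std::memory_order_acquire);
      return (r == NULL) ? 0 : r->used_bytes;              // non-NULL implies used_bytes == 64
    }
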
   1.144 +
   1.145 +void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
   1.146 +  msg->append("[%s] %s c: %u b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
   1.147 +              _name, message, _count, BOOL_TO_STR(_bot_updates),
   1.148 +              p2i(_alloc_region), _used_bytes_before);
   1.149 +}
   1.150 +
   1.151 +void G1AllocRegion::init() {
   1.152 +  trace("initializing");
   1.153 +  assert(_alloc_region == NULL && _used_bytes_before == 0,
   1.154 +         ar_ext_msg(this, "pre-condition"));
   1.155 +  assert(_dummy_region != NULL, ar_ext_msg(this, "should have been set"));
   1.156 +  _alloc_region = _dummy_region;
   1.157 +  _count = 0;
   1.158 +  trace("initialized");
   1.159 +}
   1.160 +
   1.161 +void G1AllocRegion::set(HeapRegion* alloc_region) {
   1.162 +  trace("setting");
   1.163 +  // We explicitly check that the region is not empty to make sure we
   1.164 +  // maintain the "the alloc region cannot be empty" invariant.
   1.165 +  assert(alloc_region != NULL && !alloc_region->is_empty(),
   1.166 +         ar_ext_msg(this, "pre-condition"));
   1.167 +  assert(_alloc_region == _dummy_region &&
   1.168 +         _used_bytes_before == 0 && _count == 0,
   1.169 +         ar_ext_msg(this, "pre-condition"));
   1.170 +
   1.171 +  _used_bytes_before = alloc_region->used();
   1.172 +  _alloc_region = alloc_region;
   1.173 +  _count += 1;
   1.174 +  trace("set");
   1.175 +}
   1.176 +
   1.177 +HeapRegion* G1AllocRegion::release() {
   1.178 +  trace("releasing");
   1.179 +  HeapRegion* alloc_region = _alloc_region;
   1.180 +  retire(false /* fill_up */);
   1.181 +  assert(_alloc_region == _dummy_region,
   1.182 +         ar_ext_msg(this, "post-condition of retire()"));
   1.183 +  _alloc_region = NULL;
   1.184 +  trace("released");
   1.185 +  return (alloc_region == _dummy_region) ? NULL : alloc_region;
   1.186 +}
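
Taken together, init(), set(), retire() and release() give the allocator a simple
lifecycle. A rough caller-side sketch, only to show the intended order of calls; the
real allocation entry points live in g1AllocRegion.inline.hpp and G1CollectedHeap,
and the comments below describe assumptions rather than code in this changeset:

    #include "gc_implementation/g1/g1AllocRegion.hpp"

    void example_allocation_phase(G1AllocRegion* ar) {
      ar->init();                        // installs the dummy region, resets _count
      // ... allocation attempts happen here; whenever the current region
      // fills up, it is retired and a fresh region is installed ...
      HeapRegion* last = ar->release();  // retires the current region without filling it up
      if (last != NULL) {
        // the caller decides what to do with the last region, e.g. retain it
        // for the next phase or return it to the heap
      }
    }
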
   1.187 +
   1.188 +#if G1_ALLOC_REGION_TRACING
   1.189 +void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
   1.190 +  // All the calls to trace that set either just the size or the size
   1.191 +  // and the result are considered part of level 2 tracing and are
   1.192 +  // skipped during level 1 tracing.
   1.193 +  if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) {
   1.194 +    const size_t buffer_length = 128;
   1.195 +    char hr_buffer[buffer_length];
   1.196 +    char rest_buffer[buffer_length];
   1.197 +
   1.198 +    HeapRegion* alloc_region = _alloc_region;
   1.199 +    if (alloc_region == NULL) {
   1.200 +      jio_snprintf(hr_buffer, buffer_length, "NULL");
   1.201 +    } else if (alloc_region == _dummy_region) {
   1.202 +      jio_snprintf(hr_buffer, buffer_length, "DUMMY");
   1.203 +    } else {
   1.204 +      jio_snprintf(hr_buffer, buffer_length,
   1.205 +                   HR_FORMAT, HR_FORMAT_PARAMS(alloc_region));
   1.206 +    }
   1.207 +
   1.208 +    if (G1_ALLOC_REGION_TRACING > 1) {
   1.209 +      if (result != NULL) {
   1.210 +        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT" "PTR_FORMAT,
   1.211 +                     word_size, result);
   1.212 +      } else if (word_size != 0) {
   1.213 +        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size);
   1.214 +      } else {
   1.215 +        jio_snprintf(rest_buffer, buffer_length, "");
   1.216 +      }
   1.217 +    } else {
   1.218 +      jio_snprintf(rest_buffer, buffer_length, "");
   1.219 +    }
   1.220 +
   1.221 +    tty->print_cr("[%s] %u %s : %s %s",
   1.222 +                  _name, _count, hr_buffer, str, rest_buffer);
   1.223 +  }
   1.224 +}
   1.225 +#endif // G1_ALLOC_REGION_TRACING
   1.226 +
   1.227 +G1AllocRegion::G1AllocRegion(const char* name,
   1.228 +                             bool bot_updates)
   1.229 +  : _name(name), _bot_updates(bot_updates),
   1.230 +    _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
   1.231 +
