src/share/vm/gc_implementation/g1/g1AllocRegion.cpp

author       tonyp
date         Wed, 30 Mar 2011 10:26:59 -0400
changeset    2715:abdfc822206f
child        3028:f44782f04dd4
permissions  -rw-r--r--

7023069: G1: Introduce symmetric locking in the slow allocation path
7023151: G1: refactor the code that operates on _cur_alloc_region to be re-used for allocs by the GC threads
7018286: G1: humongous allocation attempts should take the GC locker into account
Summary: First, this change replaces the asymmetric locking scheme in the G1 slow alloc path with a symmetric one. Second, it factors out the code that operates on _cur_alloc_region so that it can be re-used for allocations by the GC threads in the future.
Reviewed-by: stefank, brutisso, johnc
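
For context, the factored-out G1AllocRegion class is meant to be driven by a
lock-free fast path and a locked slow path that is now symmetric for all
threads. The sketch below illustrates that calling pattern; the
attempt_allocation()/attempt_allocation_locked() entry points live in
g1AllocRegion.inline.hpp (not shown in this file), so treat their exact names
and signatures here as assumptions rather than the definitive API.

// Illustrative sketch only: how a caller such as the mutator allocation
// path in g1CollectedHeap.cpp is expected to drive a G1AllocRegion.
HeapWord* allocate_with_alloc_region(G1AllocRegion* alloc_region,
                                     size_t word_size) {
  // Fast path: lock-free CAS allocation out of the active region.
  HeapWord* result = alloc_region->attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result != NULL) {
    return result;
  }
  // Slow path: every thread takes the same lock (symmetric locking),
  // retires the current region, and tries to install a new one.
  MutexLockerEx ml(Heap_lock);
  return alloc_region->attempt_allocation_locked(word_size,
                                                 false /* bot_updates */);
}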

/*
 * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"

G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;

void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  assert(allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(allocate(dummy_region, 1, true) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}

void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
                                            bool bot_updates) {
  assert(alloc_region != NULL && alloc_region != _dummy_region,
         "pre-condition");

  // Other threads might still be trying to allocate using a CAS out
  // of the region we are trying to retire, as they can do so without
  // holding the lock. So, we first have to make sure that no one else
  // can allocate out of it by doing a maximal allocation. Even if our
  // CAS attempt fails a few times, we'll succeed sooner or later
  // given that failed CAS attempts mean that the region is getting
  // close to being full.
  size_t free_word_size = alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then no one can
  // allocate in this region anyway (all allocation requests will be
  // of a size larger than this) so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      alloc_region->set_pre_dummy_top(dummy);
      break;
    }

    free_word_size = alloc_region->free() / HeapWordSize;
    // It's also possible that someone else beats us to the
    // allocation and they fill up the region. In that case, we can
    // just get out of the loop.
  }
  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "post-condition");
}

void G1AllocRegion::retire(bool fill_up) {
  assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));

  trace("retiring");
  HeapRegion* alloc_region = _alloc_region;
  if (alloc_region != _dummy_region) {
    // We never have to check whether the active region is empty or not,
    // and potentially free it if it is, given that it's guaranteed that
    // it will never be empty.
    assert(!alloc_region->is_empty(),
           ar_ext_msg(this, "the alloc region should never be empty"));

    if (fill_up) {
      fill_up_remaining_space(alloc_region, _bot_updates);
    }

    assert(alloc_region->used() >= _used_bytes_before,
           ar_ext_msg(this, "invariant"));
    size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
    retire_region(alloc_region, allocated_bytes);
    _used_bytes_before = 0;
    _alloc_region = _dummy_region;
  }
  trace("retired");
}

HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
                                                       bool force) {
  assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
  assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));

  trace("attempting region allocation");
  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
  if (new_alloc_region != NULL) {
    new_alloc_region->reset_pre_dummy_top();
    // Need to do this before the allocation.
    _used_bytes_before = new_alloc_region->used();
    HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
    assert(result != NULL, ar_ext_msg(this, "the allocation should have succeeded"));

    OrderAccess::storestore();
    // Note that we first perform the allocation and then we store the
    // region in _alloc_region. This is the reason why an active region
    // can never be empty.
    _alloc_region = new_alloc_region;
    trace("region allocation successful");
    return result;
  } else {
    trace("region allocation failed");
    return NULL;
  }
  ShouldNotReachHere();
}

void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
  msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
              _name, message, BOOL_TO_STR(_bot_updates),
              _alloc_region, _used_bytes_before);
}

void G1AllocRegion::init() {
  trace("initializing");
  assert(_alloc_region == NULL && _used_bytes_before == 0,
         ar_ext_msg(this, "pre-condition"));
  assert(_dummy_region != NULL, "should have been set");
  _alloc_region = _dummy_region;
  trace("initialized");
}

HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  HeapRegion* alloc_region = _alloc_region;
  retire(false /* fill_up */);
  assert(_alloc_region == _dummy_region, "post-condition of retire()");
  _alloc_region = NULL;
  trace("released");
  return (alloc_region == _dummy_region) ? NULL : alloc_region;
}

#if G1_ALLOC_REGION_TRACING
void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
  // All the calls to trace that set either just the size or the size
  // and the result are considered part of level 2 tracing and are
  // skipped during level 1 tracing.
  if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) {
    const size_t buffer_length = 128;
    char hr_buffer[buffer_length];
    char rest_buffer[buffer_length];

    HeapRegion* alloc_region = _alloc_region;
    if (alloc_region == NULL) {
      jio_snprintf(hr_buffer, buffer_length, "NULL");
    } else if (alloc_region == _dummy_region) {
      jio_snprintf(hr_buffer, buffer_length, "DUMMY");
    } else {
      jio_snprintf(hr_buffer, buffer_length,
                   HR_FORMAT, HR_FORMAT_PARAMS(alloc_region));
    }

    if (G1_ALLOC_REGION_TRACING > 1) {
      if (result != NULL) {
        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT" "PTR_FORMAT,
                     word_size, result);
      } else if (word_size != 0) {
        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size);
      } else {
        jio_snprintf(rest_buffer, buffer_length, "");
      }
    } else {
      jio_snprintf(rest_buffer, buffer_length, "");
    }

    tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer);
  }
}
#endif // G1_ALLOC_REGION_TRACING

G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates)
  : _name(name), _bot_updates(bot_updates),
    _alloc_region(NULL), _used_bytes_before(0) { }
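
The retire_region() and allocate_new_region() calls above are the two
region-management hooks that this class leaves to its subclasses; they appear
to be declared as virtual hooks in g1AllocRegion.hpp, which is not shown here.
Below is a minimal sketch of what a concrete policy might look like; the heap
helper names (new_mutator_alloc_region() / retire_mutator_alloc_region()) and
the visibility of _g1h to subclasses are assumptions, not part of this file.

// Sketch only: one possible concrete alloc-region policy, roughly the shape
// a mutator alloc region subclass would take. Helper names are hypothetical.
class ExampleAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) {
    // Ask the heap for a fresh region to allocate into (assumed helper).
    return _g1h->new_mutator_alloc_region(word_size, force);
  }
  virtual void retire_region(HeapRegion* alloc_region,
                             size_t allocated_bytes) {
    // Hand the filled-up region back to the heap for accounting (assumed helper).
    _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
  }
public:
  ExampleAllocRegion()
    : G1AllocRegion("Example Alloc Region", false /* bot_updates */) { }
};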
