src/share/vm/gc_implementation/g1/g1AllocRegion.cpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257 (e7d0505c8a30)
parent:      7118 (227a9e5e4b4a)
children:    7535 (7ae4e26cb1e0), 7651 (c132be0fb74d)
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression in startup benchmarks: they never touch most of that memory, so previously the operating system never actually committed those pages, whereas the explicit zeroing now forces every page to be committed. The fix is to skip the initialization entirely whenever the requested initialization value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
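
The idea behind the fix, shown as a minimal POSIX sketch (illustrative only; the actual change lives elsewhere in the G1 code base and does not use mmap directly): anonymous mappings read as zero on first access, so an explicit clear is only needed for a non-zero fill value, and skipping the clear avoids touching pages that might otherwise never be physically backed.

#include <string.h>
#include <sys/mman.h>

// Illustrative helper, not HotSpot code: commit zero-filled memory for an
// auxiliary data structure and initialize it to 'init_value'.
static char* reserve_and_initialize(size_t size_in_bytes, char init_value) {
  // Anonymous private mappings are zero-filled on demand by the kernel.
  void* base = mmap(NULL, size_in_bytes, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    return NULL;
  }
  if (init_value != 0) {
    // A non-zero fill pattern really has to be written out.
    memset(base, init_value, size_in_bytes);
  }
  // For init_value == 0 the pages are deliberately left untouched: they read
  // as zero anyway, and not writing to them means the kernel only backs the
  // pages that are actually used later, which is the footprint saving
  // described in the summary above.
  return (char*)base;
}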

/*
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

G1CollectedHeap* G1AllocRegion::_g1h = NULL;
HeapRegion* G1AllocRegion::_dummy_region = NULL;

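// Descriptive note: the dummy region is a non-NULL placeholder installed in
// _alloc_region whenever no real allocation region is active. It is set up
// with no free space (see the asserts below), so any allocation attempt
// against it fails immediately without requiring a NULL check on the fast
// path.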
void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  assert(allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, false) == NULL, "should fail");
  assert(allocate(dummy_region, 1, true) == NULL, "should fail");
  assert(par_allocate(dummy_region, 1, true) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}
void G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region,
                                            bool bot_updates) {
  assert(alloc_region != NULL && alloc_region != _dummy_region,
         "pre-condition");

  // Other threads might still be trying to allocate using a CAS out
  // of the region we are trying to retire, as they can do so without
  // holding the lock. So, we first have to make sure that no one else
  // can allocate out of it by doing a maximal allocation. Even if our
  // CAS attempt fails a few times, we'll succeed sooner or later
  // given that failed CAS attempts mean that the region is getting
  // close to being full.
  size_t free_word_size = alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then no one can
  // allocate in this region anyway (all allocation requests will be
  // of a size larger than this) so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy = par_allocate(alloc_region, free_word_size, bot_updates);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      alloc_region->set_pre_dummy_top(dummy);
      break;
    }

    free_word_size = alloc_region->free() / HeapWordSize;
    // It's also possible that someone else beats us to the
    // allocation and they fill up the region. In that case, we can
    // just get out of the loop.
  }
  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "post-condition");
}
void G1AllocRegion::retire(bool fill_up) {
  assert(_alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));

  trace("retiring");
  HeapRegion* alloc_region = _alloc_region;
  if (alloc_region != _dummy_region) {
    // We never have to check whether the active region is empty or not,
    // and potentially free it if it is, given that it's guaranteed that
    // it will never be empty.
    assert(!alloc_region->is_empty(),
           ar_ext_msg(this, "the alloc region should never be empty"));

    if (fill_up) {
      fill_up_remaining_space(alloc_region, _bot_updates);
    }

    assert(alloc_region->used() >= _used_bytes_before,
           ar_ext_msg(this, "invariant"));
    size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
    retire_region(alloc_region, allocated_bytes);
    _used_bytes_before = 0;
    _alloc_region = _dummy_region;
  }
  trace("retired");
}
HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
                                                       bool force) {
  assert(_alloc_region == _dummy_region, ar_ext_msg(this, "pre-condition"));
  assert(_used_bytes_before == 0, ar_ext_msg(this, "pre-condition"));

  trace("attempting region allocation");
  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
  if (new_alloc_region != NULL) {
    new_alloc_region->reset_pre_dummy_top();
    // Need to do this before the allocation.
    _used_bytes_before = new_alloc_region->used();
    HeapWord* result = allocate(new_alloc_region, word_size, _bot_updates);
    assert(result != NULL, ar_ext_msg(this, "the allocation should have succeeded"));

    OrderAccess::storestore();
    // Note that we first perform the allocation and then we store the
    // region in _alloc_region. This is the reason why an active region
    // can never be empty.
    update_alloc_region(new_alloc_region);
    trace("region allocation successful");
    return result;
  } else {
    trace("region allocation failed");
    return NULL;
  }
  ShouldNotReachHere();
}
void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
  msg->append("[%s] %s c: %u b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
              _name, message, _count, BOOL_TO_STR(_bot_updates),
              p2i(_alloc_region), _used_bytes_before);
}

void G1AllocRegion::init() {
  trace("initializing");
  assert(_alloc_region == NULL && _used_bytes_before == 0,
         ar_ext_msg(this, "pre-condition"));
  assert(_dummy_region != NULL, ar_ext_msg(this, "should have been set"));
  _alloc_region = _dummy_region;
  _count = 0;
  trace("initialized");
}
void G1AllocRegion::set(HeapRegion* alloc_region) {
  trace("setting");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert(alloc_region != NULL && !alloc_region->is_empty(),
         ar_ext_msg(this, "pre-condition"));
  assert(_alloc_region == _dummy_region &&
         _used_bytes_before == 0 && _count == 0,
         ar_ext_msg(this, "pre-condition"));

  _used_bytes_before = alloc_region->used();
  _alloc_region = alloc_region;
  _count += 1;
  trace("set");
}

void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
  trace("update");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert(alloc_region != NULL && !alloc_region->is_empty(),
         ar_ext_msg(this, "pre-condition"));

  _alloc_region = alloc_region;
  _alloc_region->set_allocation_context(allocation_context());
  _count += 1;
  trace("updated");
}
HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  HeapRegion* alloc_region = _alloc_region;
  retire(false /* fill_up */);
  assert(_alloc_region == _dummy_region,
         ar_ext_msg(this, "post-condition of retire()"));
  _alloc_region = NULL;
  trace("released");
  return (alloc_region == _dummy_region) ? NULL : alloc_region;
}
#if G1_ALLOC_REGION_TRACING
void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
  // All the calls to trace that set either just the size or the size
  // and the result are considered part of level 2 tracing and are
  // skipped during level 1 tracing.
  if ((word_size == 0 && result == NULL) || (G1_ALLOC_REGION_TRACING > 1)) {
    const size_t buffer_length = 128;
    char hr_buffer[buffer_length];
    char rest_buffer[buffer_length];

    HeapRegion* alloc_region = _alloc_region;
    if (alloc_region == NULL) {
      jio_snprintf(hr_buffer, buffer_length, "NULL");
    } else if (alloc_region == _dummy_region) {
      jio_snprintf(hr_buffer, buffer_length, "DUMMY");
    } else {
      jio_snprintf(hr_buffer, buffer_length,
                   HR_FORMAT, HR_FORMAT_PARAMS(alloc_region));
    }

    if (G1_ALLOC_REGION_TRACING > 1) {
      if (result != NULL) {
        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT" "PTR_FORMAT,
                     word_size, result);
      } else if (word_size != 0) {
        jio_snprintf(rest_buffer, buffer_length, SIZE_FORMAT, word_size);
      } else {
        jio_snprintf(rest_buffer, buffer_length, "");
      }
    } else {
      jio_snprintf(rest_buffer, buffer_length, "");
    }

    tty->print_cr("[%s] %u %s : %s %s",
                  _name, _count, hr_buffer, str, rest_buffer);
  }
}
#endif // G1_ALLOC_REGION_TRACING
G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates)
  : _name(name), _bot_updates(bot_updates),
    _alloc_region(NULL), _count(0), _used_bytes_before(0),
    _allocation_context(AllocationContext::system()) { }

HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                    bool force) {
  return _g1h->new_mutator_alloc_region(word_size, force);
}

void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                       size_t allocated_bytes) {
  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
                                                       bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
}

void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
                                          size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
                               GCAllocForSurvived);
}

HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
                                                  bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
}

void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
                                     size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
                               GCAllocForTenured);
}
HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
    HeapWord* top = cur->top();
    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);

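    // Illustrative example (assuming the usual 512-byte BOT granularity,
    // i.e. G1BlockOffsetSharedArray::N_bytes == 512, and 8-byte HeapWords):
    // if top() is 24 bytes past a card boundary, aligned_top is 488 bytes
    // above top(), so to_allocate_words is 61.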
    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up, possibly into the next card,
      // taking the end of the region and the minimum object size into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}
