src/share/vm/gc_implementation/g1/g1Allocator.cpp

author:      asaha
date:        Thu, 23 Oct 2014 12:02:08 -0700
changeset:   7476:c2844108a708
parent:      7118:227a9e5e4b4a
child:       7647:80ac3ee51955
permissions: -rw-r--r--

Merge
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
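
// Sets up the mutator allocation region; there must not be an active
// mutator alloc region when this is called.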
void G1DefaultAllocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}
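
// Retires the current mutator allocation region, leaving no active region.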
void G1DefaultAllocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
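
// Attempts to reuse the old GC alloc region retained from the previous
// evacuation pause as the initial old GC alloc region for this pause.
// The retained region is discarded instead if it is unusable (see the
// cases listed below).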
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->isHumongous()) {
    retained_region->record_top_and_timestamp();
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->_old_set.remove(retained_region);
    bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
    retained_region->note_start_of_copying(during_im);
    old->set(retained_region);
    _g1h->_hr_printer.reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}
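
// Prepares the survivor and old GC alloc regions at the start of an
// evacuation pause; must be called at a safepoint by the VM thread.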
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}
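
// Releases the GC alloc regions at the end of an evacuation pause,
// retaining the old GC alloc region (if any) for the next pause and
// optionally resizing the PLABs.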
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  AllocationContext_t context = AllocationContext::current();
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
                                         old_gc_alloc_region(context)->count());
  survivor_gc_alloc_region(context)->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't, _retained_old_gc_alloc_region
  // will become NULL. This is what we want either way, so there is no reason
  // to check explicitly for either condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();

  if (ResizePLAB) {
    _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
    _g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
  }
}
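
// Drops the retained old GC alloc region. Both GC alloc regions must
// already have been released.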
void G1DefaultAllocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}
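
// A G1 PLAB is constructed in the retired state; a backing buffer is
// installed later (see allocate_slow() below).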
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
  ParGCAllocBuffer(gclab_word_size), _retired(true) { }
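
// Slow-path allocation used when the current PLAB cannot satisfy the
// request: either retire and refill the PLAB (when the request is below
// ParallelGCBufferWastePct percent of the desired PLAB size) or allocate
// the object directly in a GC alloc region.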
HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
  HeapWord* obj = NULL;
  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
    alloc_buf->retire(false /* end_of_gc */, false /* retain */);

    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
    if (buf == NULL) {
      return NULL; // Let caller handle allocation failure.
    }
    // Otherwise.
    alloc_buf->set_word_size(gclab_word_size);
    alloc_buf->set_buf(buf);

    obj = alloc_buf->allocate(word_sz);
    assert(obj != NULL, "buffer was definitely big enough...");
  } else {
    obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
  }
  return obj;
}
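
// The default per-worker allocator keeps one PLAB per allocation purpose:
// one for survivor copies and one for tenured copies.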
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
            G1ParGCAllocator(g1h),
            _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
            _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {

  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;

}
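
// At the end of a GC, records the unused space in each PLAB as waste and
// retires the buffers, flushing their statistics into the per-purpose
// PLAB stats.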
void G1DefaultParGCAllocator::retire_alloc_buffers() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    size_t waste = _alloc_buffers[ap]->words_remaining();
    add_to_alloc_buffer_waste(waste);
    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
                                               true /* end_of_gc */,
                                               false /* retain */);
  }
}
