src/share/vm/gc_implementation/g1/g1AllocRegion.hpp

Fri, 10 Oct 2014 15:51:58 +0200

author
tschatzl
date
Fri, 10 Oct 2014 15:51:58 +0200
changeset 7257
e7d0505c8a30
parent 7118
227a9e5e4b4a
child 7535
7ae4e26cb1e0
permissions
-rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks, because the benchmarks never touch that memory, so the operating system does not actually commit these pages. The fix is to skip the explicit initialization whenever the initialization value of the data structures matches the default value of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso

     1 /*
     2  * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP
    28 #include "gc_implementation/g1/heapRegion.hpp"
    30 class G1CollectedHeap;
    32 // 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing
    33 #define G1_ALLOC_REGION_TRACING 0
    35 class ar_ext_msg;
// A class that holds a region that is active in satisfying allocation
// requests, potentially issued in parallel. When the active region is
// full it will be retired and replaced with a new one. The
// implementation assumes that fast-path allocations will be lock-free
// and a lock will need to be taken when the active region needs to be
// replaced.

class G1AllocRegion VALUE_OBJ_CLASS_SPEC {
  // ar_ext_msg needs access to fill_in_ext_msg() and the private state
  // it reports.
  friend class ar_ext_msg;

private:
  // The active allocating region we are currently allocating out
  // of. The invariant is that if this object is initialized (i.e.,
  // init() has been called and release() has not) then _alloc_region
  // is either an active allocating region or the dummy region (i.e.,
  // it can never be NULL) and this object can be used to satisfy
  // allocation requests. If this object is not initialized
  // (i.e. init() has not been called or release() has been called)
  // then _alloc_region is NULL and this object should not be used to
  // satisfy allocation requests (it was done this way to force the
  // correct use of init() and release()).
  HeapRegion* volatile _alloc_region;

  // Allocation context associated with this alloc region.
  AllocationContext_t _allocation_context;

  // It keeps track of the distinct number of regions that are used
  // for allocation in the active interval of this object, i.e.,
  // between a call to init() and a call to release(). The count
  // mostly includes regions that are freshly allocated, as well as
  // the region that is re-used using the set() method. This count can
  // be used in any heuristics that might want to bound how many
  // distinct regions this object can use during an active interval.
  uint _count;

  // When we set up a new active region we save its used bytes in this
  // field so that, when we retire it, we can calculate how much space
  // we allocated in it.
  size_t _used_bytes_before;

  // When true, indicates that allocate calls should do BOT updates.
  const bool _bot_updates;

  // Useful for debugging and tracing.
  const char* _name;

  // A dummy region (i.e., it's been allocated specially for this
  // purpose and it is not part of the heap) that is full (i.e., top()
  // == end()). When we don't have a valid active region we make
  // _alloc_region point to this. This allows us to skip checking
  // whether the _alloc_region is NULL or not.
  static HeapRegion* _dummy_region;

  // Some of the methods below take a bot_updates parameter. Its value
  // should be the same as the _bot_updates field. The idea is that
  // the parameter will be a constant for a particular alloc region
  // and, given that these methods will be hopefully inlined, the
  // compiler should compile out the test.

  // Perform a non-MT-safe allocation out of the given region.
  static inline HeapWord* allocate(HeapRegion* alloc_region,
                                   size_t word_size,
                                   bool bot_updates);

  // Perform a MT-safe allocation out of the given region.
  static inline HeapWord* par_allocate(HeapRegion* alloc_region,
                                       size_t word_size,
                                       bool bot_updates);

  // Ensure that the region passed as a parameter has been filled up
  // so that no one else can allocate out of it any more.
  static void fill_up_remaining_space(HeapRegion* alloc_region,
                                      bool bot_updates);

  // Retire the active allocating region. If fill_up is true then make
  // sure that the region is full before we retire it so that no one
  // else can allocate out of it.
  void retire(bool fill_up);

  // After a region is allocated by alloc_new_region, this
  // method is used to set it as the active alloc_region
  void update_alloc_region(HeapRegion* alloc_region);

  // Allocate a new active region and use it to perform a word_size
  // allocation. The force parameter will be passed on to
  // G1CollectedHeap::allocate_new_alloc_region() and tells it to try
  // to allocate a new region even if the max has been reached.
  HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);

  // Fill in the given extended error message with this object's state;
  // used by the ar_ext_msg helper to produce informative assertion text.
  void fill_in_ext_msg(ar_ext_msg* msg, const char* message);

protected:
  // For convenience as subclasses use it.
  static G1CollectedHeap* _g1h;

  // Subclass hook: obtain a new region to allocate out of. The force
  // parameter allows exceeding the normal region limit.
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0;
  // Subclass hook: hand a retired region (with the number of bytes
  // allocated into it during the active interval) back to the heap.
  virtual void retire_region(HeapRegion* alloc_region,
                             size_t allocated_bytes) = 0;

  G1AllocRegion(const char* name, bool bot_updates);

public:
  // One-time global initialization: records the heap and the shared
  // dummy region used in place of NULL.
  static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);

  // Returns the current active region, or NULL if there is none.
  HeapRegion* get() const {
    HeapRegion * hr = _alloc_region;
    // Make sure that the dummy region does not escape this class.
    return (hr == _dummy_region) ? NULL : hr;
  }

  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
  AllocationContext_t  allocation_context() { return _allocation_context; }

  // Number of distinct regions used during the current active interval.
  uint count() { return _count; }

  // The following two are the building blocks for the allocation method.

  // First-level allocation: Should be called without holding a
  // lock. It will try to allocate lock-free out of the active region,
  // or return NULL if it was unable to.
  inline HeapWord* attempt_allocation(size_t word_size, bool bot_updates);

  // Second-level allocation: Should be called while holding a
  // lock. It will try to first allocate lock-free out of the active
  // region or, if it's unable to, it will try to replace the active
  // alloc region with a new one. We require that the caller takes the
  // appropriate lock before calling this so that it is easier to make
  // it conform to its locking protocol.
  inline HeapWord* attempt_allocation_locked(size_t word_size,
                                             bool bot_updates);

  // Should be called to allocate a new region even if the max of this
  // type of regions has been reached. Should only be called if other
  // allocation attempts have failed and we are not holding a valid
  // active region.
  inline HeapWord* attempt_allocation_force(size_t word_size,
                                            bool bot_updates);

  // Should be called before we start using this object.
  void init();

  // This can be used to set the active region to a specific
  // region. (Use Example: we try to retain the last old GC alloc
  // region that we've used during a GC and we can use set() to
  // re-instate it at the beginning of the next GC.)
  void set(HeapRegion* alloc_region);

  // Should be called when we want to release the active region which
  // is returned after it's been retired.
  virtual HeapRegion* release();

#if G1_ALLOC_REGION_TRACING
  void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
#else // G1_ALLOC_REGION_TRACING
  // No-op when tracing is compiled out.
  void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL) { }
#endif // G1_ALLOC_REGION_TRACING
};
// Alloc region used for mutator allocation requests. Configured
// without BOT updates.
class MutatorAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
// Alloc region used for survivor allocation during GC. Configured
// without BOT updates.
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  SurvivorGCAllocRegion()
  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};
// Alloc region used for old generation allocation during GC.
// Configured with BOT updates, since concurrently scanned remembered
// sets rely on an up-to-date BOT for these regions.
class OldGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  OldGCAllocRegion()
  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }

  // This specialization of release() makes sure that the last card that has
  // been allocated into has been completely filled by a dummy object.  This
  // avoids races when remembered set scanning wants to update the BOT of the
  // last card in the retained old gc alloc region, and allocation threads
  // allocating into that card at the same time.
  virtual HeapRegion* release();
};
// An err_msg that is filled in with the state of the given
// G1AllocRegion, giving assertions an informative, formatted message.
class ar_ext_msg : public err_msg {
public:
  // Starts from an empty err_msg and lets the alloc region append
  // its own state via fill_in_ext_msg() (ar_ext_msg is a friend of
  // G1AllocRegion).
  ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
    alloc_region->fill_in_ext_msg(this, message);
  }
};
   236 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP

mercurial