src/share/vm/gc_implementation/g1/g1Allocator.hpp

Thu, 14 Jun 2018 09:15:08 -0700

author
kevinw
date
Thu, 14 Jun 2018 09:15:08 -0700
changeset 9327
f96fcd9e1e1b
parent 7651
c132be0fb74d
permissions
-rw-r--r--

8081202: Hotspot compile warning: "Invalid suffix on literal; C++11 requires a space between literal and identifier"
Summary: Need to add a space between macro identifier and string literal
Reviewed-by: bpittore, stefank, dholmes, kbarrett

     1 /*
     2  * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
    28 #include "gc_implementation/g1/g1AllocationContext.hpp"
    29 #include "gc_implementation/g1/g1AllocRegion.hpp"
    30 #include "gc_implementation/g1/g1InCSetState.hpp"
    31 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
    33 // Base class for G1 allocators.
// Base class for G1 allocators. Tracks the number of bytes used outside
// the current allocation region and defines the interface through which
// the heap obtains its mutator and GC (survivor/old) allocation regions.
class G1Allocator : public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  G1CollectedHeap* _g1h;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

public:
  G1Allocator(G1CollectedHeap* heap) :
    _g1h(heap), _summary_bytes_used(0) { }

  // Factory method selecting the allocator implementation for this heap
  // (defined in the .cpp file).
  static G1Allocator* create_allocator(G1CollectedHeap* g1h);

  // Mutator allocation region lifecycle.
  virtual void init_mutator_alloc_region() = 0;
  virtual void release_mutator_alloc_region() = 0;

  // GC allocation region lifecycle, bracketing an evacuation pause.
  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
  virtual void abandon_gc_alloc_regions() = 0;

  // Current allocation regions, keyed by allocation context.
  virtual MutatorAllocRegion*    mutator_alloc_region(AllocationContext_t context) = 0;
  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
  virtual OldGCAllocRegion*      old_gc_alloc_region(AllocationContext_t context) = 0;
  virtual size_t                 used() = 0;
  virtual bool                   is_retained_old_region(HeapRegion* hr) = 0;

  // Defined in the .cpp file; presumably reinstalls *retained (if any) as
  // the current old GC allocation region — see implementation for details.
  void                           reuse_retained_old_region(EvacuationInfo& evacuation_info,
                                                           OldGCAllocRegion* old,
                                                           HeapRegion** retained);

  // Returns _summary_bytes_used without taking any lock; callers are
  // responsible for any synchronization they need.
  size_t used_unlocked() const {
    return _summary_bytes_used;
  }

  void increase_used(size_t bytes) {
    _summary_bytes_used += bytes;
  }

  void decrease_used(size_t bytes) {
    assert(_summary_bytes_used >= bytes,
           err_msg("invariant: _summary_bytes_used: " SIZE_FORMAT " should be >= bytes: " SIZE_FORMAT,
               _summary_bytes_used, bytes));
    _summary_bytes_used -= bytes;
  }

  void set_used(size_t bytes) {
    _summary_bytes_used = bytes;
  }

  // Hook allowing subclasses to instantiate specialized HeapRegion types;
  // the default creates a plain HeapRegion.
  virtual HeapRegion* new_heap_region(uint hrs_index,
                                      G1BlockOffsetSharedArray* sharedOffsetArray,
                                      MemRegion mr) {
    return new HeapRegion(hrs_index, sharedOffsetArray, mr);
  }
};
    92 // The default allocator for G1.
// The default allocator for G1. Keeps a single set of allocation regions
// (the AllocationContext_t parameters are ignored).
class G1DefaultAllocator : public G1Allocator {
protected:
  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  // The retained old region, or NULL when none is retained; compared
  // against in is_retained_old_region().
  HeapRegion* _retained_old_gc_alloc_region;
public:
  G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }

  virtual void init_mutator_alloc_region();
  virtual void release_mutator_alloc_region();

  virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
  virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
  virtual void abandon_gc_alloc_regions();

  virtual bool is_retained_old_region(HeapRegion* hr) {
    return _retained_old_gc_alloc_region == hr;
  }

  // The context parameter is unused: this allocator has exactly one
  // region of each kind.
  virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
    return &_mutator_alloc_region;
  }

  virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
    return &_survivor_gc_alloc_region;
  }

  virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
    return &_old_gc_alloc_region;
  }

  // Total bytes used: the summary plus whatever is used in the current
  // mutator allocation region. Requires the Heap_lock to be held.
  virtual size_t used() {
    assert(Heap_lock->owner() != NULL,
           "Should be owned on this thread's behalf.");
    size_t result = _summary_bytes_used;

    // Read only once in case it is set to NULL concurrently
    HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
    if (hr != NULL) {
      result += hr->used();
    }
    return result;
  }
};
// A ParGCAllocBuffer that tracks whether it has been retired: retire()
// becomes idempotent, and the destructor checks that the buffer was
// retired before destruction.
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
  // Cleared by set_buf(), set by retire(). Initial value is established
  // by the constructor (defined in the .cpp file).
  bool _retired;

public:
  G1ParGCAllocBuffer(size_t gclab_word_size);
  virtual ~G1ParGCAllocBuffer() {
    guarantee(_retired, "Allocation buffer has not been retired");
  }

  // Installs a new backing buffer and marks this buffer as not retired.
  virtual void set_buf(HeapWord* buf) {
    ParGCAllocBuffer::set_buf(buf);
    _retired = false;
  }

  // Retires the buffer at most once; subsequent calls are no-ops.
  virtual void retire(bool end_of_gc, bool retain) {
    if (_retired) {
      return;
    }
    ParGCAllocBuffer::retire(end_of_gc, retain);
    _retired = true;
  }
};
   171 class G1ParGCAllocator : public CHeapObj<mtGC> {
   172   friend class G1ParScanThreadState;
   173 protected:
   174   G1CollectedHeap* _g1h;
   176   // The survivor alignment in effect in bytes.
   177   // == 0 : don't align survivors
   178   // != 0 : align survivors to that alignment
   179   // These values were chosen to favor the non-alignment case since some
   180   // architectures have a special compare against zero instructions.
   181   const uint _survivor_alignment_bytes;
   183   size_t _alloc_buffer_waste;
   184   size_t _undo_waste;
   186   void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
   187   void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
   189   virtual void retire_alloc_buffers() = 0;
   190   virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
   192   // Calculate the survivor space object alignment in bytes. Returns that or 0 if
   193   // there are no restrictions on survivor alignment.
   194   static uint calc_survivor_alignment_bytes() {
   195     assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
   196     if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
   197       // No need to align objects in the survivors differently, return 0
   198       // which means "survivor alignment is not used".
   199       return 0;
   200     } else {
   201       assert(SurvivorAlignmentInBytes > 0, "sanity");
   202       return SurvivorAlignmentInBytes;
   203     }
   204   }
   206 public:
   207   G1ParGCAllocator(G1CollectedHeap* g1h) :
   208     _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
   209     _alloc_buffer_waste(0), _undo_waste(0) {
   210   }
   212   static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
   214   size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
   215   size_t undo_waste() {return _undo_waste; }
   217   // Allocate word_sz words in dest, either directly into the regions or by
   218   // allocating a new PLAB. Returns the address of the allocated memory, NULL if
   219   // not successful.
   220   HeapWord* allocate_direct_or_new_plab(InCSetState dest,
   221                                         size_t word_sz,
   222                                         AllocationContext_t context);
   224   // Allocate word_sz words in the PLAB of dest.  Returns the address of the
   225   // allocated memory, NULL if not successful.
   226   HeapWord* plab_allocate(InCSetState dest,
   227                           size_t word_sz,
   228                           AllocationContext_t context) {
   229     G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
   230     if (_survivor_alignment_bytes == 0) {
   231       return buffer->allocate(word_sz);
   232     } else {
   233       return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
   234     }
   235   }
   237   HeapWord* allocate(InCSetState dest, size_t word_sz,
   238                      AllocationContext_t context) {
   239     HeapWord* const obj = plab_allocate(dest, word_sz, context);
   240     if (obj != NULL) {
   241       return obj;
   242     }
   243     return allocate_direct_or_new_plab(dest, word_sz, context);
   244   }
   246   void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
   247     if (alloc_buffer(dest, context)->contains(obj)) {
   248       assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
   249              "should contain whole object");
   250       alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
   251     } else {
   252       CollectedHeap::fill_with_object(obj, word_sz);
   253       add_to_undo_waste(word_sz);
   254     }
   255   }
   256 };
// Default per-thread GC allocator: one allocation buffer for surviving
// objects and one for tenured objects, selected by InCSetState.
class G1DefaultParGCAllocator : public G1ParGCAllocator {
  G1ParGCAllocBuffer  _surviving_alloc_buffer;
  G1ParGCAllocBuffer  _tenured_alloc_buffer;
  // Indexed by InCSetState value; presumably wired up to point at the
  // two buffers above by the constructor (defined in the .cpp file).
  G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];

public:
  G1DefaultParGCAllocator(G1CollectedHeap* g1h);

  // Returns the allocation buffer for the given destination state.
  // The allocation context parameter is unused by this allocator.
  virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
    assert(dest.is_valid(),
           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
    assert(_alloc_buffers[dest.value()] != NULL,
           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
    return _alloc_buffers[dest.value()];
  }

  virtual void retire_alloc_buffers() ;
};
   277 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP

mercurial