src/share/vm/memory/allocation.inline.hpp

author:      ysuenaga
date:        Tue, 28 Apr 2015 19:04:39 +0900
changeset:   7806:ed0067c67bd7
parent:      7074:833b0f92429a
child:       7994:04ff2f6cd0eb
permissions: -rw-r--r--

8076212: AllocateHeap() and ReallocateHeap() should be inlined.
Summary: NMT with detail option reports incorrect caller address on Linux.
Reviewed-by: dholmes, coleenp
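
Why inlining matters for this fix: HotSpot's CURRENT_PC and CALLER_PC capture a
NativeCallStack at the point where the expression is evaluated. While
AllocateHeap() and ReallocateHeap() were out-of-line functions, that point was
inside the allocator itself, so NMT detail reports named AllocateHeap() as the
caller of every allocation. Defining them in this header, force-inlined on GCC,
moves the capture into the real caller. Below is a minimal standalone sketch of
the effect; it is not HotSpot code, and current_pc(), alloc_outlined(), and
alloc_inlined() are hypothetical names using GCC builtins as a stand-in for
NMT's stack capture:

#include <cstdio>
#include <cstdlib>

// Stand-in for NMT's stack capture: report the PC this call returns to.
__attribute__((noinline))
static void* current_pc() { return __builtin_return_address(0); }

// Out of line: current_pc() is always invoked from this frame, so every
// allocation appears to originate here (the symptom described above).
__attribute__((noinline))
static void* alloc_outlined(std::size_t n) {
  std::printf("outlined: caller looks like %p\n", current_pc());
  return std::malloc(n);
}

// Force-inlined: the call to current_pc() is expanded into the real
// caller, so the reported PC identifies the actual allocation site.
__attribute__((always_inline))
static inline void* alloc_inlined(std::size_t n) {
  std::printf("inlined:  caller looks like %p\n", current_pc());
  return std::malloc(n);
}

int main() {
  std::free(alloc_outlined(16));
  std::free(alloc_inlined(16));
  return 0;
}

Built with optimization, the first line should report a PC inside
alloc_outlined(), while the second reports one inside main(), which is the
distinction NMT detail mode needs.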

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
#define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"

// Explicit C-heap memory management

void trace_heap_malloc(size_t size, const char* name, void *p);
void trace_heap_free(void *p);

#ifndef PRODUCT
// Increments unsigned long value for statistics (not atomic on MP).
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86)
  // Sparc and X86 have atomic jlong (8 bytes) instructions
  julong value = Atomic::load((volatile jlong*)dest);
  value += add_value;
  Atomic::store((jlong)value, (volatile jlong*)dest);
#else
  // possible word-tearing during load/store
  *dest += add_value;
#endif
}
#endif

// allocate using malloc; will fail if no memory available
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
    const NativeCallStack& stack,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::malloc(size, flags, stack);
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
  #endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

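// Force inlining on GCC so that CURRENT_PC below is evaluated in the real
// caller's frame; an out-of-line AllocateHeap() would make NMT detail mode
// attribute every allocation to AllocateHeap() itself (see 8076212).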
#ifdef __GNUC__
__attribute__((always_inline))
#endif
inline char* AllocateHeap(size_t size, MEMFLAGS flags,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
}

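// Same rationale as above: keep CURRENT_PC in the caller's frame so the
// NMT detail report names the true allocation site.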
#ifdef __GNUC__
__attribute__((always_inline))
#endif
inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
  #endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_free(p);
  #endif
  os::free(p, memflags);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
      const NativeCallStack& stack) throw() {
  void* p = (void*)AllocateHeap(size, F, stack);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
  const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
  void* p = (void*)AllocateHeap(size, F, stack,
      AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
  const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
      const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, stack);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
  throw() {
  return CHeapObj<F>::operator new(size, CALLER_PC);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
  const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, stack);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
  const std::nothrow_t& nothrow_constant) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p) {
  FreeHeap(p, F);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p) {
  FreeHeap(p, F);
}

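// ArrayAllocator backs small arrays with malloc and large ones with mmap:
// requests below ArrayAllocatorMallocLimit go through AllocateHeap(), while
// larger requests (or malloc failures of at least one allocation granule)
// fall back to reserved-and-committed virtual memory.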
template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
  assert(_addr == NULL, "Already in use");

  _size = sizeof(E) * length;
  _use_malloc = _size < ArrayAllocatorMallocLimit;

  if (_use_malloc) {
    _addr = AllocateHeap(_size, F);
    if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
      // malloc failed; let's try with mmap instead
      _use_malloc = false;
    } else {
      return (E*)_addr;
    }
  }

  int alignment = os::vm_allocation_granularity();
  _size = align_size_up(_size, alignment);

  _addr = os::reserve_memory(_size, NULL, alignment, F);
  if (_addr == NULL) {
    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }

  os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");

  return (E*)_addr;
}

template<class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
  if (_addr != NULL) {
    if (_use_malloc) {
      FreeHeap(_addr, F);
    } else {
      os::release_memory(_addr, _size);
    }
    _addr = NULL;
  }
}

#endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
