src/share/vm/memory/allocation.inline.hpp

Thu, 26 Sep 2013 12:18:21 +0200

author
tschatzl
date
Thu, 26 Sep 2013 12:18:21 +0200
changeset 5775
461159cd7a91
parent 5614
9758d9f36299
child 6876
710a3c8b516e
child 7074
833b0f92429a
permissions
-rw-r--r--

Merge

duke@435 1 /*
simonis@4675 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
stefank@2314 26 #define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
stefank@2314 27
simonis@4675 28 #include "runtime/atomic.inline.hpp"
stefank@2314 29 #include "runtime/os.hpp"
stefank@2314 30
// Explicit C-heap memory management

// Debug tracing hooks for C-heap allocation/free; called by the wrappers
// below when PrintMallocFree is set (ASSERT builds only). Defined elsewhere.
void trace_heap_malloc(size_t size, const char* name, void *p);
void trace_heap_free(void *p);
duke@435 35
#ifndef PRODUCT
// Increments unsigned long value for statistics (not atomic on MP).
// On SPARC/X86 the 8-byte load and store are each atomic, so a concurrent
// reader never sees a torn value; the read-modify-write as a whole is still
// racy and concurrent increments may be lost. Elsewhere even the load/store
// may tear. Acceptable because this is statistics-only.
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86)
  // Sparc and X86 have atomic jlong (8 bytes) instructions
  julong value = Atomic::load((volatile jlong*)dest);
  value += add_value;
  Atomic::store((jlong)value, (volatile jlong*)dest);
#else
  // possible word-tearing during load/store
  *dest += add_value;
#endif
}
#endif // PRODUCT
duke@435 50
duke@435 51 // allocate using malloc; will fail if no memory available
nloodin@4183 52 inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
nloodin@4183 53 AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
zgu@3900 54 if (pc == 0) {
zgu@3900 55 pc = CURRENT_PC;
zgu@3900 56 }
zgu@3900 57 char* p = (char*) os::malloc(size, flags, pc);
duke@435 58 #ifdef ASSERT
zgu@3900 59 if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
duke@435 60 #endif
ccheung@4993 61 if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
ccheung@4993 62 vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
ccheung@4993 63 }
duke@435 64 return p;
duke@435 65 }
duke@435 66
nloodin@4183 67 inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
nloodin@4183 68 AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
zgu@3900 69 char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
duke@435 70 #ifdef ASSERT
zgu@3900 71 if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
duke@435 72 #endif
ccheung@4993 73 if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
ccheung@4993 74 vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
ccheung@4993 75 }
duke@435 76 return p;
duke@435 77 }
duke@435 78
// Return a block obtained from AllocateHeap/ReallocateHeap to the C heap.
// memflags should match the flags used at allocation time — presumably
// consumed by native memory tracking inside os::free; confirm there.
inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_free(p);
#endif
  os::free(p, memflags);
}
stefank@2314 85
zgu@3900 86
zgu@3900 87 template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
coleenp@5614 88 address caller_pc) throw() {
minqi@5103 89 void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
dcubed@4967 90 #ifdef ASSERT
zgu@3900 91 if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
minqi@5103 92 #endif
dcubed@4967 93 return p;
zgu@3900 94 }
zgu@3900 95
zgu@3900 96 template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
coleenp@5614 97 const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
nloodin@4183 98 void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
nloodin@4183 99 AllocFailStrategy::RETURN_NULL);
minqi@5103 100 #ifdef ASSERT
zgu@3900 101 if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
minqi@5103 102 #endif
dcubed@4967 103 return p;
minqi@5103 104 }
minqi@5103 105
// Array form: delegates to the scalar operator new (the compiler has
// already folded the element count and any array cookie into 'size').
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
      address caller_pc) throw() {
  return CHeapObj<F>::operator new(size, caller_pc);
}
minqi@5103 110
// Nothrow array form: delegates to the nothrow scalar operator new, so a
// failed allocation returns NULL instead of exiting the VM.
template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
      const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
}
zgu@3900 115
// Scalar delete: returns the block to the C heap tagged with category F.
template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
  FreeHeap(p, F);
}
minqi@5103 119
// Array delete: same as scalar delete — FreeHeap handles both.
template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
  FreeHeap(p, F);
}
zgu@3900 123
brutisso@4901 124 template <class E, MEMFLAGS F>
brutisso@4901 125 E* ArrayAllocator<E, F>::allocate(size_t length) {
brutisso@4901 126 assert(_addr == NULL, "Already in use");
brutisso@4901 127
brutisso@4901 128 _size = sizeof(E) * length;
brutisso@4901 129 _use_malloc = _size < ArrayAllocatorMallocLimit;
brutisso@4901 130
brutisso@4901 131 if (_use_malloc) {
brutisso@4901 132 _addr = AllocateHeap(_size, F);
brutisso@4901 133 if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
brutisso@4901 134 // malloc failed let's try with mmap instead
brutisso@4901 135 _use_malloc = false;
brutisso@4901 136 } else {
brutisso@4901 137 return (E*)_addr;
brutisso@4901 138 }
brutisso@4901 139 }
brutisso@4901 140
brutisso@4901 141 int alignment = os::vm_allocation_granularity();
brutisso@4901 142 _size = align_size_up(_size, alignment);
brutisso@4901 143
zgu@5053 144 _addr = os::reserve_memory(_size, NULL, alignment, F);
brutisso@4901 145 if (_addr == NULL) {
ccheung@4993 146 vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
brutisso@4901 147 }
brutisso@4901 148
dcubed@5255 149 os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
brutisso@4901 150
brutisso@4901 151 return (E*)_addr;
brutisso@4901 152 }
brutisso@4901 153
brutisso@4901 154 template<class E, MEMFLAGS F>
brutisso@4901 155 void ArrayAllocator<E, F>::free() {
brutisso@4901 156 if (_addr != NULL) {
brutisso@4901 157 if (_use_malloc) {
brutisso@4901 158 FreeHeap(_addr, F);
brutisso@4901 159 } else {
brutisso@4901 160 os::release_memory(_addr, _size);
brutisso@4901 161 }
brutisso@4901 162 _addr = NULL;
brutisso@4901 163 }
brutisso@4901 164 }
zgu@3900 165
stefank@2314 166 #endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP

mercurial