/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
#define SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/os.hpp"

// Explicit C-heap memory management

void trace_heap_malloc(size_t size, const char* name, void *p);
void trace_heap_free(void *p);

#ifndef PRODUCT
// Increments an unsigned long value for statistics (not atomic on MP).
inline void inc_stat_counter(volatile julong* dest, julong add_value) {
#if defined(SPARC) || defined(X86)
  // SPARC and X86 have atomic jlong (8-byte) load/store instructions.
  julong value = Atomic::load((volatile jlong*)dest);
  value += add_value;
  Atomic::store((jlong)value, (volatile jlong*)dest);
#else
  // Possible word-tearing during load/store.
  *dest += add_value;
#endif
}
#endif

// Allocate using malloc; will fail if no memory is available.
inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  if (pc == 0) {
    pc = CURRENT_PC;
  }
  char* p = (char*) os::malloc(size, flags, pc);
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
  #endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
  char* p = (char*) os::realloc(old, size, flags, CURRENT_PC);
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
  #endif
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) {
  #ifdef ASSERT
  if (PrintMallocFree) trace_heap_free(p);
  #endif
  os::free(p, memflags);
}
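
// Illustrative sketch (hypothetical caller, not part of the original header):
// the helpers above are used in matching pairs, with the MEMFLAGS tag telling
// native memory tracking what the allocation is for, e.g.
//
//   char* buf = AllocateHeap(1024, mtInternal);        // exits the VM on OOM
//   char* opt = AllocateHeap(1024, mtInternal, 0,
//                            AllocFailStrategy::RETURN_NULL);
//   if (opt != NULL) {                                 // RETURN_NULL may fail
//     FreeHeap(opt, mtInternal);
//   }
//   buf = ReallocateHeap(buf, 2048, mtInternal);
//   FreeHeap(buf, mtInternal);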
template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
      address caller_pc) throw() {
  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
  const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
  void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
      AllocFailStrategy::RETURN_NULL);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
      address caller_pc) throw() {
  return CHeapObj<F>::operator new(size, caller_pc);
}

template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
  const std::nothrow_t& nothrow_constant, address caller_pc) throw() {
  return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p) {
  FreeHeap(p, F);
}

template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p) {
  FreeHeap(p, F);
}
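
// Illustrative sketch (hypothetical class, not part of the original header):
// C-heap-allocated VM types derive from CHeapObj<F> so that plain new/delete
// route through AllocateHeap/FreeHeap above with the class's MEMFLAGS tag, e.g.
//
//   class SymbolCache : public CHeapObj<mtSymbol> {
//     int _entries;
//   };
//   SymbolCache* c = new SymbolCache();                 // exits the VM on OOM
//   SymbolCache* d = new (std::nothrow) SymbolCache();  // NULL on failure
//   delete d;
//   delete c;                                           // FreeHeap(p, mtSymbol)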
template <class E, MEMFLAGS F>
E* ArrayAllocator<E, F>::allocate(size_t length) {
  assert(_addr == NULL, "Already in use");

  _size = sizeof(E) * length;
  _use_malloc = _size < ArrayAllocatorMallocLimit;

  if (_use_malloc) {
    _addr = AllocateHeap(_size, F);
    if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
      // malloc failed; let's try with mmap instead.
      _use_malloc = false;
    } else {
      return (E*)_addr;
    }
  }

  int alignment = os::vm_allocation_granularity();
  _size = align_size_up(_size, alignment);

  _addr = os::reserve_memory(_size, NULL, alignment, F);
  if (_addr == NULL) {
    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
  }

  os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");

  return (E*)_addr;
}

template <class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
  if (_addr != NULL) {
    if (_use_malloc) {
      FreeHeap(_addr, F);
    } else {
      os::release_memory(_addr, _size);
    }
    _addr = NULL;
  }
}

#endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
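
// Illustrative sketch (hypothetical caller, not part of the original header):
// ArrayAllocator<E, F> uses malloc for arrays below ArrayAllocatorMallocLimit
// and reserved/committed virtual memory for larger ones; free() releases via
// whichever path allocate() took, e.g.
//
//   ArrayAllocator<jlong, mtGC> backing;
//   jlong* data = backing.allocate(length);  // malloc or mmap, by total size
//   ...
//   backing.free();                          // FreeHeap or release_memory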