/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)          { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete [](void* p)          { ShouldNotCallThis(); }

void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)          { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             type, CHECK_NULL);
}

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", p2i(this));
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  return (address) operator new(size, type, flags);
}

void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                                allocation_type type, MEMFLAGS flags) throw() {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                                   allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res)));
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new() and CollectionSetChooser(),
    // set verification value.
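    // The value stored below is the address of the _allocation_t[1] slot itself
    // plus the type bits; is_type_set() re-derives both, so stale or copied
    // garbage is unlikely to pass verification.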
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

ResourceObj::ResourceObj() { // default constructor
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such a type). Keep it since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  // Note: garbage may resemble a valid value.
  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
         err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  assert(allocated_on_stack(),
         err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  // Keep current _allocation_t value;
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
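    // Zap the type word for stack, embedded and resource-area objects so that a
    // dangling reference no longer looks like a live ResourceObj; C-heap objects
    // are zapped in operator delete() above instead.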
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free   " INTPTR_FORMAT, p2i(p));
}

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all of them
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();

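          // Truncate the list after the n-th chunk; everything from 'next' onward
          // is handed to 'cur' and freed below, outside the ThreadCritical section.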
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk), but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to the expected arena alignment.
  // If sizeof(Chunk) is not a properly aligned size, we must align it here.
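  // Note: the switch below hands the standard chunk sizes to their matching
  // ChunkPool and falls back to os::malloc() for any other length.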
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default:                 os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(init_size);
}

Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(Chunk::init_size);
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;

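  // Hand over the head of the chunk list as well; after reset() below the
  // original arena no longer owns any chunks.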
  copy->_first = _first;

  // work around a rare race condition, which could double count
  // the arena size by native memory tracking
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare race condition
  // that can make total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Whilst have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
}

// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}



// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
    _hwm = c_old+corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}


// Determine if pointer belongs to this Arena or not.
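// Under UseMallocOnly (debug only) objects are malloc'ed individually and the
// chunks merely record their addresses, so the ASSERT branch below scans those
// recorded pointers instead of checking chunk address ranges.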
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// On certain platforms, such as Mac OS X (Darwin), the debug build sees global new called
// from JDK native code (e.g. Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair),
// causing data corruption.
// Define ALLOW_OPERATOR_NEW_USAGE for platforms on which the global operator new is allowed.
//
#ifndef ALLOW_OPERATOR_NEW_USAGE
void* operator new(size_t size) throw() {
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void operator delete(void* p) {
  assert(false, "Should not call global delete");
}

void operator delete [](void* p) {
  assert(false, "Should not call global delete []");
}
#endif // ALLOW_OPERATOR_NEW_USAGE

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
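        // hwm2 is the arena's current high-water mark (passed in from
        // destruct_contents()), so only pointers below it in this last chunk
        // refer to live malloc'ed objects.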
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product
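
// Illustrative usage sketch (comment only, not compiled): how the allocation
// base classes defined above are typically combined.  MyResObj is a
// hypothetical class; ResourceMark, Amalloc() and the mtInternal flag come
// from the surrounding HotSpot headers (resourceArea.hpp / allocation.hpp).
//
//   class MyResObj : public ResourceObj {
//     int _x;
//   };
//
//   {
//     ResourceMark rm;
//     // Plain new uses ResourceObj::operator new(size_t), i.e. RESOURCE_AREA:
//     MyResObj* r = new MyResObj();
//     // Explicit C-heap allocation; must be released with delete:
//     MyResObj* h = new (ResourceObj::C_HEAP, mtInternal) MyResObj();
//     delete h;
//   } // resource-area storage is released when the ResourceMark goes away
//
//   // An Arena carves allocations out of pooled Chunks; grow() appends a new
//   // Chunk when the current one is full.
//   Arena* arena = new (mtInternal) Arena(mtInternal);
//   void* buf = arena->Amalloc(64);
//   delete arena;   // destruct_contents() chops the whole Chunk list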