/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p) { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete [](void* p) { ShouldNotCallThis(); }

void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             type, CHECK_NULL);
}

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}
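// Illustrative only: a hypothetical sketch (not part of this file) of how a metadata
// class might use the placement operator new above. "FooMetadata" is made up; real
// callers (e.g. Method, ConstantPool) compute word_size from their actual layout and
// pass the MetaspaceObj::Type matching their class (MethodType is used here purely as
// a placeholder).
//
//   FooMetadata* FooMetadata::allocate(ClassLoaderData* loader_data, TRAPS) {
//     size_t word_size = sizeof(FooMetadata) / wordSize;  // assumes word-aligned layout
//     return new (loader_data, word_size, /*read_only*/ false,
//                 MetaspaceObj::MethodType, THREAD) FooMetadata();
//   }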
void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", p2i(this));
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  return (address) operator new(size, type, flags);
}

void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                                allocation_type type, MEMFLAGS flags) throw() {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                                   allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}
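// Illustrative only: a hypothetical sketch of the two allocation paths provided by the
// placement operators above. "MyNode" is a made-up ResourceObj subclass; mtInternal is
// just an example MEMFLAGS tag.
//
//   class MyNode : public ResourceObj { /* ... */ };
//
//   // Lives in the current thread's resource area; reclaimed when the enclosing
//   // ResourceMark goes out of scope, never via delete.
//   MyNode* a = new (ResourceObj::RESOURCE_AREA, mtInternal) MyNode();
//
//   // Lives on the C heap; must be released with delete, which asserts
//   // allocated_on_C_heap() and calls FreeHeap().
//   MyNode* b = new (ResourceObj::C_HEAP, mtInternal) MyNode();
//   delete b;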
#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res)));
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new() and CollectionSetChooser(),
    // set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
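// Worked example (illustrative only) of the debug-only tagging scheme above. For an
// object at aligned address A allocated with allocation_type T (T <= allocation_mask):
//
//   set_allocation_type:  _allocation_t[0] = ~(A + T)
//   get_allocation_type:  ~_allocation_t[0]                      == A + T
//                         (A + T) & allocation_mask              == T   (the type)
//                         ~(_allocation_t[0] | allocation_mask)  == A   (must equal 'this')
//   is_type_set:          _allocation_t[1] == (uintptr_t)&_allocation_t[1] + T, so a
//                         stack or stale object whose slots hold garbage is very
//                         unlikely to satisfy both checks at once.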
ResourceObj::ResourceObj() { // default constructor
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such a type). Keep it since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  // Note: garbage may resemble a valid value.
  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
         err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  assert(allocated_on_stack(),
         err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  // Keep current _allocation_t value;
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p2i(p));
}

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all of them
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };        // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // We expect requested_size == sizeof(Chunk); if sizeof(Chunk) is not already the
  // properly aligned size, we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default: os::free(c, mtChunk);
  }
}
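// Illustrative only: how a chunk request is routed by the switch in operator new above.
// The named sizes (Chunk::tiny_size, init_size, medium_size, Chunk::size) are declared
// with the Chunk class in allocation.hpp; any other length falls through to a plain
// os::malloc / os::free pair.
//
//   Chunk* c = new (AllocFailStrategy::RETURN_NULL, Chunk::medium_size)
//                  Chunk(Chunk::medium_size);  // served from (or added to) _medium_pool
//   ...
//   delete c;                                  // returned to _medium_pool, not freed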
Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena() {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // Work around a rare race condition, which could otherwise make native
  // memory tracking double-count the arena size.
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  NOT_PRODUCT(Atomic::dec(&_instance_count);)
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags|otArena, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // Reset size before chop to avoid a rare race condition
  // that could make total arena memory exceed total chunk memory.
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size.
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Whilst have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
}
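// Illustrative only: because the plain operator new above asserts, arenas are created
// with an explicit NMT memory type ("dynamic memory type binding"), e.g.:
//
//   Arena* a = new (mtInternal) Arena();   // heap block tagged mtInternal for NMT
//   void*  p = a->Amalloc(64);             // bump-pointer allocation; grows by Chunk
//   delete a;                              // destruct_contents() + FreeHeap()
//
// mtInternal is just an example MEMFLAGS value; callers pick the tag matching the
// subsystem that owns the arena.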
// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}



// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr, old_size);   // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&                  // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {       // Still fits where it sits
    _hwm = c_old+corrected_new_size;               // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old, old_size);       // Mostly done to keep stats accurate
  return new_ptr;
}
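// Illustrative only: the three cases Arealloc handles above, for a buffer that was the
// most recent allocation in the current chunk (i.e. c_old + old_size == _hwm); "arena"
// is a hypothetical Arena*:
//
//   char* buf = (char*) arena->Amalloc(32);
//   buf = (char*) arena->Arealloc(buf, 32, 16);    // shrink: _hwm is pulled back in place
//   buf = (char*) arena->Arealloc(buf, 16, 48);    // grow in place if it still fits below _max
//   buf = (char*) arena->Arealloc(buf, 48, 4096);  // otherwise Amalloc + memcpy + Afree
//
// If the buffer is not the last allocation, only the copying path can move it.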
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif
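// Note on the debug-only UseMallocOnly mode used by malloc()/internal_malloc_4() above:
// each arena "allocation" is just a char* slot inside the chunk, and the payload itself
// lives in a separate os::malloc'd block whose address is stored in that slot. That is
// why contains() walks the chunks as arrays of char* and compares the stored pointers,
// and why free_malloced_objects() (below, non-product) frees what the slots point to.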
//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// On certain platforms, such as Mac OS X (Darwin), in the debug version, new is called
// from JDK source and causes data corruption, e.g. in
// Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair.
// Define ALLOW_OPERATOR_NEW_USAGE for platforms on which the global operator new is allowed.
//
#ifndef ALLOW_OPERATOR_NEW_USAGE
void* operator new(size_t size) throw() {
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void operator delete(void* p) {
  assert(false, "Should not call global delete");
}

void operator delete [](void* p) {
  assert(false, "Should not call global delete []");
}
#endif // ALLOW_OPERATOR_NEW_USAGE

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}
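// Illustrative only: AllocStats is a simple delta counter over the os:: allocation
// totals; a debugging session might wrap a region of interest like this:
//
//   AllocStats stats;   // snapshots os::num_mallocs, os::alloc_bytes, os::num_frees, ...
//   ... run the code being measured ...
//   stats.print();      // prints mallocs/frees/resource bytes accumulated since the snapshot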
// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product
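// Illustrative only: ReallocMark (above) is a debug aid for containers that reallocate
// their backing store out of a resource area. A hypothetical growable buffer would keep
// one and call check() before every regrow, so that growing under a more deeply nested
// ResourceMark than the one the buffer was created under trips the fatal() above instead
// of silently freeing live memory. "MyResourceBuffer" is made up:
//
//   class MyResourceBuffer VALUE_OBJ_CLASS_SPEC {
//     ReallocMark _nesting_check;
//     char*       _data;
//     // ...
//     void grow(Arena* arena, size_t old_sz, size_t new_sz) {
//       _nesting_check.check();  // assert-only: nesting level must be unchanged
//       _data = (char*) arena->Arealloc(_data, old_sz, new_sz);
//     }
//   };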