/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
aoqi@0: * aoqi@0: */ aoqi@0: aoqi@0: #ifndef SHARE_VM_MEMORY_ALLOCATION_HPP aoqi@0: #define SHARE_VM_MEMORY_ALLOCATION_HPP aoqi@0: aoqi@0: #include "runtime/globals.hpp" aoqi@0: #include "utilities/globalDefinitions.hpp" aoqi@0: #include "utilities/macros.hpp" aoqi@0: #ifdef COMPILER1 aoqi@0: #include "c1/c1_globals.hpp" aoqi@0: #endif aoqi@0: #ifdef COMPILER2 aoqi@0: #include "opto/c2_globals.hpp" aoqi@0: #endif aoqi@0: aoqi@0: #include aoqi@0: aoqi@0: #define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1) aoqi@0: #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1)) aoqi@0: #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK) aoqi@0: aoqi@0: aoqi@0: // noinline attribute aoqi@0: #ifdef _WINDOWS aoqi@0: #define _NOINLINE_ __declspec(noinline) aoqi@0: #else aoqi@0: #if __GNUC__ < 3 // gcc 2.x does not support noinline attribute aoqi@0: #define _NOINLINE_ aoqi@0: #else aoqi@0: #define _NOINLINE_ __attribute__ ((noinline)) aoqi@0: #endif aoqi@0: #endif aoqi@0: aoqi@0: class AllocFailStrategy { aoqi@0: public: aoqi@0: enum AllocFailEnum { EXIT_OOM, RETURN_NULL }; aoqi@0: }; aoqi@0: typedef AllocFailStrategy::AllocFailEnum AllocFailType; aoqi@0: aoqi@0: // All classes in the virtual machine must be subclassed aoqi@0: // by one of the following allocation classes: aoqi@0: // aoqi@0: // For objects allocated in the resource area (see resourceArea.hpp). aoqi@0: // - ResourceObj aoqi@0: // aoqi@0: // For objects allocated in the C-heap (managed by: free & malloc). aoqi@0: // - CHeapObj aoqi@0: // aoqi@0: // For objects allocated on the stack. aoqi@0: // - StackObj aoqi@0: // aoqi@0: // For embedded objects. aoqi@0: // - ValueObj aoqi@0: // aoqi@0: // For classes used as name spaces. aoqi@0: // - AllStatic aoqi@0: // aoqi@0: // For classes in Metaspace (class data) aoqi@0: // - MetaspaceObj aoqi@0: // aoqi@0: // The printable subclasses are used for debugging and define virtual aoqi@0: // member functions for printing. 
// Classes that avoid allocating the
// vtbl entries in the objects should therefore not be the printable
// subclasses.
//
// The following macros and function should be used to allocate memory
// directly in the resource area or in the C-heap. The _OBJ variants
// of the NEW/FREE_C_HEAP macros are used for alloc/dealloc of simple
// objects which are not inherited from CHeapObj; note that constructor
// and destructor are not called. The preferable way to allocate objects
// is using the new operator.
//
// WARNING: The array variant must only be used for a homogeneous array
// where all objects are of the exact type specified. If subtypes are
// stored in the array then you must pay attention to calling destructors
// as needed.
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size)
//   NEW_C_HEAP_OBJ(type, memflags)
//   FREE_C_HEAP_ARRAY(type, old, memflags)
//   FREE_C_HEAP_OBJ(objname, type, memflags)
//   char* AllocateHeap(size_t size, const char* name);
//   void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.

// Base class for objects allocated in the C-heap.

// In non product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.
aoqi@0: aoqi@0: #ifdef PRODUCT aoqi@0: #define ALLOCATION_SUPER_CLASS_SPEC aoqi@0: #else aoqi@0: #define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj aoqi@0: class AllocatedObj { aoqi@0: public: aoqi@0: // Printing support aoqi@0: void print() const; aoqi@0: void print_value() const; aoqi@0: aoqi@0: virtual void print_on(outputStream* st) const; aoqi@0: virtual void print_value_on(outputStream* st) const; aoqi@0: }; aoqi@0: #endif aoqi@0: aoqi@0: aoqi@0: /* aoqi@0: * MemoryType bitmap layout: aoqi@0: * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 | aoqi@0: * | memory type | object | reserved | aoqi@0: * | | type | | aoqi@0: */ aoqi@0: enum MemoryType { aoqi@0: // Memory type by sub systems. It occupies lower byte. aoqi@0: mtNone = 0x0000, // undefined aoqi@0: mtClass = 0x0100, // memory class for Java classes aoqi@0: mtThread = 0x0200, // memory for thread objects aoqi@0: mtThreadStack = 0x0300, aoqi@0: mtCode = 0x0400, // memory for generated code aoqi@0: mtGC = 0x0500, // memory for GC aoqi@0: mtCompiler = 0x0600, // memory for compiler aoqi@0: mtInternal = 0x0700, // memory used by VM, but does not belong to aoqi@0: // any of above categories, and not used for aoqi@0: // native memory tracking aoqi@0: mtOther = 0x0800, // memory not used by VM aoqi@0: mtSymbol = 0x0900, // symbol aoqi@0: mtNMT = 0x0A00, // memory used by native memory tracking aoqi@0: mtChunk = 0x0B00, // chunk that holds content of arenas aoqi@0: mtJavaHeap = 0x0C00, // Java heap aoqi@0: mtClassShared = 0x0D00, // class data sharing aoqi@0: mtTest = 0x0E00, // Test type for verifying NMT aoqi@0: mtTracing = 0x0F00, // memory used for Tracing aoqi@0: mt_number_of_types = 0x000F, // number of memory types (mtDontTrack aoqi@0: // is not included as validate type) aoqi@0: mtDontTrack = 0x0F00, // memory we do not or cannot track aoqi@0: mt_masks = 0x7F00, aoqi@0: aoqi@0: // object type mask aoqi@0: otArena = 0x0010, // an arena object aoqi@0: otNMTRecorder = 0x0020, // memory recorder 
object aoqi@0: ot_masks = 0x00F0 aoqi@0: }; aoqi@0: aoqi@0: #define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type) aoqi@0: #define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone) aoqi@0: #define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks) aoqi@0: aoqi@0: #define IS_ARENA_OBJ(flags) ((flags & ot_masks) == otArena) aoqi@0: #define IS_NMT_RECORDER(flags) ((flags & ot_masks) == otNMTRecorder) aoqi@0: #define NMT_CAN_TRACK(flags) (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack))) aoqi@0: aoqi@0: typedef unsigned short MEMFLAGS; aoqi@0: aoqi@0: #if INCLUDE_NMT aoqi@0: aoqi@0: extern bool NMT_track_callsite; aoqi@0: aoqi@0: #else aoqi@0: aoqi@0: const bool NMT_track_callsite = false; aoqi@0: aoqi@0: #endif // INCLUDE_NMT aoqi@0: aoqi@0: // debug build does not inline aoqi@0: #if defined(_NMT_NOINLINE_) aoqi@0: #define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0) aoqi@0: #define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0) aoqi@0: #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0) aoqi@0: #else aoqi@0: #define CURRENT_PC (NMT_track_callsite? os::get_caller_pc(0) : 0) aoqi@0: #define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0) aoqi@0: #define CALLER_CALLER_PC (NMT_track_callsite ? 
os::get_caller_pc(2) : 0) aoqi@0: #endif aoqi@0: aoqi@0: aoqi@0: aoqi@0: template class CHeapObj ALLOCATION_SUPER_CLASS_SPEC { aoqi@0: public: aoqi@0: _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw(); aoqi@0: _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant, aoqi@0: address caller_pc = 0) throw(); aoqi@0: _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw(); aoqi@0: _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, aoqi@0: address caller_pc = 0) throw(); aoqi@0: void operator delete(void* p); aoqi@0: void operator delete [] (void* p); aoqi@0: }; aoqi@0: aoqi@0: // Base class for objects allocated on the stack only. aoqi@0: // Calling new or delete will result in fatal error. aoqi@0: aoqi@0: class StackObj ALLOCATION_SUPER_CLASS_SPEC { aoqi@0: private: aoqi@0: void* operator new(size_t size) throw(); aoqi@0: void* operator new [](size_t size) throw(); aoqi@0: #ifdef __IBMCPP__ aoqi@0: public: aoqi@0: #endif aoqi@0: void operator delete(void* p); aoqi@0: void operator delete [](void* p); aoqi@0: }; aoqi@0: aoqi@0: // Base class for objects used as value objects. aoqi@0: // Calling new or delete will result in fatal error. aoqi@0: // aoqi@0: // Portability note: Certain compilers (e.g. gcc) will aoqi@0: // always make classes bigger if it has a superclass, even aoqi@0: // if the superclass does not have any virtual methods or aoqi@0: // instance fields. The HotSpot implementation relies on this aoqi@0: // not to happen. So never make a ValueObj class a direct subclass aoqi@0: // of this object, but use the VALUE_OBJ_CLASS_SPEC class instead, e.g., aoqi@0: // like this: aoqi@0: // aoqi@0: // class A VALUE_OBJ_CLASS_SPEC { aoqi@0: // ... aoqi@0: // } aoqi@0: // aoqi@0: // With gcc and possible other compilers the VALUE_OBJ_CLASS_SPEC can aoqi@0: // be defined as a an empty string "". 
aoqi@0: // aoqi@0: class _ValueObj { aoqi@0: private: aoqi@0: void* operator new(size_t size) throw(); aoqi@0: void operator delete(void* p); aoqi@0: void* operator new [](size_t size) throw(); aoqi@0: void operator delete [](void* p); aoqi@0: }; aoqi@0: aoqi@0: aoqi@0: // Base class for objects stored in Metaspace. aoqi@0: // Calling delete will result in fatal error. aoqi@0: // aoqi@0: // Do not inherit from something with a vptr because this class does aoqi@0: // not introduce one. This class is used to allocate both shared read-only aoqi@0: // and shared read-write classes. aoqi@0: // aoqi@0: aoqi@0: class ClassLoaderData; aoqi@0: aoqi@0: class MetaspaceObj { aoqi@0: public: aoqi@0: bool is_metaspace_object() const; aoqi@0: bool is_shared() const; aoqi@0: void print_address_on(outputStream* st) const; // nonvirtual address printing aoqi@0: aoqi@0: #define METASPACE_OBJ_TYPES_DO(f) \ aoqi@0: f(Unknown) \ aoqi@0: f(Class) \ aoqi@0: f(Symbol) \ aoqi@0: f(TypeArrayU1) \ aoqi@0: f(TypeArrayU2) \ aoqi@0: f(TypeArrayU4) \ aoqi@0: f(TypeArrayU8) \ aoqi@0: f(TypeArrayOther) \ aoqi@0: f(Method) \ aoqi@0: f(ConstMethod) \ aoqi@0: f(MethodData) \ aoqi@0: f(ConstantPool) \ aoqi@0: f(ConstantPoolCache) \ aoqi@0: f(Annotation) \ aoqi@0: f(MethodCounters) aoqi@0: aoqi@0: #define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type, aoqi@0: #define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name; aoqi@0: aoqi@0: enum Type { aoqi@0: // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc aoqi@0: METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE) aoqi@0: _number_of_types aoqi@0: }; aoqi@0: aoqi@0: static const char * type_name(Type type) { aoqi@0: switch(type) { aoqi@0: METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE) aoqi@0: default: aoqi@0: ShouldNotReachHere(); aoqi@0: return NULL; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: static MetaspaceObj::Type array_type(size_t elem_size) { aoqi@0: switch (elem_size) { aoqi@0: case 1: return TypeArrayU1Type; aoqi@0: 
case 2: return TypeArrayU2Type; aoqi@0: case 4: return TypeArrayU4Type; aoqi@0: case 8: return TypeArrayU8Type; aoqi@0: default: aoqi@0: return TypeArrayOtherType; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: void* operator new(size_t size, ClassLoaderData* loader_data, aoqi@0: size_t word_size, bool read_only, aoqi@0: Type type, Thread* thread) throw(); aoqi@0: // can't use TRAPS from this header file. aoqi@0: void operator delete(void* p) { ShouldNotCallThis(); } aoqi@0: }; aoqi@0: aoqi@0: // Base class for classes that constitute name spaces. aoqi@0: aoqi@0: class AllStatic { aoqi@0: public: aoqi@0: AllStatic() { ShouldNotCallThis(); } aoqi@0: ~AllStatic() { ShouldNotCallThis(); } aoqi@0: }; aoqi@0: aoqi@0: aoqi@0: //------------------------------Chunk------------------------------------------ aoqi@0: // Linked list of raw memory chunks aoqi@0: class Chunk: CHeapObj { aoqi@0: friend class VMStructs; aoqi@0: aoqi@0: protected: aoqi@0: Chunk* _next; // Next Chunk in list aoqi@0: const size_t _len; // Size of this Chunk aoqi@0: public: aoqi@0: void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw(); aoqi@0: void operator delete(void* p); aoqi@0: Chunk(size_t length); aoqi@0: aoqi@0: enum { aoqi@0: // default sizes; make them slightly smaller than 2**k to guard against aoqi@0: // buddy-system style malloc implementations aoqi@0: #ifdef _LP64 aoqi@0: slack = 40, // [RGV] Not sure if this is right, but make it aoqi@0: // a multiple of 8. 
aoqi@0: #else aoqi@0: slack = 20, // suspected sizeof(Chunk) + internal malloc headers aoqi@0: #endif aoqi@0: aoqi@0: tiny_size = 256 - slack, // Size of first chunk (tiny) aoqi@0: init_size = 1*K - slack, // Size of first chunk (normal aka small) aoqi@0: medium_size= 10*K - slack, // Size of medium-sized chunk aoqi@0: size = 32*K - slack, // Default size of an Arena chunk (following the first) aoqi@0: non_pool_size = init_size + 32 // An initial size which is not one of above aoqi@0: }; aoqi@0: aoqi@0: void chop(); // Chop this chunk aoqi@0: void next_chop(); // Chop next chunk aoqi@0: static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); } aoqi@0: static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); } aoqi@0: aoqi@0: size_t length() const { return _len; } aoqi@0: Chunk* next() const { return _next; } aoqi@0: void set_next(Chunk* n) { _next = n; } aoqi@0: // Boundaries of data area (possibly unused) aoqi@0: char* bottom() const { return ((char*) this) + aligned_overhead_size(); } aoqi@0: char* top() const { return bottom() + _len; } aoqi@0: bool contains(char* p) const { return bottom() <= p && p <= top(); } aoqi@0: aoqi@0: // Start the chunk_pool cleaner task aoqi@0: static void start_chunk_pool_cleaner_task(); aoqi@0: aoqi@0: static void clean_chunk_pool(); aoqi@0: }; aoqi@0: aoqi@0: //------------------------------Arena------------------------------------------ aoqi@0: // Fast allocation of memory aoqi@0: class Arena : public CHeapObj { aoqi@0: protected: aoqi@0: friend class ResourceMark; aoqi@0: friend class HandleMark; aoqi@0: friend class NoHandleMark; aoqi@0: friend class VMStructs; aoqi@0: aoqi@0: Chunk *_first; // First chunk aoqi@0: Chunk *_chunk; // current chunk aoqi@0: char *_hwm, *_max; // High water mark and max in current chunk aoqi@0: // Get a new Chunk of at least size x aoqi@0: void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); aoqi@0: size_t 
_size_in_bytes; // Size of arena (used for native memory tracking) aoqi@0: aoqi@0: NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start aoqi@0: friend class AllocStats; aoqi@0: debug_only(void* malloc(size_t size);) aoqi@0: debug_only(void* internal_malloc_4(size_t x);) aoqi@0: NOT_PRODUCT(void inc_bytes_allocated(size_t x);) aoqi@0: aoqi@0: void signal_out_of_memory(size_t request, const char* whence) const; aoqi@0: aoqi@0: bool check_for_overflow(size_t request, const char* whence, aoqi@0: AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const { aoqi@0: if (UINTPTR_MAX - request < (uintptr_t)_hwm) { aoqi@0: if (alloc_failmode == AllocFailStrategy::RETURN_NULL) { aoqi@0: return false; aoqi@0: } aoqi@0: signal_out_of_memory(request, whence); aoqi@0: } aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: public: aoqi@0: Arena(); aoqi@0: Arena(size_t init_size); aoqi@0: ~Arena(); aoqi@0: void destruct_contents(); aoqi@0: char* hwm() const { return _hwm; } aoqi@0: aoqi@0: // new operators aoqi@0: void* operator new (size_t size) throw(); aoqi@0: void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw(); aoqi@0: aoqi@0: // dynamic memory type tagging aoqi@0: void* operator new(size_t size, MEMFLAGS flags) throw(); aoqi@0: void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw(); aoqi@0: void operator delete(void* p); aoqi@0: aoqi@0: // Fast allocate in the arena. Common case is: pointer test + increment. 
aoqi@0: void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { aoqi@0: assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2"); aoqi@0: x = ARENA_ALIGN(x); aoqi@0: debug_only(if (UseMallocOnly) return malloc(x);) aoqi@0: if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode)) aoqi@0: return NULL; aoqi@0: NOT_PRODUCT(inc_bytes_allocated(x);) aoqi@0: if (_hwm + x > _max) { aoqi@0: return grow(x, alloc_failmode); aoqi@0: } else { aoqi@0: char *old = _hwm; aoqi@0: _hwm += x; aoqi@0: return old; aoqi@0: } aoqi@0: } aoqi@0: // Further assume size is padded out to words aoqi@0: void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { aoqi@0: assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); aoqi@0: debug_only(if (UseMallocOnly) return malloc(x);) aoqi@0: if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode)) aoqi@0: return NULL; aoqi@0: NOT_PRODUCT(inc_bytes_allocated(x);) aoqi@0: if (_hwm + x > _max) { aoqi@0: return grow(x, alloc_failmode); aoqi@0: } else { aoqi@0: char *old = _hwm; aoqi@0: _hwm += x; aoqi@0: return old; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // Allocate with 'double' alignment. It is 8 bytes on sparc. aoqi@0: // In other cases Amalloc_D() should be the same as Amalloc_4(). aoqi@0: void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) { aoqi@0: assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); aoqi@0: debug_only(if (UseMallocOnly) return malloc(x);) aoqi@0: #if defined(SPARC) && !defined(_LP64) aoqi@0: #define DALIGN_M1 7 aoqi@0: size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm; aoqi@0: x += delta; aoqi@0: #endif aoqi@0: if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode)) aoqi@0: return NULL; aoqi@0: NOT_PRODUCT(inc_bytes_allocated(x);) aoqi@0: if (_hwm + x > _max) { aoqi@0: return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes. 
aoqi@0: } else { aoqi@0: char *old = _hwm; aoqi@0: _hwm += x; aoqi@0: #if defined(SPARC) && !defined(_LP64) aoqi@0: old += delta; // align to 8-bytes aoqi@0: #endif aoqi@0: return old; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // Fast delete in area. Common case is: NOP (except for storage reclaimed) aoqi@0: void Afree(void *ptr, size_t size) { aoqi@0: #ifdef ASSERT aoqi@0: if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory aoqi@0: if (UseMallocOnly) return; aoqi@0: #endif aoqi@0: if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr; aoqi@0: } aoqi@0: aoqi@0: void *Arealloc( void *old_ptr, size_t old_size, size_t new_size, aoqi@0: AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); aoqi@0: aoqi@0: // Move contents of this arena into an empty arena aoqi@0: Arena *move_contents(Arena *empty_arena); aoqi@0: aoqi@0: // Determine if pointer belongs to this Arena or not. aoqi@0: bool contains( const void *ptr ) const; aoqi@0: aoqi@0: // Total of all chunks in use (not thread-safe) aoqi@0: size_t used() const; aoqi@0: aoqi@0: // Total # of bytes used aoqi@0: size_t size_in_bytes() const { return _size_in_bytes; }; aoqi@0: void set_size_in_bytes(size_t size); aoqi@0: aoqi@0: static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN; aoqi@0: static void free_all(char** start, char** end) PRODUCT_RETURN; aoqi@0: aoqi@0: // how many arena instances aoqi@0: NOT_PRODUCT(static volatile jint _instance_count;) aoqi@0: private: aoqi@0: // Reset this Arena to empty, access will trigger grow if necessary aoqi@0: void reset(void) { aoqi@0: _first = _chunk = NULL; aoqi@0: _hwm = _max = NULL; aoqi@0: set_size_in_bytes(0); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: // One of the following macros must be used when allocating aoqi@0: // an array or object from an arena aoqi@0: #define NEW_ARENA_ARRAY(arena, type, size) \ aoqi@0: (type*) (arena)->Amalloc((size) * sizeof(type)) aoqi@0: aoqi@0: #define REALLOC_ARENA_ARRAY(arena, 
type, old, old_size, new_size) \ aoqi@0: (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \ aoqi@0: (new_size) * sizeof(type) ) aoqi@0: aoqi@0: #define FREE_ARENA_ARRAY(arena, type, old, size) \ aoqi@0: (arena)->Afree((char*)(old), (size) * sizeof(type)) aoqi@0: aoqi@0: #define NEW_ARENA_OBJ(arena, type) \ aoqi@0: NEW_ARENA_ARRAY(arena, type, 1) aoqi@0: aoqi@0: aoqi@0: //%note allocation_1 aoqi@0: extern char* resource_allocate_bytes(size_t size, aoqi@0: AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); aoqi@0: extern char* resource_allocate_bytes(Thread* thread, size_t size, aoqi@0: AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); aoqi@0: extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size, aoqi@0: AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); aoqi@0: extern void resource_free_bytes( char *old, size_t size ); aoqi@0: aoqi@0: //---------------------------------------------------------------------- aoqi@0: // Base class for objects allocated in the resource area per default. aoqi@0: // Optionally, objects may be allocated on the C heap with aoqi@0: // new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena) aoqi@0: // ResourceObj's can be allocated within other objects, but don't use aoqi@0: // new or delete (allocation_type is unknown). If new is used to allocate, aoqi@0: // use delete to deallocate. aoqi@0: class ResourceObj ALLOCATION_SUPER_CLASS_SPEC { aoqi@0: public: aoqi@0: enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 }; aoqi@0: static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN; aoqi@0: #ifdef ASSERT aoqi@0: private: aoqi@0: // When this object is allocated on stack the new() operator is not aoqi@0: // called but garbage on stack may look like a valid allocation_type. aoqi@0: // Store negated 'this' pointer when new() is called to distinguish cases. 
aoqi@0: // Use second array's element for verification value to distinguish garbage. aoqi@0: uintptr_t _allocation_t[2]; aoqi@0: bool is_type_set() const; aoqi@0: public: aoqi@0: allocation_type get_allocation_type() const; aoqi@0: bool allocated_on_stack() const { return get_allocation_type() == STACK_OR_EMBEDDED; } aoqi@0: bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; } aoqi@0: bool allocated_on_C_heap() const { return get_allocation_type() == C_HEAP; } aoqi@0: bool allocated_on_arena() const { return get_allocation_type() == ARENA; } aoqi@0: ResourceObj(); // default construtor aoqi@0: ResourceObj(const ResourceObj& r); // default copy construtor aoqi@0: ResourceObj& operator=(const ResourceObj& r); // default copy assignment aoqi@0: ~ResourceObj(); aoqi@0: #endif // ASSERT aoqi@0: aoqi@0: public: aoqi@0: void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw(); aoqi@0: void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw(); aoqi@0: void* operator new(size_t size, const std::nothrow_t& nothrow_constant, aoqi@0: allocation_type type, MEMFLAGS flags) throw(); aoqi@0: void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, aoqi@0: allocation_type type, MEMFLAGS flags) throw(); aoqi@0: aoqi@0: void* operator new(size_t size, Arena *arena) throw() { aoqi@0: address res = (address)arena->Amalloc(size); aoqi@0: DEBUG_ONLY(set_allocation_type(res, ARENA);) aoqi@0: return res; aoqi@0: } aoqi@0: aoqi@0: void* operator new [](size_t size, Arena *arena) throw() { aoqi@0: address res = (address)arena->Amalloc(size); aoqi@0: DEBUG_ONLY(set_allocation_type(res, ARENA);) aoqi@0: return res; aoqi@0: } aoqi@0: aoqi@0: void* operator new(size_t size) throw() { aoqi@0: address res = (address)resource_allocate_bytes(size); aoqi@0: DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) aoqi@0: return res; aoqi@0: } aoqi@0: aoqi@0: void* operator new(size_t size, const std::nothrow_t& 
nothrow_constant) throw() { aoqi@0: address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL); aoqi@0: DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);) aoqi@0: return res; aoqi@0: } aoqi@0: aoqi@0: void* operator new [](size_t size) throw() { aoqi@0: address res = (address)resource_allocate_bytes(size); aoqi@0: DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);) aoqi@0: return res; aoqi@0: } aoqi@0: aoqi@0: void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() { aoqi@0: address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL); aoqi@0: DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);) aoqi@0: return res; aoqi@0: } aoqi@0: aoqi@0: void operator delete(void* p); aoqi@0: void operator delete [](void* p); aoqi@0: }; aoqi@0: aoqi@0: // One of the following macros must be used when allocating an array aoqi@0: // or object to determine whether it should reside in the C heap on in aoqi@0: // the resource area. 
aoqi@0: aoqi@0: #define NEW_RESOURCE_ARRAY(type, size)\ aoqi@0: (type*) resource_allocate_bytes((size) * sizeof(type)) aoqi@0: aoqi@0: #define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\ aoqi@0: (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL) aoqi@0: aoqi@0: #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\ aoqi@0: (type*) resource_allocate_bytes(thread, (size) * sizeof(type)) aoqi@0: aoqi@0: #define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\ aoqi@0: (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL) aoqi@0: aoqi@0: #define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\ aoqi@0: (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type)) aoqi@0: aoqi@0: #define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\ aoqi@0: (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\ aoqi@0: (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL) aoqi@0: aoqi@0: #define FREE_RESOURCE_ARRAY(type, old, size)\ aoqi@0: resource_free_bytes((char*)(old), (size) * sizeof(type)) aoqi@0: aoqi@0: #define FREE_FAST(old)\ aoqi@0: /* nop */ aoqi@0: aoqi@0: #define NEW_RESOURCE_OBJ(type)\ aoqi@0: NEW_RESOURCE_ARRAY(type, 1) aoqi@0: aoqi@0: #define NEW_RESOURCE_OBJ_RETURN_NULL(type)\ aoqi@0: NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1) aoqi@0: aoqi@0: #define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\ aoqi@0: (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail) aoqi@0: aoqi@0: #define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ aoqi@0: (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) aoqi@0: aoqi@0: #define NEW_C_HEAP_ARRAY(type, size, memflags)\ aoqi@0: (type*) (AllocateHeap((size) * sizeof(type), memflags)) aoqi@0: aoqi@0: #define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\ aoqi@0: NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, 
AllocFailStrategy::RETURN_NULL) aoqi@0: aoqi@0: #define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\ aoqi@0: NEW_C_HEAP_ARRAY3(type, (size), memflags, (address)0, AllocFailStrategy::RETURN_NULL) aoqi@0: aoqi@0: #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\ aoqi@0: (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags)) aoqi@0: aoqi@0: #define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\ aoqi@0: (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL)) aoqi@0: aoqi@0: #define FREE_C_HEAP_ARRAY(type, old, memflags) \ aoqi@0: FreeHeap((char*)(old), memflags) aoqi@0: aoqi@0: // allocate type in heap without calling ctor aoqi@0: #define NEW_C_HEAP_OBJ(type, memflags)\ aoqi@0: NEW_C_HEAP_ARRAY(type, 1, memflags) aoqi@0: aoqi@0: #define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\ aoqi@0: NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags) aoqi@0: aoqi@0: // deallocate obj of type in heap without calling dtor aoqi@0: #define FREE_C_HEAP_OBJ(objname, memflags)\ aoqi@0: FreeHeap((char*)objname, memflags); aoqi@0: aoqi@0: // for statistics aoqi@0: #ifndef PRODUCT aoqi@0: class AllocStats : StackObj { aoqi@0: julong start_mallocs, start_frees; aoqi@0: julong start_malloc_bytes, start_mfree_bytes, start_res_bytes; aoqi@0: public: aoqi@0: AllocStats(); aoqi@0: aoqi@0: julong num_mallocs(); // since creation of receiver aoqi@0: julong alloc_bytes(); aoqi@0: julong num_frees(); aoqi@0: julong free_bytes(); aoqi@0: julong resource_bytes(); aoqi@0: void print(); aoqi@0: }; aoqi@0: #endif aoqi@0: aoqi@0: aoqi@0: //------------------------------ReallocMark--------------------------------- aoqi@0: // Code which uses REALLOC_RESOURCE_ARRAY should check an associated aoqi@0: // ReallocMark, which is declared in the same scope as the reallocated aoqi@0: // pointer. Any operation that could __potentially__ cause a reallocation aoqi@0: // should check the ReallocMark. 
aoqi@0: class ReallocMark: public StackObj { aoqi@0: protected: aoqi@0: NOT_PRODUCT(int _nesting;) aoqi@0: aoqi@0: public: aoqi@0: ReallocMark() PRODUCT_RETURN; aoqi@0: void check() PRODUCT_RETURN; aoqi@0: }; aoqi@0: aoqi@0: // Helper class to allocate arrays that may become large. aoqi@0: // Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit aoqi@0: // and uses mapped memory for larger allocations. aoqi@0: // Most OS mallocs do something similar but Solaris malloc does not revert aoqi@0: // to mapped memory for large allocations. By default ArrayAllocatorMallocLimit aoqi@0: // is set so that we always use malloc except for Solaris where we set the aoqi@0: // limit to get mapped memory. aoqi@0: template aoqi@0: class ArrayAllocator VALUE_OBJ_CLASS_SPEC { aoqi@0: char* _addr; aoqi@0: bool _use_malloc; aoqi@0: size_t _size; aoqi@0: bool _free_in_destructor; aoqi@0: public: aoqi@0: ArrayAllocator(bool free_in_destructor = true) : aoqi@0: _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { } aoqi@0: aoqi@0: ~ArrayAllocator() { aoqi@0: if (_free_in_destructor) { aoqi@0: free(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: E* allocate(size_t length); aoqi@0: void free(); aoqi@0: }; aoqi@0: aoqi@0: #endif // SHARE_VM_MEMORY_ALLOCATION_HPP