/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1   (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x)   ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
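
// A worked example of the rounding above: if ARENA_AMALLOC_ALIGNMENT is 8
// (a typical value; it is platform-defined), then ARENA_ALIGN_M1 == 0x7,
// ARENA_ALIGN_MASK == ~0x7, and
//
//   ARENA_ALIGN(13) == (13 + 7) & ~7 == 16
//   ARENA_ALIGN(16) == (16 + 7) & ~7 == 16
//
// i.e. requested sizes are rounded up to the next multiple of the arena
// allocation alignment.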

// noinline attribute
#ifdef _WINDOWS
#define _NOINLINE_ __declspec(noinline)
#else
#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
#define _NOINLINE_
#else
#define _NOINLINE_ __attribute__ ((noinline))
#endif
#endif

class AllocFailStrategy {
 public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// All classes in the virtual machine must be subclassed
// by one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// For classes in Metaspace (class data)
// - MetaspaceObj
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that avoid allocating the
// vtbl entries in the objects should therefore not be the printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap. The _OBJ variants
// of the NEW/FREE_C_HEAP macros are used to allocate and deallocate
// simple objects that do not inherit from CHeapObj; note that their
// constructors and destructors are not called. The preferable way to
// allocate objects is using the new operator.
//
// WARNING: The array variant must only be used for a homogeneous array
// where all objects are of the exact type specified. If subtypes are
// stored in the array then you must pay attention to calling destructors
// as needed.
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size, memflags)
//   NEW_C_HEAP_OBJ(type, memflags)
//   FREE_C_HEAP_ARRAY(type, old, memflags)
//   FREE_C_HEAP_OBJ(objname, memflags)
//   char* AllocateHeap(size_t size, MEMFLAGS flags);
//   void  FreeHeap(void* p, MEMFLAGS flags);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
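
// A minimal usage sketch of the macros above (illustrative only; the
// element type and sizes are made up for the example):
//
//   jint* buf = NEW_C_HEAP_ARRAY(jint, 100, mtInternal);  // no ctors run
//   ...
//   FREE_C_HEAP_ARRAY(jint, buf, mtInternal);             // no dtors run
//
//   char* tmp = NEW_RESOURCE_ARRAY(char, 64);  // reclaimed when the
//                                              // enclosing ResourceMark
//                                              // goes out of scope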

// Base class for objects allocated in the C-heap.

// In non product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif


/*
 * Memory types
 */
enum MemoryType {
  // Memory types by subsystem. They occupy the lower byte.
  mtJavaHeap          = 0x00,  // Java heap
  mtClass             = 0x01,  // memory class for Java classes
  mtThread            = 0x02,  // memory for thread objects
  mtThreadStack       = 0x03,
  mtCode              = 0x04,  // memory for generated code
  mtGC                = 0x05,  // memory for GC
  mtCompiler          = 0x06,  // memory for compiler
  mtInternal          = 0x07,  // memory used by VM, but does not belong to
                               // any of above categories, and not used for
                               // native memory tracking
  mtOther             = 0x08,  // memory not used by VM
  mtSymbol            = 0x09,  // symbol
  mtNMT               = 0x0A,  // memory used by native memory tracking
  mtClassShared       = 0x0B,  // class data sharing
  mtChunk             = 0x0C,  // chunk that holds content of arenas
  mtTest              = 0x0D,  // Test type for verifying NMT
  mtTracing           = 0x0E,  // memory used for Tracing
  mtNone              = 0x0F,  // undefined
  mt_number_of_types  = 0x10   // number of memory types (mtDontTrack
                               // is not included as a valid type)
};

typedef MemoryType MEMFLAGS;


#if INCLUDE_NMT

extern bool NMT_track_callsite;

#else

const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

class NativeCallStack;


template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new(size_t size) throw();
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
                                 const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();
  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new [](size_t size) throw();
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                                   const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw();
  void  operator delete(void* p);
  void  operator delete [](void* p);
};
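
// A minimal usage sketch for CHeapObj (FooTable is a hypothetical class):
//
//   class FooTable : public CHeapObj<mtInternal> {
//     ...
//   };
//
//   FooTable* t = new FooTable();  // malloc'd, tagged mtInternal for NMT
//   ...
//   delete t;                      // frees the underlying C-heap storage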

// Base class for objects allocated on the stack only.
// Calling new or delete will result in fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 private:
  void* operator new(size_t size) throw();
  void* operator new [](size_t size) throw();
#ifdef __IBMCPP__
 public:
#endif
  void  operator delete(void* p);
  void  operator delete [](void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if it has a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not to happen. So never make a ValueObj class a direct subclass
// of this object, but use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 private:
  void* operator new(size_t size) throw();
  void  operator delete(void* p);
  void* operator new [](size_t size) throw();
  void  operator delete [](void* p);
};


// Base class for objects stored in Metaspace.
// Calling delete will result in fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one. This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
  bool is_metaspace_object() const;
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

#define METASPACE_OBJ_TYPES_DO(f) \
  f(Unknown) \
  f(Class) \
  f(Symbol) \
  f(TypeArrayU1) \
  f(TypeArrayU2) \
  f(TypeArrayU4) \
  f(TypeArrayU8) \
  f(TypeArrayOther) \
  f(Method) \
  f(ConstMethod) \
  f(MethodData) \
  f(ConstantPool) \
  f(ConstantPoolCache) \
  f(Annotation) \
  f(MethodCounters)

#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char* type_name(Type type) {
    switch(type) {
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

  static MetaspaceObj::Type array_type(size_t elem_size) {
    switch (elem_size) {
    case 1: return TypeArrayU1Type;
    case 2: return TypeArrayU2Type;
    case 4: return TypeArrayU4Type;
    case 8: return TypeArrayU8Type;
    default:
      return TypeArrayOtherType;
    }
  }

  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size, bool read_only,
                     Type type, Thread* thread) throw();
                     // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};
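
// To illustrate the X-macro above: METASPACE_OBJ_TYPES_DO applied to
// METASPACE_OBJ_TYPE_DECLARE expands the Type enum to
//
//   enum Type { UnknownType, ClassType, SymbolType, ..., MethodCountersType,
//               _number_of_types };
//
// and type_name() maps each enumerator back to its string, e.g.
// type_name(ClassType) returns "Class".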

// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};


//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len;  }
  Chunk* next() const           { return _next; }
  void set_next(Chunk* n)       { _next = n;    }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};
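
// To make the sizing concrete: on _LP64, init_size == 1*K - 40 == 984 bytes,
// so a normal first chunk plus the allocator's own bookkeeping still fits in
// a 1K block of a buddy-system style malloc instead of spilling into a 2K
// block. The same reasoning gives 216-byte tiny chunks and slightly-under
// 10K/32K blocks for the larger sizes.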

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
  }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena. Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
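  // A minimal caller-side sketch of the two failure modes (illustrative only):
  //
  //   void* p = arena->Amalloc(sz);  // EXIT_OOM: never returns NULL; reports
  //                                  // out-of-memory if the request fails
  //   void* q = arena->Amalloc(sz, AllocFailStrategy::RETURN_NULL);
  //   if (q == NULL) { ... }         // caller must handle the NULL result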
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in area. Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         { return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end) PRODUCT_RETURN;

private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
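
// A minimal usage sketch for the arena macros (illustrative only; the arena
// and sizes are made up for the example):
//
//   Arena arena(mtInternal);
//   int* v = NEW_ARENA_ARRAY(&arena, int, 10);
//   v = REALLOC_ARENA_ARRAY(&arena, int, v, 10, 20);
//   FREE_ARENA_ARRAY(&arena, int, v, 20);  // only reclaims storage if v was
//                                          // the last allocation (see Afree)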

//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern void resource_free_bytes( char *old, size_t size );

//----------------------------------------------------------------------
// Base class for objects allocated in the resource area per default.
// Optionally, objects may be allocated on the C heap with
// new (ResourceObj::C_HEAP, memflags) Foo(...) or in an Arena with
// new (&arena) Foo(...).
// ResourceObj's can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown). If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on stack the new() operator is not
  // called but garbage on stack may look like a valid allocation_type.
  // Store negated 'this' pointer when new() is called to distinguish cases.
  // Use second array's element for verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj();                                // default constructor
  ResourceObj(const ResourceObj& r);            // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();

  void* operator new(size_t size, Arena *arena) throw() {
    address res = (address)arena->Amalloc(size);
    DEBUG_ONLY(set_allocation_type(res, ARENA);)
    return res;
  }

  void* operator new [](size_t size, Arena *arena) throw() {
    address res = (address)arena->Amalloc(size);
    DEBUG_ONLY(set_allocation_type(res, ARENA);)
    return res;
  }

  void* operator new(size_t size) throw() {
    address res = (address)resource_allocate_bytes(size);
    DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
    address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }

  void* operator new [](size_t size) throw() {
    address res = (address)resource_allocate_bytes(size);
    DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }

  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
    address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }

  void  operator delete(void* p);
  void  operator delete [](void* p);
};
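
// A minimal usage sketch for ResourceObj (Foo is a hypothetical subclass):
//
//   Foo* a = new Foo();                          // resource area (default);
//                                                // do not delete
//   Foo* b = new (ResourceObj::C_HEAP, mtInternal) Foo();
//   delete b;                                    // C heap; must delete
//   Foo* c = new (&some_arena) Foo();            // lives as long as the
//                                                // arena; do not delete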

// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
                                    (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
    /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
  NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)

#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)

#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))

#define FREE_C_HEAP_ARRAY(type, old, memflags) \
  FreeHeap((char*)(old), memflags)

// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)

#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
  NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)

// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname, memflags)\
  FreeHeap((char*)objname, memflags);

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();  // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif


//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer. Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
 protected:
  NOT_PRODUCT(int _nesting;)

 public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};
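
// A minimal sketch of the pattern described above (illustrative only):
//
//   char* buf = NEW_RESOURCE_ARRAY(char, len);
//   ReallocMark nesting_check;                  // declared next to buf
//   ...
//   nesting_check.check();                      // guard the reallocation
//   buf = REALLOC_RESOURCE_ARRAY(char, buf, len, 2 * len);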

// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
  char*  _addr;
  bool   _use_malloc;
  size_t _size;
  bool   _free_in_destructor;
 public:
  ArrayAllocator(bool free_in_destructor = true) :
    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }

  ~ArrayAllocator() {
    if (_free_in_destructor) {
      free();
    }
  }

  E* allocate(size_t length);
  void free();
};

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP