/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1   (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x)   ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)


// noinline attribute
#ifdef _WINDOWS
  #define _NOINLINE_  __declspec(noinline)
#else
  #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
    #define _NOINLINE_
  #else
    #define _NOINLINE_ __attribute__ ((noinline))
  #endif
#endif

// All classes in the virtual machine must be subclassed
// by one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that avoid allocating the
// vtbl entries in the objects should therefore not be the printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap:
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size, memflags)
//   NEW_C_HEAP_OBJ(type, memflags)
//   char* AllocateHeap(size_t size, const char* name);
//   void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
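//
// For illustration only, a typical declaration and a resource allocation
// (hypothetical class name):
//
//   class Foo : public CHeapObj<mtInternal> {  // instances live in the C-heap
//     ...
//   };
//
//   int* tmp = NEW_RESOURCE_ARRAY(int, 100);   // released by the ResourceMark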

// Base class for objects allocated in the C-heap.

// In non product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif


/*
 * MemoryType bitmap layout:
 * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
 * |      memory type        |   object    |  reserved   |
 * |                         |    type     |             |
 */
enum MemoryType {
  // Memory type by sub systems. It occupies lower byte.
  mtNone              = 0x0000,  // undefined
  mtClass             = 0x0100,  // memory class for Java classes
  mtThread            = 0x0200,  // memory for thread objects
  mtThreadStack       = 0x0300,
  mtCode              = 0x0400,  // memory for generated code
  mtGC                = 0x0500,  // memory for GC
  mtCompiler          = 0x0600,  // memory for compiler
  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
                                 // any of above categories, and not used for
                                 // native memory tracking
  mtOther             = 0x0800,  // memory not used by VM
  mtSymbol            = 0x0900,  // symbol
  mtNMT               = 0x0A00,  // memory used by native memory tracking
  mtChunk             = 0x0B00,  // chunk that holds content of arenas
  mtJavaHeap          = 0x0C00,  // Java heap
  mtDontTrack         = 0x0D00,  // memory we do not or cannot track
  mt_number_of_types  = 0x000C,  // number of memory types
  mt_masks            = 0x7F00,

  // object type mask
  otArena             = 0x0010,  // an arena object
  otNMTRecorder       = 0x0020,  // memory recorder object
  ot_masks            = 0x00F0
};

#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
#define HAS_VALID_MEMORY_TYPE(flags) ((flags & mt_masks) != mtNone)
#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)

#define IS_ARENA_OBJ(flags)    ((flags & ot_masks) == otArena)
#define IS_NMT_RECORDER(flags) ((flags & ot_masks) == otNMTRecorder)
#define NMT_CAN_TRACK(flags)   (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))

typedef unsigned short MEMFLAGS;

extern bool NMT_track_callsite;
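
// For illustration only: a MEMFLAGS value carries one memory type plus an
// optional object type, and the macros above test or extract either part.
// E.g., for a (hypothetical) flag tagging an arena's backing chunk:
//
//   MEMFLAGS f = mtChunk | otArena;
//   FLAGS_TO_MEMORY_TYPE(f)  // == mtChunk (0x0B00)
//   IS_ARENA_OBJ(f)          // true
//   NMT_CAN_TRACK(f)         // true: not a recorder and not mtDontTrack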

// debug build does not inline
#if defined(_DEBUG_)
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
#else
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(0) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#endif



template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
                                address caller_pc = 0);

  void  operator delete(void* p);
};

// Base class for objects allocated on the stack only.
// Calling new or delete will result in fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  void* operator new(size_t size);
  void  operator delete(void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if they have a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not to happen. So never make a ValueObj class a direct subclass
// of this object, but use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 public:
  void* operator new(size_t size);
  void  operator delete(void* p);
};

// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};
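
// For illustration only: a sketch (hypothetical class name) of how the
// templated CHeapObj is used. The MEMFLAGS template argument tags every
// allocation of the subclass for native memory tracking.
//
//   class Tracker : public CHeapObj<mtInternal> {
//     ...
//   };
//
//   Tracker* t1 = new Tracker();                 // failure goes to VM error handling
//   Tracker* t2 = new (std::nothrow) Tracker();  // returns NULL on failure
//   delete t1;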

//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, size_t length);
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    init_size  =  1*K  - slack, // Size of first chunk
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }

  size_t length() const         { return _len;  }
  Chunk* next()   const         { return _next; }
  void set_next(Chunk* n)       { _next = n;    }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};
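
// For illustration only: with the defaults above on an LP64 build, the
// standard Arena chunk request is 32*K - slack = 32768 - 40 = 32728 bytes.
// A buddy-system malloc that rounds each request (plus its own header) up to
// a power of two can then satisfy it from a 32 KB block instead of wasting
// half of a 64 KB one.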

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone|otArena> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  void* grow(size_t x);         // Get a new Chunk of at least size x
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  void check_for_overflow(size_t request, const char* whence) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      signal_out_of_memory(request, whence);
    }
  }

 public:
  Arena();
  Arena(size_t init_size);
  Arena(Arena *old);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size);
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc_4");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x) {
    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    check_for_overflow(x, "Arena::Amalloc_D");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in area.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }
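
  // For illustration only, assuming ARENA_AMALLOC_ALIGNMENT is 8 and the
  // current chunk has room (hypothetical values):
  //
  //   char* a = (char*) arena->Amalloc(8);  // bump: _hwm advances by 8
  //   char* b = (char*) arena->Amalloc(8);  // b == a + 8
  //   arena->Afree(b, 8);                   // LIFO case: _hwm rolls back to b
  //   arena->Afree(a, 8);                   // now also last: _hwm rolls back to a
  //
  // A free that is not at the top of the arena is a NOP; that space is
  // reclaimed only when the whole arena is destructed.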

  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size );

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         { return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end)                                    PRODUCT_RETURN;

  // how many arena instances
  NOT_PRODUCT(static volatile jint _instance_count;)
private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
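
// For illustration only, assuming 'arena' is an Arena* (hypothetical names):
//
//   int* buf = NEW_ARENA_ARRAY(arena, int, 16);
//   buf = REALLOC_ARENA_ARRAY(arena, int, buf, 16, 32); // may move the data
//   FREE_ARENA_ARRAY(arena, int, buf, 32); // NOP unless buf is the last allocation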


//%note allocation_1
extern char* resource_allocate_bytes(size_t size);
extern char* resource_allocate_bytes(Thread* thread, size_t size);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size);
extern void resource_free_bytes( char *old, size_t size );

//----------------------------------------------------------------------
// Base class for objects allocated in the resource area per default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena).
// ResourceObj's can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown). If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on stack the new() operator is not
  // called but garbage on stack may look like a valid allocation_type.
  // Store negated 'this' pointer when new() is called to distinguish cases.
  // Use second array's element for verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj();                     // default constructor
  ResourceObj(const ResourceObj& r); // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
  void* operator new(size_t size, Arena *arena) {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }
  void* operator new(size_t size) {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }
  void  operator delete(void* p);
};

// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
    /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))

#define FREE_C_HEAP_ARRAY(type,old,memflags) \
  FreeHeap((char*)(old), memflags)

#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)


#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_OBJ2(type, memflags, pc)\
  NEW_C_HEAP_ARRAY2(type, 1, memflags, pc)


extern bool warn_new_operator;
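
// For illustration only: the same ResourceObj subclass (hypothetical name Foo)
// can be placed in any of the three locations; the placement determines who
// reclaims the storage:
//
//   Foo* a = new Foo();                                   // resource area,
//                                                         // freed by ResourceMark
//   Foo* b = new (ResourceObj::C_HEAP, mtInternal) Foo(); // C-heap, use delete
//   Foo* c = new (&arena) Foo();                          // arena, freed with arena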

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();  // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif


//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer.  Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
protected:
  NOT_PRODUCT(int _nesting;)

public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP