Wed, 15 Feb 2012 10:12:55 -0800
7145537: minor tweaks to LogEvents
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1   (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x)   ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
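// For example, assuming ARENA_AMALLOC_ALIGNMENT is 8 (its actual value is
// platform-dependent and defined elsewhere): ARENA_ALIGN_M1 == 7,
// ARENA_ALIGN_MASK == ~7, and ARENA_ALIGN(13) == (13 + 7) & ~7 == 16.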

// All classes in the virtual machine must be subclasses of one of the
// following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that want to avoid allocating
// vtbl entries in their objects should therefore not use the printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap:
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size)
//   NEW_C_HEAP_OBJ(type)
//   char* AllocateHeap(size_t size, const char* name);
//   void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation;
// malloc and free should therefore never be called directly.
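
// A minimal sketch of direct C-heap allocation with the functions above
// (illustrative only; the buffer name is hypothetical):
//
//   char* buf = AllocateHeap(1024, "event buffer");  // visible to allocation tracing
//   ...
//   FreeHeap(buf);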

// Base class for objects allocated in the C-heap.

// In non-product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word of overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif

class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  void* operator new(size_t size);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
  void  operator delete(void* p);
  void* new_array(size_t size);
};
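
// A minimal sketch of a C-heap-allocated class (illustrative only;
// 'MyTable' is a hypothetical name):
//
//   class MyTable : public CHeapObj {
//     ...
//   };
//   MyTable* t = new MyTable();   // routed through the operator new above
//   delete t;                     // routed through the operator delete above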

// Base class for objects allocated on the stack only.
// Calling new or delete will result in fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  void* operator new(size_t size);
  void  operator delete(void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make a class bigger if it has a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not happening. So never make a ValueObj class a direct subclass
// of this object; use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   };
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 public:
  void* operator new(size_t size);
  void  operator delete(void* p);
};

// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};


//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: public CHeapObj {
  friend class VMStructs;

 protected:
  Chunk*       _next;   // Next Chunk in list
  const size_t _len;    // Size of this Chunk
 public:
  void* operator new(size_t size, size_t length);
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack = 40,         // [RGV] Not sure if this is right, but make it
                        // a multiple of 8.
#else
    slack = 20,         // suspected sizeof(Chunk) + internal malloc headers
#endif

    init_size     = 1*K  - slack,   // Size of first chunk
    medium_size   = 10*K - slack,   // Size of medium-sized chunk
    size          = 32*K - slack,   // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32  // An initial size which is not one of above
  };
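
  // For example, on _LP64 (slack == 40) these sizes work out to:
  //   init_size   =  1024 - 40 =   984 bytes
  //   medium_size = 10240 - 40 = 10200 bytes
  //   size        = 32768 - 40 = 32728 bytes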

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }

  size_t length() const         { return _len; }
  Chunk* next()   const         { return _next; }
  void set_next(Chunk* n)       { _next = n; }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};


//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena: public CHeapObj {
 protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  Chunk* _first;                // First chunk
  Chunk* _chunk;                // Current chunk
  char*  _hwm, *_max;           // High water mark and max in current chunk
  void* grow(size_t x);         // Get a new Chunk of at least size x
  NOT_PRODUCT(size_t _size_in_bytes;)          // Size of arena (used for memory usage tracing)
  NOT_PRODUCT(static julong _bytes_allocated;) // Total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  void check_for_overflow(size_t request, const char* whence) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      signal_out_of_memory(request, whence);
    }
  }
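
  // Worked example of the check above (assuming a 32-bit VM, so
  // UINTPTR_MAX == 0xFFFFFFFF): with _hwm == 0xFFFFF000, a request of
  // 0x2000 gives 0xFFFFFFFF - 0x2000 == 0xFFFFDFFF < 0xFFFFF000, so
  // signal_out_of_memory() fires instead of letting _hwm + x wrap around.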

 public:
  Arena();
  Arena(size_t init_size);
  Arena(Arena* old);
  ~Arena();
  void destruct_contents();
  char* hwm() const             { return _hwm; }

  // Fast allocate in the arena. Common case is: pointer test + increment.
  void* Amalloc(size_t x) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x);
    } else {
      char* old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void* Amalloc_4(size_t x) {
    assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc_4");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x);
    } else {
      char* old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x) {
    assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    check_for_overflow(x, "Arena::Amalloc_D");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x);           // grow() returns a result aligned >= 8 bytes.
    } else {
      char* old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta;             // align to 8 bytes
#endif
      return old;
    }
  }

  // Fast delete in arena. Common case is: NOP (except for storage reclaimed)
  void Afree(void* ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size);  // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void* Arealloc(void* old_ptr, size_t old_size, size_t new_size);

  // Move contents of this arena into an empty arena
  Arena* move_contents(Arena* empty_arena);

  // Determine if a pointer belongs to this Arena or not.
  bool contains(const void* ptr) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0;
  void set_size_in_bytes(size_t size)  NOT_PRODUCT({ _size_in_bytes = size; }) PRODUCT_RETURN;
  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end) PRODUCT_RETURN;

 private:
  // Reset this Arena to empty; a later access will trigger grow() if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
  }
};
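
// A minimal Arena usage sketch (illustrative only):
//
//   Arena arena;
//   void* p = arena.Amalloc(64);   // bump-pointer allocation in the current chunk
//   arena.Afree(p, 64);            // reclaimed only because p was the last allocation
//   // remaining storage is released when 'arena' goes out of scope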

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
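
// For example (illustrative only; 'arena' is a hypothetical Arena* in scope):
//
//   int* counts = NEW_ARENA_ARRAY(arena, int, 16);
//   counts = REALLOC_ARENA_ARRAY(arena, int, counts, 16, 32);
//   FREE_ARENA_ARRAY(arena, int, counts, 32);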

//%note allocation_1
extern char* resource_allocate_bytes(size_t size);
extern char* resource_allocate_bytes(Thread* thread, size_t size);
extern char* resource_reallocate_bytes(char* old, size_t old_size, size_t new_size);
extern void  resource_free_bytes(char* old, size_t size);

//----------------------------------------------------------------------
// Base class for objects allocated in the resource area by default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena) Foo(...).
// ResourceObjs can also be embedded within other objects; in that case
// neither new nor delete is used (allocation_type is unknown). If new is
// used to allocate, use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on the stack the new() operator is not
  // called, but garbage on the stack may look like a valid allocation_type.
  // Store the negated 'this' pointer when new() is called to distinguish cases.
  // Use the second array element for a verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj();                                // default constructor
  ResourceObj(const ResourceObj& r);            // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type);
  void* operator new(size_t size, Arena* arena) {
    address res = (address)arena->Amalloc(size);
    DEBUG_ONLY(set_allocation_type(res, ARENA);)
    return res;
  }
  void* operator new(size_t size) {
    address res = (address)resource_allocate_bytes(size);
    DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }
  void operator delete(void* p);
};
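
// The three explicit allocation forms described above (illustrative only;
// 'Foo' is a hypothetical ResourceObj subclass, 'arena' an Arena in scope):
//
//   Foo* a = new Foo();                        // resource area (the default)
//   Foo* b = new (ResourceObj::C_HEAP) Foo();  // C heap; delete when done
//   Foo* c = new (&arena) Foo();               // arena; freed with the arena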

// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
  /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_C_HEAP_ARRAY(type, size)\
  (type*) (AllocateHeap((size) * sizeof(type), XSTR(type) " in " __FILE__))

#define REALLOC_C_HEAP_ARRAY(type, old, size)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), XSTR(type) " in " __FILE__))

#define FREE_C_HEAP_ARRAY(type, old) \
  FreeHeap((char*)(old))

#define NEW_C_HEAP_OBJ(type)\
  NEW_C_HEAP_ARRAY(type, 1)
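
// For example (illustrative only; 'len' is a hypothetical element count):
//
//   ResourceMark rm;                              // see resourceArea.hpp
//   jchar* buf = NEW_RESOURCE_ARRAY(jchar, len);  // released when rm unwinds
//   int*   tab = NEW_C_HEAP_ARRAY(int, len);      // survives the mark
//   ...
//   FREE_C_HEAP_ARRAY(int, tab);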

extern bool warn_new_operator;

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();   // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif


//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer. Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
 protected:
  NOT_PRODUCT(int _nesting;)

 public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};
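
// A usage sketch (illustrative only; 'arr' and the lengths are hypothetical):
//
//   ReallocMark nesting_check;   // declared in the same scope as 'arr'
//   ...
//   nesting_check.check();       // before any operation that might reallocate
//   arr = REALLOC_RESOURCE_ARRAY(int, arr, old_len, new_len);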

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP