Tue, 05 Aug 2014 15:41:12 -0700
8034056: assert(_heap_alignment >= _space_alignment) failed: heap_alignment less than space_alignment
Reviewed-by: tschatzl, tamao
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)

// noinline attribute
#ifdef _WINDOWS
#define _NOINLINE_ __declspec(noinline)
#else
#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
#define _NOINLINE_
#else
#define _NOINLINE_ __attribute__ ((noinline))
#endif
#endif

class AllocFailStrategy {
 public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;

// All classes in the virtual machine must be subclassed
// by one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// For classes in Metaspace (class data)
// - MetaspaceObj
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that must avoid allocating
// vtbl entries in their objects should therefore not use the printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap. The _OBJ variants
// of the NEW/FREE_C_HEAP macros allocate and deallocate simple objects
// that do not inherit from CHeapObj; note that the constructor and
// destructor are not called. The preferable way to allocate objects
// is using the new operator.
//
// WARNING: The array variants must only be used for a homogeneous array
// where all objects are of the exact type specified. If subtypes are
// stored in the array then one must pay attention to calling destructors
// as needed.
//
// NEW_RESOURCE_ARRAY(type, size)
// NEW_RESOURCE_OBJ(type)
// NEW_C_HEAP_ARRAY(type, size, memflags)
// NEW_C_HEAP_OBJ(type, memflags)
// FREE_C_HEAP_ARRAY(type, old, memflags)
// FREE_C_HEAP_OBJ(objname, memflags)
// char* AllocateHeap(size_t size, MEMFLAGS flags);
// void  FreeHeap(void* p, MEMFLAGS flags);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
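
// A minimal usage sketch of the macros above (illustrative only; the
// element counts are arbitrary and the variables are hypothetical):
//
//   int* counts = NEW_C_HEAP_ARRAY(int, 100, mtInternal);
//   ...
//   FREE_C_HEAP_ARRAY(int, counts, mtInternal);
//
//   char* buf = NEW_RESOURCE_ARRAY(char, 64);  // reclaimed when the
//                                              // enclosing ResourceMark
//                                              // goes out of scope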

// Base class for objects allocated in the C-heap.

// In non-product mode we introduce a superclass for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif

/*
 * Memory types
 */
enum MemoryType {
  // Memory type by sub systems. It occupies lower byte.
  mtJavaHeap          = 0x00,  // Java heap
  mtClass             = 0x01,  // memory class for Java classes
  mtThread            = 0x02,  // memory for thread objects
  mtThreadStack       = 0x03,
  mtCode              = 0x04,  // memory for generated code
  mtGC                = 0x05,  // memory for GC
  mtCompiler          = 0x06,  // memory for compiler
  mtInternal          = 0x07,  // memory used by VM, but does not belong to
                               // any of above categories, and not used for
                               // native memory tracking
  mtOther             = 0x08,  // memory not used by VM
  mtSymbol            = 0x09,  // symbol
  mtNMT               = 0x0A,  // memory used by native memory tracking
  mtClassShared       = 0x0B,  // class data sharing
  mtChunk             = 0x0C,  // chunk that holds content of arenas
  mtTest              = 0x0D,  // Test type for verifying NMT
  mtTracing           = 0x0E,  // memory used for Tracing
  mtNone              = 0x0F,  // undefined
  mt_number_of_types  = 0x10   // number of memory types (mtDontTrack
                               // is not included as a valid type)
};

typedef MemoryType MEMFLAGS;


#if INCLUDE_NMT

extern bool NMT_track_callsite;

#else

const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

class NativeCallStack;

template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  _NOINLINE_ void* operator new(size_t size, const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new(size_t size) throw();
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
    const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
    throw();
  _NOINLINE_ void* operator new [](size_t size, const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new [](size_t size) throw();
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
    const NativeCallStack& stack) throw();
  _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
    throw();
  void  operator delete(void* p);
  void  operator delete [] (void* p);
};
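
// Usage sketch (illustrative; MyEntry is a hypothetical type): a class whose
// instances live in the C-heap, tracked under the given memory type, simply
// inherits from CHeapObj:
//
//   class MyEntry : public CHeapObj<mtInternal> {
//     int _value;
//   };
//   MyEntry* e = new MyEntry();  // C-heap allocation, tagged mtInternal
//   delete e;                    // returns the storage to the C-heap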

// Base class for objects allocated on the stack only.
// Calling new or delete will result in fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 private:
  void* operator new(size_t size) throw();
  void* operator new [](size_t size) throw();
#ifdef __IBMCPP__
 public:
#endif
  void  operator delete(void* p);
  void  operator delete [](void* p);
};
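
// Usage sketch (illustrative; ScopedGuard is a hypothetical type): StackObj
// subclasses are declared as locals; the private operators above make
// heap-allocating one a compile-time error:
//
//   class ScopedGuard : public StackObj { ... };
//   { ScopedGuard g; ... }                 // ok: automatic storage
//   // ScopedGuard* p = new ScopedGuard(); // does not compile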

// Base class for objects used as value objects.
// Calling new or delete will result in fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if they have a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not happening. So never make a ValueObj class a direct subclass
// of this object, but use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 private:
  void* operator new(size_t size) throw();
  void  operator delete(void* p);
  void* operator new [](size_t size) throw();
  void  operator delete [](void* p);
};

// Base class for objects stored in Metaspace.
// Calling delete will result in fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one. This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
  bool is_metaspace_object() const;
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

#define METASPACE_OBJ_TYPES_DO(f) \
  f(Unknown) \
  f(Class) \
  f(Symbol) \
  f(TypeArrayU1) \
  f(TypeArrayU2) \
  f(TypeArrayU4) \
  f(TypeArrayU8) \
  f(TypeArrayOther) \
  f(Method) \
  f(ConstMethod) \
  f(MethodData) \
  f(ConstantPool) \
  f(ConstantPoolCache) \
  f(Annotation) \
  f(MethodCounters) \
  f(Deallocated)

#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
      METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

  static MetaspaceObj::Type array_type(size_t elem_size) {
    switch (elem_size) {
    case 1: return TypeArrayU1Type;
    case 2: return TypeArrayU2Type;
    case 4: return TypeArrayU4Type;
    case 8: return TypeArrayU8Type;
    default:
      return TypeArrayOtherType;
    }
  }
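
  // For example, a metaspace array of u2 elements is typed as follows:
  //   MetaspaceObj::array_type(sizeof(u2)) == MetaspaceObj::TypeArrayU2Type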

  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size, bool read_only,
                     Type type, Thread* thread) throw();
                     // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};

// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};

//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    tiny_size  =  256  - slack, // Size of first chunk (tiny)
    init_size  =  1*K  - slack, // Size of first chunk (normal aka small)
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len; }
  Chunk* next()   const         { return _next; }
  void set_next(Chunk* n)       { _next = n; }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};

//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone> {
 protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  MEMFLAGS    _flags;           // Memory tracking flags

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  bool check_for_overflow(size_t request, const char* whence,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
        return false;
      }
      signal_out_of_memory(request, whence);
    }
    return true;
  }

 public:
  Arena(MEMFLAGS memflag);
  Arena(MEMFLAGS memflag, size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size) throw();
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // On other platforms Amalloc_D() behaves the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
      return NULL;
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in area.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void *Arealloc(void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains(const void *ptr) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const  { return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end) PRODUCT_RETURN;

 private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};
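
// Usage sketch (illustrative; the sizes are arbitrary): an Arena hands out
// pointer-bumped allocations from its current chunk and reclaims everything
// at once when it is destructed:
//
//   Arena arena(mtInternal);
//   char* a = (char*) arena.Amalloc(128);  // aborts VM on exhaustion
//   char* b = (char*) arena.Amalloc(16, AllocFailStrategy::RETURN_NULL);
//   if (b == NULL) { /* caller handles failure */ }
//   // no per-allocation frees; ~Arena() releases all chunks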

// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
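
// For example (illustrative; 'arena' and the counts are hypothetical):
//
//   int* v = NEW_ARENA_ARRAY(&arena, int, 16);
//   v = REALLOC_ARENA_ARRAY(&arena, int, v, 16, 32);
//   FREE_ARENA_ARRAY(&arena, int, v, 32);  // reclaims only if v is topmost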

//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes(char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern void resource_free_bytes(char *old, size_t size);

//----------------------------------------------------------------------
// Base class for objects allocated in the resource area by default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena).
// ResourceObjs can also be embedded within other objects, but then don't
// use new or delete (the allocation_type is unknown). If new is used to
// allocate, use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on stack the new() operator is not
  // called but garbage on stack may look like a valid allocation_type.
  // Store negated 'this' pointer when new() is called to distinguish cases.
  // Use second array's element for verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj();                     // default constructor
  ResourceObj(const ResourceObj& r); // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();
  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags) throw();

  void* operator new(size_t size, Arena *arena) throw() {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

  void* operator new [](size_t size, Arena *arena) throw() {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }

  void* operator new(size_t size) throw() {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size) throw() {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void  operator delete(void* p);
  void  operator delete [](void* p);
};
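
// Usage sketch (illustrative; Foo is a hypothetical ResourceObj subclass).
// The placement arguments to new select where the instance lives:
//
//   Foo* r = new Foo();                                    // resource area
//   Foo* c = new (ResourceObj::C_HEAP, mtInternal) Foo();  // C-heap
//   Foo* a = new (&some_arena) Foo();                      // arena
//   delete c;  // only meaningful for the C_HEAP instance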

// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
                                    (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
  /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
  NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
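
// For example (illustrative), resource-area allocation under a ResourceMark,
// with the RETURN_NULL variant where the caller handles failure:
//
//   ResourceMark rm;
//   jbyte* buf = NEW_RESOURCE_ARRAY(jbyte, 1024);
//   jbyte* opt = NEW_RESOURCE_ARRAY_RETURN_NULL(jbyte, 1024);
//   if (opt == NULL) { /* degrade gracefully */ }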

#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL)

#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
  NEW_C_HEAP_ARRAY3(type, (size), memflags, CURRENT_PC, AllocFailStrategy::RETURN_NULL)

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))

#define FREE_C_HEAP_ARRAY(type, old, memflags) \
  FreeHeap((char*)(old), memflags)

// allocate type in heap without calling ctor
#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)

#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
  NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)

// deallocate obj of type in heap without calling dtor
#define FREE_C_HEAP_OBJ(objname, memflags)\
  FreeHeap((char*)objname, memflags);
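
// For example (illustrative; WorkerData is a hypothetical plain struct):
//
//   WorkerData* wd = NEW_C_HEAP_OBJ(WorkerData, mtGC);  // no ctor is run
//   ...
//   FREE_C_HEAP_OBJ(wd, mtGC);                          // no dtor is run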

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();  // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif

//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer.  Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
 protected:
  NOT_PRODUCT(int _nesting;)

 public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};
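
// For example (a sketch; 'n' and 'data' are hypothetical):
//
//   ReallocMark nesting_guard;
//   int* data = NEW_RESOURCE_ARRAY(int, n);
//   ...
//   nesting_guard.check();  // verify nothing unexpected happened before realloc
//   data = REALLOC_RESOURCE_ARRAY(int, data, n, 2 * n);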

// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc except for Solaris where we set the
// limit to get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
  char* _addr;
  bool _use_malloc;
  size_t _size;
  bool _free_in_destructor;
 public:
  ArrayAllocator(bool free_in_destructor = true) :
    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }

  ~ArrayAllocator() {
    if (_free_in_destructor) {
      free();
    }
  }

  E* allocate(size_t length);
  void free();
};
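
// Usage sketch (illustrative; 'length' is arbitrary):
//
//   ArrayAllocator<jint, mtGC> alloc;
//   jint* big = alloc.allocate(length);  // malloc or mmap, chosen by size
//   ...                                  // freed in ~ArrayAllocator(),
//                                        // or explicitly via alloc.free()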

#endif // SHARE_VM_MEMORY_ALLOCATION_HPP