Mon, 29 Apr 2013 16:13:57 -0400
8011773: Some tests on Interned String crashed JVM with OOM
Summary: Instead of terminating the VM, throw OutOfMemoryError exceptions.
Reviewed-by: coleenp, dholmes
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1   (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x)   ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
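// Example (a standalone sketch, not part of this header): how the rounding
// math above works, assuming an alignment of 8 for illustration (the real
// value of ARENA_AMALLOC_ALIGNMENT is platform-dependent).
//
//   #define DEMO_ALIGN_M1   (((size_t)8) - 1)
//   #define DEMO_ALIGN_MASK (~((size_t)DEMO_ALIGN_M1))
//   #define DEMO_ALIGN(x)   ((((size_t)(x)) + DEMO_ALIGN_M1) & DEMO_ALIGN_MASK)
//
//   DEMO_ALIGN(13);  // == 16: adding (alignment - 1) then masking rounds up
//   DEMO_ALIGN(16);  // == 16: already-aligned sizes are unchanged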
// noinline attribute
#ifdef _WINDOWS
  #define _NOINLINE_  __declspec(noinline)
#else
  #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
    #define _NOINLINE_
  #else
    #define _NOINLINE_ __attribute__ ((noinline))
  #endif
#endif
class AllocFailStrategy {
public:
  enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
};
typedef AllocFailStrategy::AllocFailEnum AllocFailType;
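// Example (hypothetical caller sketch): with the default EXIT_OOM mode a
// failed allocation terminates the VM, so the result needs no check; with
// RETURN_NULL the caller must handle a NULL result itself, e.g. by throwing
// OutOfMemoryError as this change does for interned strings.
//
//   char* p = arena->Amalloc(len, AllocFailStrategy::RETURN_NULL);
//   if (p == NULL) {
//     // recover: report the failure or throw OutOfMemoryError
//   }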
// All classes in the virtual machine must be a subclass of
// one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by: free & malloc).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// For classes in Metaspace (class data)
// - MetaspaceObj
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that must avoid allocating
// vtbl entries in their objects should therefore not derive from the
// printable subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap, as shown in the sketch
// that follows this list:
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size, memflags)
//   NEW_C_HEAP_OBJ(type, memflags)
//   char* AllocateHeap(size_t size, const char* name);
//   void FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
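// Example (hypothetical usage sketch, assuming the surrounding VM sources):
//
//   // resource-area allocation, reclaimed when the enclosing ResourceMark exits
//   ResourceMark rm;
//   char* tmp = NEW_RESOURCE_ARRAY(char, 256);
//
//   // C-heap allocation, tagged for native memory tracking; must be freed
//   int* table = NEW_C_HEAP_ARRAY(int, 64, mtInternal);
//   FREE_C_HEAP_ARRAY(int, table, mtInternal);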
// Base class for objects allocated in the C-heap.

// In non-product mode we introduce a superclass for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word of overhead for empty superclasses.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif
/*
 * MemoryType bitmap layout:
 * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 |
 * |       memory type       |   object    |  reserved   |
 * |                         |    type     |             |
 */
enum MemoryType {
  // Memory type by sub systems. It occupies lower byte.
  mtNone              = 0x0000,  // undefined
  mtClass             = 0x0100,  // memory class for Java classes
  mtThread            = 0x0200,  // memory for thread objects
  mtThreadStack       = 0x0300,
  mtCode              = 0x0400,  // memory for generated code
  mtGC                = 0x0500,  // memory for GC
  mtCompiler          = 0x0600,  // memory for compiler
  mtInternal          = 0x0700,  // memory used by VM, but does not belong to
                                 // any of above categories, and not used for
                                 // native memory tracking
  mtOther             = 0x0800,  // memory not used by VM
  mtSymbol            = 0x0900,  // symbol
  mtNMT               = 0x0A00,  // memory used by native memory tracking
  mtChunk             = 0x0B00,  // chunk that holds content of arenas
  mtJavaHeap          = 0x0C00,  // Java heap
  mtClassShared       = 0x0D00,  // class data sharing
  mtTest              = 0x0E00,  // Test type for verifying NMT
  mt_number_of_types  = 0x000E,  // number of memory types (mtDontTrack
                                 // is not included as a valid type)
  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
  mt_masks            = 0x7F00,

  // object type mask
  otArena             = 0x0010,  // an arena object
  otNMTRecorder       = 0x0020,  // memory recorder object
  ot_masks            = 0x00F0
};

#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type)
#define HAS_VALID_MEMORY_TYPE(flags) ((flags & mt_masks) != mtNone)
#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks)

#define IS_ARENA_OBJ(flags)         ((flags & ot_masks) == otArena)
#define IS_NMT_RECORDER(flags)      ((flags & ot_masks) == otNMTRecorder)
#define NMT_CAN_TRACK(flags)        (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack)))
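// Example (hypothetical sketch): composing and decoding a MEMFLAGS value
// with the masks above. An Arena is tagged as an arena object with no fixed
// memory type; its real type is supplied dynamically at allocation time.
//
//   MEMFLAGS f = mtNone | otArena;
//   IS_ARENA_OBJ(f);                    // true:  object-type nibble says "arena"
//   HAS_VALID_MEMORY_TYPE(f);           // false: memory type is still mtNone
//   FLAGS_TO_MEMORY_TYPE(f | mtChunk);  // == mtChunk once a type is or'ed in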
typedef unsigned short MEMFLAGS;

#if INCLUDE_NMT

extern bool NMT_track_callsite;

#else

const bool NMT_track_callsite = false;

#endif // INCLUDE_NMT

// debug build does not inline
#if defined(_NMT_NOINLINE_)
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
#else
  #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(0) : 0)
  #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0)
#endif
template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
                                address caller_pc = 0);

  void  operator delete(void* p);
};
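// Example (hypothetical usage sketch): a VM-internal object that lives on
// the C-heap, tagged for native memory tracking under mtInternal.
//
//   class MyTable : public CHeapObj<mtInternal> {
//     // ...
//   };
//
//   MyTable* t  = new MyTable();                 // exits the VM on OOM
//   MyTable* t2 = new (std::nothrow) MyTable();  // returns NULL on OOM
//   if (t2 == NULL) { /* caller handles the failure */ }
//   delete t;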
// Base class for objects allocated on the stack only.
// Calling new or delete will result in fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 private:
  void* operator new(size_t size);
  void  operator delete(void* p);
};
// Base class for objects used as value objects.
// Calling new or delete will result in fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if they have a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not happening. So never make a ValueObj class a direct subclass
// of this object, but use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 private:
  void* operator new(size_t size);
  void  operator delete(void* p);
};
// Base class for objects stored in Metaspace.
// Calling delete will result in fatal error.
//
// Do not inherit from something with a vptr because this class does
// not introduce one. This class is used to allocate both shared read-only
// and shared read-write classes.
//

class ClassLoaderData;

class MetaspaceObj {
 public:
  bool is_metadata() const;
  bool is_metaspace_object() const;  // more specific test but slower
  bool is_shared() const;
  void print_address_on(outputStream* st) const;  // nonvirtual address printing

  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size, bool read_only, Thread* thread);
                     // can't use TRAPS from this header file.
  void operator delete(void* p) { ShouldNotCallThis(); }
};
// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};
//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: CHeapObj<mtChunk> {
  friend class VMStructs;

 protected:
  Chunk*       _next;     // Next Chunk in list
  const size_t _len;      // Size of this Chunk
 public:
  void* operator new(size_t size, size_t length);
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    init_size  =  1*K  - slack, // Size of first chunk
    medium_size= 10*K  - slack, // Size of medium-sized chunk
    size       = 32*K  - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }

  size_t length() const         { return _len; }
  Chunk* next()   const         { return _next; }
  void set_next(Chunk* n)       { _next = n; }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena : public CHeapObj<mtNone|otArena> {
protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  Chunk *_first;                // First chunk
  Chunk *_chunk;                // current chunk
  char *_hwm, *_max;            // High water mark and max in current chunk
  // Get a new Chunk of at least size x
  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
  size_t _size_in_bytes;        // Size of arena (used for native memory tracking)

  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  void check_for_overflow(size_t request, const char* whence) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      signal_out_of_memory(request, whence);
    }
  }
 public:
  Arena();
  Arena(size_t init_size);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // new operators
  void* operator new (size_t size);
  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);

  // dynamic memory type tagging
  void* operator new(size_t size, MEMFLAGS flags);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
  void  operator delete(void* p);

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc_4");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode);
    } else {
      char *old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
    assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    check_for_overflow(x, "Arena::Amalloc_D");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
    } else {
      char *old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }
  // Fast delete in arena.  Common case is: NOP (except for storage reclaimed)
  void Afree(void *ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }
  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);

  // Move contents of this arena into an empty arena
  Arena *move_contents(Arena *empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains( const void *ptr ) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const         { return _size_in_bytes; };
  void set_size_in_bytes(size_t size);

  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end) PRODUCT_RETURN;

  // how many arena instances
  NOT_PRODUCT(static volatile jint _instance_count;)
 private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
    set_size_in_bytes(0);
  }
};
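// Example (hypothetical sketch): the two Amalloc failure modes. The default
// EXIT_OOM terminates the VM when a new chunk cannot be obtained; passing
// RETURN_NULL lets the caller recover, which is what allows an
// OutOfMemoryError to be thrown instead of aborting (the point of this
// change).
//
//   Arena arena;
//   void* a = arena.Amalloc(64);                                 // aborts VM on OOM
//   void* b = arena.Amalloc(64, AllocFailStrategy::RETURN_NULL); // NULL on OOM
//   if (b == NULL) {
//     // clean up and throw OutOfMemoryError at a safe point
//   }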
// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
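// Example (hypothetical usage sketch): typed arena allocation via the
// macros above.
//
//   Arena arena;
//   int* buf = NEW_ARENA_ARRAY(&arena, int, 16);
//   buf = REALLOC_ARENA_ARRAY(&arena, int, buf, 16, 32);
//   FREE_ARENA_ARRAY(&arena, int, buf, 32);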
//%note allocation_1
extern char* resource_allocate_bytes(size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_allocate_bytes(Thread* thread, size_t size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern char* resource_reallocate_bytes( char *old, size_t old_size, size_t new_size,
    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
extern void resource_free_bytes( char *old, size_t size );
//----------------------------------------------------------------------
// Base class for objects allocated in the resource area per default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP, memflags) Foo(...) or in an Arena with
// new (&arena) Foo(...).
// ResourceObj's can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown). If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on stack the new() operator is not
  // called but garbage on stack may look like a valid allocation_type.
  // Store negated 'this' pointer when new() is called to distinguish cases.
  // Use second array's element for verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj(); // default constructor
  ResourceObj(const ResourceObj& r); // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant,
      allocation_type type, MEMFLAGS flags);
  void* operator new(size_t size, Arena *arena) {
      address res = (address)arena->Amalloc(size);
      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }
  void* operator new(size_t size) {
      address res = (address)resource_allocate_bytes(size);
      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
      address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
      DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }

  void  operator delete(void* p);
};
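// Example (hypothetical usage sketch): the same ResourceObj subclass placed
// three different ways.
//
//   class Foo : public ResourceObj { /* ... */ };
//
//   Foo* r = new Foo();                                   // resource area (default)
//   Foo* c = new (ResourceObj::C_HEAP, mtInternal) Foo(); // C heap; delete when done
//   Foo* a = new (&arena) Foo();                          // backed by an existing Arena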
// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
    /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_C_HEAP_ARRAY(type, size, memflags)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags))

#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))

#define FREE_C_HEAP_ARRAY(type, old, memflags) \
  FreeHeap((char*)(old), memflags)

#define NEW_C_HEAP_OBJ(type, memflags)\
  NEW_C_HEAP_ARRAY(type, 1, memflags)

#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))

#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))

#define NEW_C_HEAP_OBJ2(type, memflags, pc)\
  NEW_C_HEAP_ARRAY2(type, 1, memflags, pc)
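// Example (hypothetical usage sketch): C-heap array allocation tagged with
// an NMT category, grown and then released.
//
//   char* buf = NEW_C_HEAP_ARRAY(char, 1024, mtInternal);
//   buf = REALLOC_C_HEAP_ARRAY(char, buf, 2048, mtInternal);
//   FREE_C_HEAP_ARRAY(char, buf, mtInternal);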
extern bool warn_new_operator;

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();  // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif
//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer.  Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
 protected:
  NOT_PRODUCT(int _nesting;)

 public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};
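// Example (hypothetical usage sketch): guarding a cached pointer against a
// reallocation happening underneath it.
//
//   ReallocMark nesting_check;   // declared next to the reallocated pointer
//   int* elems = NEW_RESOURCE_ARRAY(int, len);
//   // ... before any operation that might REALLOC_RESOURCE_ARRAY 'elems' ...
//   nesting_check.check();       // debug-only sanity check; no-op in product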
// Helper class to allocate arrays that may become large.
// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
// and uses mapped memory for larger allocations.
// Most OS mallocs do something similar, but Solaris malloc does not revert
// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
// is set so that we always use malloc, except on Solaris, where the limit is
// set so that large allocations get mapped memory.
template <class E, MEMFLAGS F>
class ArrayAllocator : StackObj {
  char* _addr;
  bool _use_malloc;
  size_t _size;
 public:
  ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
  ~ArrayAllocator() { free(); }
  E* allocate(size_t length);
  void free();
};
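// Example (hypothetical usage sketch): a large array that is either
// malloc'ed or mapped depending on ArrayAllocatorMallocLimit, and freed
// automatically when the allocator goes out of scope.
//
//   {
//     ArrayAllocator<jint, mtGC> alloc;
//     jint* big = alloc.allocate(10 * M);   // M from globalDefinitions.hpp
//     // ... use big ...
//   }                                       // destructor frees the memory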
#endif // SHARE_VM_MEMORY_ALLOCATION_HPP