/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ALLOCATION_HPP
#define SHARE_VM_MEMORY_ALLOCATION_HPP

#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#ifdef COMPILER1
#include "c1/c1_globals.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2_globals.hpp"
#endif

#include <new>

#define ARENA_ALIGN_M1   (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
#define ARENA_ALIGN(x)   ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
// Every class in the virtual machine must be a subclass of
// one of the following allocation classes:
//
// For objects allocated in the resource area (see resourceArea.hpp).
// - ResourceObj
//
// For objects allocated in the C-heap (managed by malloc & free).
// - CHeapObj
//
// For objects allocated on the stack.
// - StackObj
//
// For embedded objects.
// - ValueObj
//
// For classes used as name spaces.
// - AllStatic
//
// The printable subclasses are used for debugging and define virtual
// member functions for printing. Classes that avoid allocating the
// vtbl entries in the objects should therefore not be the printable
// subclasses.
//
// The following macros and functions should be used to allocate memory
// directly in the resource area or in the C-heap:
//
//   NEW_RESOURCE_ARRAY(type, size)
//   NEW_RESOURCE_OBJ(type)
//   NEW_C_HEAP_ARRAY(type, size)
//   NEW_C_HEAP_OBJ(type)
//   char* AllocateHeap(size_t size, const char* name);
//   void  FreeHeap(void* p);
//
// C-heap allocation can be traced using +PrintHeapAllocation.
// malloc and free should therefore never be called directly.
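//
// Illustrative sketch (not part of the VM sources; the class name Foo and
// the element count are made up for this example):
//
//   class Foo : public CHeapObj {           // instances live in the C-heap
//     ...
//   };
//   Foo* f   = new Foo();                   // allocated via AllocateHeap
//   int* buf = NEW_C_HEAP_ARRAY(int, 16);   // raw C-heap array
//   ...
//   FREE_C_HEAP_ARRAY(int, buf);            // pair NEW_* with the matching FREE_*
//   delete f;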
// Base class for objects allocated in the C-heap.

// In non-product mode we introduce a super class for all allocation classes
// that supports printing.
// We avoid the superclass in product mode since some C++ compilers add
// a word of overhead for empty super classes.

#ifdef PRODUCT
#define ALLOCATION_SUPER_CLASS_SPEC
#else
#define ALLOCATION_SUPER_CLASS_SPEC : public AllocatedObj
class AllocatedObj {
 public:
  // Printing support
  void print() const;
  void print_value() const;

  virtual void print_on(outputStream* st) const;
  virtual void print_value_on(outputStream* st) const;
};
#endif

class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  void* operator new(size_t size);
  void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
  void  operator delete(void* p);
  void* new_array(size_t size);
};
// Base class for objects allocated on the stack only.
// Calling new or delete will result in a fatal error.

class StackObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  void* operator new(size_t size);
  void  operator delete(void* p);
};

// Base class for objects used as value objects.
// Calling new or delete will result in a fatal error.
//
// Portability note: Certain compilers (e.g. gcc) will
// always make classes bigger if they have a superclass, even
// if the superclass does not have any virtual methods or
// instance fields. The HotSpot implementation relies on this
// not happening. So never make a ValueObj class a direct subclass
// of this object; use the VALUE_OBJ_CLASS_SPEC class instead, e.g.,
// like this:
//
//   class A VALUE_OBJ_CLASS_SPEC {
//     ...
//   }
//
// With gcc and possibly other compilers the VALUE_OBJ_CLASS_SPEC can
// be defined as an empty string "".
//
class _ValueObj {
 public:
  void* operator new(size_t size);
  void  operator delete(void* p);
};

// Base class for classes that constitute name spaces.

class AllStatic {
 public:
  AllStatic()  { ShouldNotCallThis(); }
  ~AllStatic() { ShouldNotCallThis(); }
};
//------------------------------Chunk------------------------------------------
// Linked list of raw memory chunks
class Chunk: public CHeapObj {
  friend class VMStructs;

 protected:
  Chunk*       _next;   // Next Chunk in list
  const size_t _len;    // Size of this Chunk
 public:
  void* operator new(size_t size, size_t length);
  void  operator delete(void* p);
  Chunk(size_t length);

  enum {
    // default sizes; make them slightly smaller than 2**k to guard against
    // buddy-system style malloc implementations
#ifdef _LP64
    slack      = 40,            // [RGV] Not sure if this is right, but make it
                                //       a multiple of 8.
#else
    slack      = 20,            // suspected sizeof(Chunk) + internal malloc headers
#endif

    init_size   =  1*K - slack, // Size of first chunk
    medium_size = 10*K - slack, // Size of medium-sized chunk
    size        = 32*K - slack, // Default size of an Arena chunk (following the first)
    non_pool_size = init_size + 32 // An initial size which is not one of above
  };

  void chop();                  // Chop this chunk
  void next_chop();             // Chop next chunk
  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }

  size_t length() const         { return _len;  }
  Chunk* next()   const         { return _next; }
  void set_next(Chunk* n)       { _next = n; }
  // Boundaries of data area (possibly unused)
  char* bottom() const          { return ((char*) this) + aligned_overhead_size(); }
  char* top()    const          { return bottom() + _len; }
  bool contains(char* p) const  { return bottom() <= p && p <= top(); }

  // Start the chunk_pool cleaner task
  static void start_chunk_pool_cleaner_task();

  static void clean_chunk_pool();
};
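
// Informal sketch of a single Chunk's layout (illustration only, derived
// from bottom()/top() above; not part of the original sources):
//
//   +---------------------------+ <- (char*)this
//   | Chunk header (_next, _len)|
//   | + alignment padding       |
//   +---------------------------+ <- bottom() == this + aligned_overhead_size()
//   | _len bytes of payload     |
//   | handed out by the Arena   |
//   +---------------------------+ <- top() == bottom() + _len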
//------------------------------Arena------------------------------------------
// Fast allocation of memory
class Arena: public CHeapObj {
 protected:
  friend class ResourceMark;
  friend class HandleMark;
  friend class NoHandleMark;
  friend class VMStructs;

  Chunk* _first;                // First chunk
  Chunk* _chunk;                // current chunk
  char*  _hwm, *_max;           // High water mark and max in current chunk
  void*  grow(size_t x);        // Get a new Chunk of at least size x
  NOT_PRODUCT(size_t _size_in_bytes;)          // Size of arena (used for memory usage tracing)
  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
  friend class AllocStats;
  debug_only(void* malloc(size_t size);)
  debug_only(void* internal_malloc_4(size_t x);)
  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)

  void signal_out_of_memory(size_t request, const char* whence) const;

  void check_for_overflow(size_t request, const char* whence) const {
    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
      signal_out_of_memory(request, whence);
    }
  }

 public:
  Arena();
  Arena(size_t init_size);
  Arena(Arena* old);
  ~Arena();
  void  destruct_contents();
  char* hwm() const             { return _hwm; }

  // Fast allocate in the arena.  Common case is: pointer test + increment.
  void* Amalloc(size_t x) {
    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT), "should be a power of 2");
    x = ARENA_ALIGN(x);
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x);
    } else {
      char* old = _hwm;
      _hwm += x;
      return old;
    }
  }
  // Further assume size is padded out to words
  void* Amalloc_4(size_t x) {
    assert((x & (sizeof(char*)-1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
    check_for_overflow(x, "Arena::Amalloc_4");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x);
    } else {
      char* old = _hwm;
      _hwm += x;
      return old;
    }
  }

  // Allocate with 'double' alignment. It is 8 bytes on sparc.
  // In other cases Amalloc_D() should be the same as Amalloc_4().
  void* Amalloc_D(size_t x) {
    assert((x & (sizeof(char*)-1)) == 0, "misaligned size");
    debug_only(if (UseMallocOnly) return malloc(x);)
#if defined(SPARC) && !defined(_LP64)
#define DALIGN_M1 7
    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
    x += delta;
#endif
    check_for_overflow(x, "Arena::Amalloc_D");
    NOT_PRODUCT(inc_bytes_allocated(x);)
    if (_hwm + x > _max) {
      return grow(x); // grow() returns a result aligned >= 8 bytes.
    } else {
      char* old = _hwm;
      _hwm += x;
#if defined(SPARC) && !defined(_LP64)
      old += delta; // align to 8-bytes
#endif
      return old;
    }
  }

  // Fast delete in arena.  Common case is: NOP (except for storage reclaimed)
  void Afree(void* ptr, size_t size) {
#ifdef ASSERT
    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
    if (UseMallocOnly) return;
#endif
    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
  }

  void* Arealloc(void* old_ptr, size_t old_size, size_t new_size);

  // Move contents of this arena into an empty arena
  Arena* move_contents(Arena* empty_arena);

  // Determine if pointer belongs to this Arena or not.
  bool contains(const void* ptr) const;

  // Total of all chunks in use (not thread-safe)
  size_t used() const;

  // Total # of bytes used
  size_t size_in_bytes() const        NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0;
  void set_size_in_bytes(size_t size) NOT_PRODUCT({ _size_in_bytes = size; }) PRODUCT_RETURN;
  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
  static void free_all(char** start, char** end) PRODUCT_RETURN;

 private:
  // Reset this Arena to empty, access will trigger grow if necessary
  void reset(void) {
    _first = _chunk = NULL;
    _hwm = _max = NULL;
  }
};
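
// Illustrative use of an Arena (informal sketch; the variable name 'p' is
// made up for this example and does not appear in the VM sources):
//
//   Arena arena;
//   char* p = (char*) arena.Amalloc(64);   // bump-pointer allocation
//   ...
//   arena.Afree(p, 64);                    // only reclaims if p was the last allocation
//   // any remaining storage is released when 'arena' is destructed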
// One of the following macros must be used when allocating
// an array or object from an arena
#define NEW_ARENA_ARRAY(arena, type, size) \
  (type*) (arena)->Amalloc((size) * sizeof(type))

#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
                            (new_size) * sizeof(type) )

#define FREE_ARENA_ARRAY(arena, type, old, size) \
  (arena)->Afree((char*)(old), (size) * sizeof(type))

#define NEW_ARENA_OBJ(arena, type) \
  NEW_ARENA_ARRAY(arena, type, 1)
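
// For example (informal sketch; 'arena', 'xs' and 'n' are hypothetical):
//
//   Arena* arena = new Arena();
//   int*   xs    = NEW_ARENA_ARRAY(arena, int, n);
//   xs           = REALLOC_ARENA_ARRAY(arena, int, xs, n, 2*n);
//   FREE_ARENA_ARRAY(arena, int, xs, 2*n);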
//%note allocation_1
extern char* resource_allocate_bytes(size_t size);
extern char* resource_allocate_bytes(Thread* thread, size_t size);
extern char* resource_reallocate_bytes(char* old, size_t old_size, size_t new_size);
extern void  resource_free_bytes(char* old, size_t size);

//----------------------------------------------------------------------
// Base class for objects allocated in the resource area by default.
// Optionally, objects may be allocated on the C heap with
// new(ResourceObj::C_HEAP) Foo(...) or in an Arena with new (&arena) Foo(...).
// ResourceObj's can be allocated within other objects, but don't use
// new or delete (allocation_type is unknown). If new is used to allocate,
// use delete to deallocate.
class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
#ifdef ASSERT
 private:
  // When this object is allocated on the stack the new() operator is not
  // called, but garbage on the stack may look like a valid allocation_type.
  // Store the negated 'this' pointer when new() is called to distinguish cases.
  // Use the second array element for the verification value to distinguish garbage.
  uintptr_t _allocation_t[2];
  bool is_type_set() const;
 public:
  allocation_type get_allocation_type() const;
  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
  ResourceObj();                          // default constructor
  ResourceObj(const ResourceObj& r);      // default copy constructor
  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
  ~ResourceObj();
#endif // ASSERT

 public:
  void* operator new(size_t size, allocation_type type);
  void* operator new(size_t size, Arena* arena) {
    address res = (address)arena->Amalloc(size);
    DEBUG_ONLY(set_allocation_type(res, ARENA);)
    return res;
  }
  void* operator new(size_t size) {
    address res = (address)resource_allocate_bytes(size);
    DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
    return res;
  }
  void  operator delete(void* p);
};
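
// Illustrative use of a ResourceObj subclass (informal sketch; 'Foo' and
// 'arena' are hypothetical names, not part of the VM sources):
//
//   class Foo : public ResourceObj { ... };
//
//   Foo* a = new Foo();                        // resource area (default)
//   Foo* b = new (ResourceObj::C_HEAP) Foo();  // C heap; delete b when done
//   Foo* c = new (&arena) Foo();               // some Arena 'arena'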
// One of the following macros must be used when allocating an array
// or object to determine whether it should reside in the C heap or in
// the resource area.

#define NEW_RESOURCE_ARRAY(type, size)\
  (type*) resource_allocate_bytes((size) * sizeof(type))

#define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
  (type*) resource_allocate_bytes(thread, (size) * sizeof(type))

#define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )

#define FREE_RESOURCE_ARRAY(type, old, size)\
  resource_free_bytes((char*)(old), (size) * sizeof(type))

#define FREE_FAST(old)\
  /* nop */

#define NEW_RESOURCE_OBJ(type)\
  NEW_RESOURCE_ARRAY(type, 1)

#define NEW_C_HEAP_ARRAY(type, size)\
  (type*) (AllocateHeap((size) * sizeof(type), XSTR(type) " in " __FILE__))

#define REALLOC_C_HEAP_ARRAY(type, old, size)\
  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), XSTR(type) " in " __FILE__))

#define FREE_C_HEAP_ARRAY(type, old) \
  FreeHeap((char*)(old))

#define NEW_C_HEAP_OBJ(type)\
  NEW_C_HEAP_ARRAY(type, 1)
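
// For example (informal sketch; 'tmp', 'buf' and 'n' are hypothetical names):
//
//   // Resource-area array: reclaimed when the enclosing ResourceMark goes away.
//   jint* tmp = NEW_RESOURCE_ARRAY(jint, n);
//
//   // C-heap array: must be released explicitly.
//   char* buf = NEW_C_HEAP_ARRAY(char, n);
//   ...
//   FREE_C_HEAP_ARRAY(char, buf);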

extern bool warn_new_operator;

// for statistics
#ifndef PRODUCT
class AllocStats : StackObj {
  julong start_mallocs, start_frees;
  julong start_malloc_bytes, start_mfree_bytes, start_res_bytes;
 public:
  AllocStats();

  julong num_mallocs();   // since creation of receiver
  julong alloc_bytes();
  julong num_frees();
  julong free_bytes();
  julong resource_bytes();
  void   print();
};
#endif
//------------------------------ReallocMark---------------------------------
// Code which uses REALLOC_RESOURCE_ARRAY should check an associated
// ReallocMark, which is declared in the same scope as the reallocated
// pointer.  Any operation that could __potentially__ cause a reallocation
// should check the ReallocMark.
class ReallocMark: public StackObj {
 protected:
  NOT_PRODUCT(int _nesting;)

 public:
  ReallocMark()   PRODUCT_RETURN;
  void check()    PRODUCT_RETURN;
};
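
// Illustrative use (informal sketch; 'nesting', 'buf', 'old_len' and 'new_len'
// are made-up names for this example):
//
//   ReallocMark nesting;            // declared next to the reallocated pointer
//   ...
//   nesting.check();                // operation below may reallocate the array
//   buf = REALLOC_RESOURCE_ARRAY(jint, buf, old_len, new_len);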
#endif // SHARE_VM_MEMORY_ALLOCATION_HPP