Mon, 29 Apr 2013 16:13:57 -0400
8011773: Some tests on Interned String crashed JVM with OOM
Summary: Instead of terminating the VM, throw OutOfMemoryError exceptions.
Reviewed-by: coleenp, dholmes

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
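
// StackObj and _ValueObj instances must live on the stack or be embedded in
// other objects, never allocated on the heap. Defining operator new/delete
// with ShouldNotCallThis() turns any such misuse into an immediate assertion
// failure instead of a silent heap allocation.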
void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; };
void StackObj::operator delete(void* p) { ShouldNotCallThis(); };
void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; };
void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); };
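
// Note: Metaspace::allocate() below is a TRAPS method; the CHECK_NULL macro
// makes this operator return NULL with a pending exception (such as the
// OutOfMemoryError this changeset throws) rather than terminating the VM.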
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only, TRAPS) {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             Metaspace::NonClassType, CHECK_NULL);
}

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metadata() const {
  // GC Verify checks use this in guarantees.
  // TODO: either replace them with is_metaspace_object() or remove them.
  // is_metaspace_object() is slower than this test. This test doesn't
  // seem very useful for metaspace objects anymore though.
  return !Universe::heap()->is_in_reserved(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", this);
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
    allocation_type type, MEMFLAGS flags) {
  // Should only call this with std::nothrow; use the other operator new() otherwise.
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
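// How the debug-only allocation tagging works (sketch): _allocation_t[0]
// holds ~(address-of-object + type), so the low allocation_mask bits of
// ~_allocation_t[0] recover the type, and the remaining bits must equal
// 'this' -- a cheap self-check that the object has not been clobbered or
// copied byte-wise to another address. _allocation_t[1] holds a second,
// address-dependent verification value for objects that went through
// operator new().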
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type() == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such a type). Keep it since it is a valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is an embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep current _allocation_t value;
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT

void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
}

void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
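// Layout (sketch): the pool is a singly-linked free list threaded through the
// cached Chunks themselves. Only three fixed sizes are pooled (small, medium,
// large -- matching Chunk::init_size/medium_size/size), so every chunk within
// a given pool is interchangeable; other sizes bypass the pools entirely.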
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  _NOINLINE_ void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free the excess
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}

//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 }; // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size should equal sizeof(Chunk), but in order for arena
  // allocations to come out aligned as expected the overhead must be
  // rounded up to the expected arena alignment; if sizeof(Chunk) isn't
  // already the proper size we align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   default:                 os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL; // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;      // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}

void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
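// Usage sketch (illustrative only; Amalloc and mtInternal are declared in
// allocation.hpp, not in this file):
//   Arena* a = new (mtInternal) Arena();  // binds the arena to a memory type
//   void* p = a->Amalloc(64);             // bump-pointer allocation, grows by Chunks
//   delete a;                             // destruct_contents() releases every Chunk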
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // Work around a rare race condition in which native memory tracking
  // could otherwise double-count the arena size.
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  NOT_PRODUCT(Atomic::dec(&_instance_count);)
}

void* Arena::operator new(size_t size) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = os::malloc(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags|otArena, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // Reset size before chop to avoid a rare race condition
  // in which total arena memory could exceed total chunk memory.
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls don't
// actually change the size.
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // While we have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, whence);
}

// Grow a new Chunk
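// grow() is the slow path behind Amalloc and friends: it links a fresh Chunk
// (at least Chunk::size bytes, larger for oversized requests) onto the list
// and resets the bump pointers. On allocation failure it either reports OOM
// through signal_out_of_memory() or returns NULL, depending on alloc_failmode.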
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size. Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL) {
    if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
    }
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}

// Reallocate storage in Arena.
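// Arealloc first tries to shrink or extend in place when old_ptr is the most
// recent allocation (c_old + old_size == _hwm); only otherwise does it
// allocate new space and memcpy. Callers must pass the correct old_size --
// the arena keeps no per-allocation headers.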
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
    _hwm = c_old+corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}

// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue; // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}

#ifdef ASSERT
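// UseMallocOnly (debug builds): every arena request is satisfied by an
// individual os::malloc, and the arena proper stores only the returned
// pointers. This lets malloc-level tools catch overruns on what would
// otherwise be interior arena memory; free_malloced_objects() below walks
// the stored pointers to release them.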
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak. Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}

// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}

ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product
767 #endif // Non-product