Fri, 28 Jun 2013 02:25:07 -0700
Merge
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
void* StackObj::operator new(size_t size)     { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)      { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size)  { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete [](void* p)   { ShouldNotCallThis(); }

void* _ValueObj::operator new(size_t size)    { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)     { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete [](void* p)  { ShouldNotCallThis(); }
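// Illustrative note (editorial, not part of the original file): subclasses of StackObj
// and _ValueObj are meant to live on the stack or be embedded in other objects, so the
// disabled operators above turn any accidental heap allocation into an immediate
// ShouldNotCallThis() failure. A minimal sketch with a hypothetical class:
//
//   class ScopedHelper : public StackObj {     // hypothetical example
//    public:
//     ScopedHelper()  { /* acquire something */ }
//     ~ScopedHelper() { /* release it */ }
//   };
//
//   void f() {
//     ScopedHelper h;                            // fine: automatic storage
//     // ScopedHelper* p = new ScopedHelper();   // would hit ShouldNotCallThis()
//   }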
void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only,
                             type, CHECK_NULL);
}
bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", this);
}
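// Illustrative note: MetaspaceObj subclasses (Klass, Method, ConstantPool, ...) are
// allocated in the metaspace of a ClassLoaderData through the placement operator new
// above. A hedged sketch of the call shape ("MyMetadata" is hypothetical):
//
//   // MyMetadata* m = new (loader_data, word_size, /*read_only=*/false,
//   //                      MetaspaceObj::SymbolType, THREAD) MyMetadata(...);
//
// The read_only flag and the MetaspaceObj::Type are used by Metaspace::allocate to
// place and track the object, and TRAPS/CHECK_NULL propagates a pending
// OutOfMemoryError back to the caller.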
void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) {
  return (address) operator new(size, type, flags);
}
void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                                allocation_type type, MEMFLAGS flags) {
  // Should only be called with std::nothrow; use the other operator new() otherwise.
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                                   allocation_type type, MEMFLAGS flags) {
  return (address)operator new(size, nothrow_constant, type, flags);
}
void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}
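// Illustrative note: the two common ways a ResourceObj subclass is created, matching
// the operators above (a minimal sketch; "Node" is a hypothetical class):
//
//   class Node : public ResourceObj {
//    public:
//     int _val;
//     Node() : _val(0) {}
//   };
//
//   // 1) Resource-area allocation: freed wholesale when the ResourceMark goes away.
//   { ResourceMark rm;
//     Node* n1 = new Node();                                   // RESOURCE_AREA
//   }
//
//   // 2) Explicit C-heap allocation: must be freed with delete, which calls FreeHeap().
//   Node* n2 = new (ResourceObj::C_HEAP, mtInternal) Node();   // C_HEAP
//   delete n2;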
#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type() == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
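// Illustrative note on the debug-only encoding above (a worked example, editorial):
// for an object at aligned address A allocated with type t, set_allocation_type()
// stores
//
//   _allocation_t[0] = ~(A + t);
//
// Since A is at least 4-byte aligned and t <= allocation_mask, the type can be
// recovered as (~_allocation_t[0]) & allocation_mask == t, and the owning address as
// ~(_allocation_t[0] | allocation_mask) == A, which is what get_allocation_type()
// asserts. _allocation_t[1] additionally records &_allocation_t[1] + t, which is what
// is_type_set() checks to tell a real operator new() call apart from stack garbage.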
ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such a type). Keep it since it is a valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and the type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is an embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}
ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}
ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep the current _allocation_t value.
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT
void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
}

void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p);
}
//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }
 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside the ThreadCritical lock, so os::malloc
    // has to be done outside the ThreadCritical lock because of NMT.
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }
  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, unlink the ones beyond the first n
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }
  // Accessors to the preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};
ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}
//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//
class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };   // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};
//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // We expect requested_size == sizeof(Chunk); if sizeof(Chunk) is not the
  // properly aligned size, we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}
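// Illustrative note: only the three standard chunk lengths (Chunk::init_size,
// Chunk::medium_size, Chunk::size, defined in allocation.hpp) are backed by the static
// ChunkPools; any other length falls through to plain os::malloc. A hedged sketch of
// what a caller effectively gets:
//
//   Chunk* c = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
//   // -> ChunkPool::small_pool()->allocate(...), reusing a cached chunk if one exists
//
//   Chunk* big = new (AllocFailStrategy::RETURN_NULL, 123456) Chunk(123456);
//   // -> os::malloc(...), returning NULL on failure because of RETURN_NULL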
void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
   default:                 os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;   // Chain on the linked list
}
void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;        // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}
void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}
//------------------------------Arena------------------------------------------
NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}

Arena::Arena() {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
  NOT_PRODUCT(Atomic::inc(&_instance_count);)
}
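// Illustrative usage sketch (editorial): an Arena is itself C-heap allocated with an
// explicit memory type, and then hands out bump-pointer allocations from its chunk
// list until it is deleted or reset.
//
//   Arena* a = new (mtInternal) Arena();    // uses Arena::operator new(size_t, MEMFLAGS)
//   void* p = a->Amalloc(64);               // bump-pointer allocation (Amalloc is in allocation.hpp)
//   void* q = a->Amalloc(Chunk::size + 1);  // too big for the current chunk, forces grow()
//   delete a;                               // destruct_contents() chops the chunk list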
Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // Work around a rare race condition in which native memory tracking
  // could otherwise double-count the arena size.
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  NOT_PRODUCT(Atomic::dec(&_instance_count);)
}
void* Arena::operator new(size_t size) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
#ifdef ASSERT
  void* p = os::malloc(size, flags|otArena, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags|otArena, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}
// Destroy this arena's contents and reset it to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // Reset the size before chopping the chunks, to avoid a rare race condition
  // in which total arena memory could briefly exceed total chunk memory.
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size.
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    _size_in_bytes = size;
    MemTracker::record_arena_size((address)this, size);
  }
}
// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // While there are Chunks before the current one
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
}
// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
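// Illustrative note: grow() is the slow path behind the inline Amalloc/AmallocWords
// fast path declared in allocation.hpp, which roughly does (hedged paraphrase, not a
// verbatim copy of that code):
//
//   // if (_hwm + aligned_size > _max)  ->  return grow(aligned_size, alloc_failmode);
//   // else                             ->  result = _hwm; _hwm += aligned_size;
//
// so a new Chunk of at least Chunk::size bytes is appended only when the current
// chunk cannot satisfy the request.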
// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&               // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {    // Still fits where it sits
    _hwm = c_old+corrected_new_size;            // Adjust hwm
    return c_old;                               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}
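// Illustrative usage sketch for Arealloc (hypothetical sizes, editorial):
//
//   char* buf = (char*) arena->Amalloc(32);
//   // ... later the buffer needs to be larger; if buf is still the most recent
//   // allocation it is extended in place, otherwise it is copied to a new block:
//   buf = (char*) arena->Arealloc(buf, 32, 128);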
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}
#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif
//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// On certain platforms, such as Mac OS X (Darwin), in debug builds, operator new is
// called from JDK source and causes data corruption, e.g. in
// Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair.
// Define ALLOW_OPERATOR_NEW_USAGE for platforms on which the global operator new is allowed.
//
#ifndef ALLOW_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size){
  assert(false, "Should not call global operator new[]");
  return 0;
}

void* operator new(size_t size, const std::nothrow_t& nothrow_constant){
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size, const std::nothrow_t& nothrow_constant){
  assert(false, "Should not call global operator new[]");
  return 0;
}

void operator delete(void* p) {
  assert(false, "Should not call global delete");
}

void operator delete [](void* p) {
  assert(false, "Should not call global delete []");
}
#endif // ALLOW_OPERATOR_NEW_USAGE
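// Illustrative note: because the global operators above are poisoned in debug builds,
// VM classes opt in to a specific allocation discipline by inheriting from one of the
// base classes in allocation.hpp, e.g. (hypothetical class):
//
//   class MyTable : public CHeapObj<mtInternal> {  // C-heap allocation, tagged for NMT
//    public:
//     int _entries;
//     MyTable() : _entries(0) {}
//   };
//
//   MyTable* t = new MyTable();   // routed to AllocateHeap with the mtInternal tag
//   delete t;                     // routed to FreeHeap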
void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}
julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}
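// Illustrative usage sketch (editorial): AllocStats snapshots the os:: malloc/free
// counters and Arena::_bytes_allocated at construction, so deltas can be printed
// around a phase of interest:
//
//   AllocStats stats;
//   // ... run the phase being measured ...
//   stats.print();   // mallocs/frees/resource bytes accumulated since the snapshot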
// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}
ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
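// Illustrative usage sketch (editorial): code that may grow a resource-allocated
// array records the ResourceArea nesting level up front and re-checks it before each
// reallocation, so growth under a nested ResourceMark is caught in debug builds:
//
//   ReallocMark nesting_check;
//   // ... possibly much later, just before growing the array:
//   nesting_check.check();   // fatal() if a nested ResourceMark was entered in between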
#endif // Non-product