Mon, 12 Aug 2019 18:30:40 +0300
8223147: JFR Backport
8199712: Flight Recorder
8203346: JFR: Inconsistent signature of jfr_add_string_constant
8195817: JFR.stop should require name of recording
8195818: JFR.start should increase autogenerated name by one
8195819: Remove recording=x from jcmd JFR.check output
8203921: JFR thread sampling is missing fixes from JDK-8194552
8203929: Limit amount of data for JFR.dump
8203664: JFR start failure after AppCDS archive created with JFR StartFlightRecording
8003209: JFR events for network utilization
8207392: [PPC64] Implement JFR profiling
8202835: jfr/event/os/TestSystemProcess.java fails on missing events
Summary: Backport JFR from JDK11. Initial integration
Reviewed-by: neugens
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"

#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
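
// StackObj and _ValueObj instances live on the stack or are embedded in other
// objects; they are never allocated individually on the C heap. The operators
// below exist only to trap any attempt to do so.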
void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }

void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size, bool read_only,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, read_only, type, THREAD);
}

bool MetaspaceObj::is_shared() const {
  return MetaspaceShared::is_in_shared_space(this);
}

bool MetaspaceObj::is_metaspace_object() const {
  return Metaspace::contains((void*)this);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", p2i(this));
}
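
// Classes derived from ResourceObj pick their backing storage at allocation
// time: C_HEAP goes through AllocateHeap, while RESOURCE_AREA delegates to the
// plain operator new(size), which performs a resource-area allocation. A
// hypothetical use, assuming a class Foo that derives from ResourceObj:
//
//   Foo* f = new (ResourceObj::C_HEAP, mtInternal) Foo();
//
// In debug builds the chosen type is recorded in _allocation_t (see below) so
// that operator delete can verify it only frees C_HEAP objects.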
void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res = NULL;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  return (address) operator new(size, type, flags);
}

void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                                allocation_type type, MEMFLAGS flags) throw() {
  // should only call this with std::nothrow, use other operator new() otherwise
  address res = NULL;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                                   allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}

#ifdef ASSERT
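// Debug-only bookkeeping: _allocation_t[0] stores the bitwise complement of
// (object address + allocation type), so get_allocation_type() can both
// recover the type (low bits) and verify that the word still describes this
// object. _allocation_t[1] stores a second check value (its own address plus
// the type) for allocations that went through operator new().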
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res)));
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new() and CollectionSetChooser(),
    // set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

ResourceObj::ResourceObj() { // default constructor
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such a type). Keep it since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  // Note: garbage may resemble a valid value.
  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
         err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  assert(allocated_on_stack(),
         err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
  // Keep current _allocation_t value;
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT

void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
}

void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p2i(p));
}

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool: public CHeapObj<mtInternal> {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our four static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;
  static ChunkPool* _tiny_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    // should be done outside ThreadCritical lock due to NMT
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
    }
    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    }
    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    Chunk* cur = NULL;
    Chunk* next;
    {
      // if we have more than n chunks, free all of them
      ThreadCritical tc;
      if (_num_chunks > n) {
        // free chunks at end of queue, for better locality
        cur = _first;
        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

        if (cur != NULL) {
          next = cur->next();
          cur->set_next(NULL);
          cur = next;

          _num_chunks = n;
        }
      }
    }

    // Free all remaining chunks, outside of ThreadCritical
    // to avoid deadlock with NMT
    while (cur != NULL) {
      next = cur->next();
      os::free(cur, mtChunk);
      cur = next;
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _tiny_pool->free_all_but(BlocksToKeep);
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;
ChunkPool* ChunkPool::_tiny_pool   = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}

//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//
class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation
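
// Chunks of the standard sizes (tiny, init/small, medium, large) are served
// from the dedicated ChunkPools above; any other size falls through to a plain
// os::malloc. The requested length excludes the aligned Chunk header, which is
// added here before the allocation.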
void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to expected arena alignment.
  // We expect requested_size to be sizeof(Chunk); if it is not the proper (aligned) size we must align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
   default: {
     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
     }
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c); break;
   default: os::free(c, mtChunk);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;                   // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}

void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
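
// An Arena is a bump-pointer allocator backed by a singly linked list of
// Chunks: _first is the head of the list, _chunk the chunk currently being
// carved up, _hwm the next free byte in that chunk and _max its end. When a
// request does not fit, grow() appends a fresh chunk and continues from there.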
Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(init_size);
}

Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  MemTracker::record_new_arena(flag);
  set_size_in_bytes(Chunk::init_size);
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;

  // workaround for a rare race condition, which could make native memory
  // tracking double count the arena size
  size_t size = size_in_bytes();
  set_size_in_bytes(0);
  copy->set_size_in_bytes(size);
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
  MemTracker::record_arena_free(_flags);
}

void* Arena::operator new(size_t size) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Use dynamic memory type binding");
  return NULL;
}

// dynamic memory type binding
void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return (void *) AllocateHeap(size, flags, CALLER_PC);
#endif
}

void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
#ifdef ASSERT
  void* p = os::malloc(size, flags, CALLER_PC);
  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  return p;
#else
  return os::malloc(size, flags, CALLER_PC);
#endif
}

void Arena::operator delete(void* p) {
  FreeHeap(p);
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  // reset size before chop to avoid a rare race condition
  // that can have total arena memory exceed total chunk memory
  set_size_in_bytes(0);
  _first->chop();
  reset();
}

// This is a high-traffic method, but many calls actually don't
// change the size
void Arena::set_size_in_bytes(size_t size) {
  if (_size_in_bytes != size) {
    long delta = (long)(size - size_in_bytes());
    _size_in_bytes = size;
    MemTracker::record_arena_size_change(delta, _flags);
  }
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Whilst have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
}
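
// grow() is the slow path taken when the current chunk cannot satisfy a
// request: it allocates a chunk of at least Chunk::size bytes (more for giant
// requests), links it at the end of the chunk list, and bump-allocates the
// requested x bytes from the new chunk.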
// Grow a new Chunk
void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (alloc_failmode, len) Chunk(len);

  if (_chunk == NULL) {
    _chunk = k;                 // restore the previous value of _chunk
    return NULL;
  }
  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}

// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size, alloc_failmode);
    if (copy == NULL) {
      return NULL;
    }
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr, old_size);   // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
    _hwm = c_old+corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size, alloc_failmode);
  if (new_ptr == NULL) {
    return NULL;
  }
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old, old_size);       // Mostly done to keep stats accurate
  return new_ptr;
}

// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}

#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size, mtChunk));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// On certain platforms, such as Mac OS X (Darwin), in debug version, new is being called
// from jdk source and causing data corruption. Such as
//  Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair
// Define ALLOW_OPERATOR_NEW_USAGE for platforms on which the global operator new is allowed.
//
#ifndef ALLOW_OPERATOR_NEW_USAGE
void* operator new(size_t size) throw() {
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Should not call global operator new");
  return 0;
}

void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
  assert(false, "Should not call global operator new[]");
  return 0;
}

void operator delete(void* p) {
  assert(false, "Should not call global delete");
}

void operator delete [](void* p) {
  assert(false, "Should not call global delete []");
}
#endif // ALLOW_OPERATOR_NEW_USAGE

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}
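
// AllocStats snapshots the global os malloc/free counters and the arena byte
// counter at construction; the accessors report the deltas since then. A
// minimal (hypothetical) way to measure a region of code:
//
//   AllocStats stats;
//   ...run the code of interest...
//   stats.print();   // mallocs/frees/resource bytes attributable to the region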

// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}
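
// ReallocMark guards arrays that may be reallocated while they live in a
// resource area: it records the resource-area nesting level at construction,
// and check() asserts that the level is unchanged, i.e. that the array is not
// being grown from inside a nested ResourceMark (which would place the new
// copy in the wrong scope).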
ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product