Wed, 15 Feb 2012 10:12:55 -0800
7145537: minor tweaks to LogEvents
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

void* CHeapObj::operator new(size_t size) {
  return (void *) AllocateHeap(size, "CHeapObj-new");
}

void* CHeapObj::operator new (size_t size, const std::nothrow_t& nothrow_constant) {
  char* p = (char*) os::malloc(size);
#ifdef ASSERT
  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
#endif
  return p;
}

void CHeapObj::operator delete(void* p) {
  FreeHeap(p);
}

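// Usage sketch (illustrative only; MyTable is a hypothetical subclass):
// deriving from CHeapObj routes allocation through AllocateHeap/FreeHeap,
// so C-heap objects are accounted for and, with PrintMallocFree, traced.
//
//   class MyTable : public CHeapObj {
//     int _entries;
//   };
//   MyTable* t = new MyTable();   // CHeapObj::operator new
//   delete t;                     // CHeapObj::operator delete
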
void* StackObj::operator new(size_t size)   { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)    { ShouldNotCallThis(); }
void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); }

void* ResourceObj::operator new(size_t size, allocation_type type) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

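// Allocation-site sketch (illustrative; Foo is a hypothetical ResourceObj
// subclass): the placement argument selects where the object lives.
//
//   Foo* a = new (ResourceObj::RESOURCE_AREA) Foo(); // freed by ResourceMark
//   Foo* b = new (ResourceObj::C_HEAP) Foo();        // needs an explicit delete
//   delete b;
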
void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type() == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

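// Worked example of the tag encoding (illustrative; 32-bit addresses for
// brevity): for an object at address 0x1000 with type bits t (t <= 3),
// set_allocation_type() stores
//
//   _allocation_t[0] = ~(0x1000 + t)
//
// so ~(_allocation_t[0] | allocation_mask) == 0x1000 recovers the address
// (the "lost resource object" check above) and
// (~_allocation_t[0]) & allocation_mask == t recovers the type, which is
// exactly what get_allocation_type() computes.
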
ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such a type). Keep it since it is a valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and the type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is an embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep the current _allocation_t value.
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that the encoded (in _allocation_t[0]) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::operator delete() will zap _allocation_t[0] for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT

void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
}

void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
      if (p == NULL) p = os::malloc(bytes);
    }
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    // if we have more than n chunks, keep only n of them and free the rest
    ThreadCritical tc;
    if (_num_chunks > n) {
      // free chunks at end of queue, for better locality
      Chunk* cur = _first;
      for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

      if (cur != NULL) {
        Chunk* next = cur->next();
        cur->set_next(NULL);
        cur = next;

        // Free all remaining chunks
        while (cur != NULL) {
          next = cur->next();
          os::free(cur);
          _num_chunks--;
          cur = next;
        }
      }
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

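// Usage sketch (illustrative only): Chunk::operator new below is the real
// client of these pools; a pooled size class is checked out and returned
// roughly like this.
//
//   Chunk* c = new (Chunk::init_size) Chunk(Chunk::init_size); // small_pool->allocate()
//   ...
//   delete c;   // Chunk::operator delete puts it back on small_pool's free list
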
ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}

//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
   ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
   void task() {
     ChunkPool::clean();
   }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk), but in order for the arena
  // allocations to come out aligned as expected the overhead must be rounded
  // up to the expected arena alignment; so if sizeof(Chunk) is not already
  // the proper size we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   default:                 os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}

void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
}

Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
  set_size_in_bytes(a->size_in_bytes());
}

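// Typical arena use, as a sketch (illustrative only): many small
// bump-pointer allocations paid for with one bulk free at the end.
//
//   Arena* ar = new Arena();
//   char* buf = (char*) ar->Amalloc(64);   // grow()s a new Chunk if needed
//   ...
//   delete ar;                             // destructor chops every Chunk
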
Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;
  copy->set_size_in_bytes(size_in_bytes());
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  _first->chop();
  reset();
}

// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Sum over all earlier Chunks in the list
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, whence);
}

// Grow a new Chunk
void* Arena::grow( size_t x ) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL) {
    signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
  }

  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}

// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size);
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&             // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {  // Still fits where it sits
    _hwm = c_old+corrected_new_size;          // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size);
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}

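// In-place growth sketch (illustrative only, assuming the sizes involved
// are already arena-aligned): when old_ptr+old_size is the current
// high-water mark and the aligned new size still fits in the chunk,
// Arealloc just bumps _hwm instead of copying.
//
//   char* p = (char*) ar->Amalloc(16);    // p now ends exactly at _hwm
//   p = (char*) ar->Arealloc(p, 16, 32);  // extends in place, same pointer
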
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}

#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size) {
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong  AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong  AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong  AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong  AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void    AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}

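// Delta-accounting sketch (illustrative only): construction snapshots the
// global counters; print() reports what happened since the snapshot.
//
//   AllocStats stats;              // snapshot os::num_mallocs etc.
//   ... run the phase being measured ...
//   stats.print();                 // mallocs/frees/resource bytes since then
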
// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}

ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product