Mon, 09 Aug 2010 15:17:05 -0700
6975078: assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena()
Summary: Pass the check in ResourceObj() if _allocation value is already set and object is allocated on stack.
Reviewed-by: dholmes, johnc
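
For context, here is a minimal sketch of the three allocation paths the constructor check distinguishes (illustrative only; Foo is a hypothetical ResourceObj subclass, not part of this change):

  class Foo : public ResourceObj {
    int _value;
  };

  void example() {
    Foo* a = new Foo();                        // RESOURCE_AREA: freed when the enclosing ResourceMark unwinds
    Foo* b = new (ResourceObj::C_HEAP) Foo();  // C_HEAP: must be freed explicitly with delete
    Foo  c;                                    // STACK_OR_EMBEDDED: the case this fix is about
    delete b;
  }

The assert fired when a stack-allocated (or embedded) object found leftover bits in _allocation that already encoded its own address; with this change such a value is accepted as long as the object really is on the current thread's stack.
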
/*
 * Copyright (c) 1997, 2005, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_allocation.cpp.incl"

void* CHeapObj::operator new(size_t size){
  return (void *) AllocateHeap(size, "CHeapObj-new");
}

void CHeapObj::operator delete(void* p){
  FreeHeap(p);
}

void* StackObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
void  StackObj::operator delete(void* p)   { ShouldNotCallThis(); };
void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; };
void  _ValueObj::operator delete(void* p)  { ShouldNotCallThis(); };

void* ResourceObj::operator new(size_t size, allocation_type type) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation = badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
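// Debug-only bookkeeping of where a ResourceObj lives. Resource objects are
// at least 4-byte aligned, so the low bits of their address are free to hold
// an allocation_type: set_allocation_type() stores the bitwise complement of
// (address + type) in _allocation, and get_allocation_type() recovers the
// type while its assert verifies that the encoded address still matches
// 'this'. For example (illustrative addresses only), an object at 0x1000
// allocated on the C heap stores ~(0x1000 + C_HEAP).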
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
  assert(type <= allocation_mask, "incorrect allocation type");
  ((ResourceObj *)res)->_allocation = ~(allocation + type);
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation) & allocation_mask);
}
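
// The default constructor cannot know how the object was allocated. Objects
// created with ResourceObj::operator new() arrive here with _allocation
// already set, and that value is kept. Stack-allocated and embedded objects
// arrive with whatever bits were left in memory; normally that fails the
// encoding check and is replaced with STACK_OR_EMBEDDED. The 6975078 fix
// handles the remaining case: the leftover bits may already decode as a
// STACK_OR_EMBEDDED allocation of this very address (e.g., a reused stack
// slot), which is accepted as long as 'this' really is on the current
// thread's stack.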
ResourceObj::ResourceObj() { // default constructor
  if (~(_allocation | allocation_mask) != (uintptr_t)this) {
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) {
    // For some reason we got a value which looks like an allocation on stack.
    // Pass if it is really allocated on stack.
    assert(Thread::current()->on_local_stack((address)this), "should be on stack");
  } else {
    assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
           "allocation_type should be set by operator new()");
  }
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  assert(allocated_on_stack(), "copy only into local");
  // Keep current _allocation value;
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() zaps _allocation for C_heap.
    _allocation = badHeapOopVal; // zap type
  }
}
#endif // ASSERT

void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
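// Three pools are kept (small/medium/large, matching Chunk::init_size,
// Chunk::medium_size and Chunk::size). chunkpool_init() creates them at VM
// startup, and a ChunkPoolCleaner periodic task trims each pool back to a few
// spare chunks; allocate() and free() are guarded with ThreadCritical.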
class ChunkPool {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
      if (p == NULL) p = os::malloc(bytes);
    }
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    // if we have more than n chunks, keep only the first n and free the rest
    ThreadCritical tc;
    if (_num_chunks > n) {
      // free chunks at end of queue, for better locality
      Chunk* cur = _first;
      for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

      if (cur != NULL) {
        Chunk* next = cur->next();
        cur->set_next(NULL);
        cur = next;

        // Free all remaining chunks
        while (cur != NULL) {
          next = cur->next();
          os::free(cur);
          _num_chunks--;
          cur = next;
        }
      }
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;


void chunkpool_init() {
  ChunkPool::initialize();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000,   // cleaning interval in ms
         BlocksToKeep     = 5       // # of extra blocks to keep
       };

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::small_pool()->free_all_but(BlocksToKeep);
    ChunkPool::medium_pool()->free_all_but(BlocksToKeep);
    ChunkPool::large_pool()->free_all_but(BlocksToKeep);
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation
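// Chunks of the three standard sizes are recycled through the matching
// ChunkPool; any other size bypasses the pools and goes straight to
// os::malloc()/os::free(). chop() releases an entire chain of chunks at once,
// optionally zapping their contents to catch allocation bugs.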

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk) but in order for the arena
  // allocations to come out aligned as expected the size must be aligned
  // to the expected arena alignment.
  // We expect requested_size == sizeof(Chunk), but if it is not the proper
  // size we must align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   default:                 os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------
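// An Arena hands out memory by bumping a pointer (_hwm) within the current
// Chunk until it reaches _max (the chunk's top); when a request does not fit,
// grow() links a new Chunk onto the list and allocation continues there.
// destruct_contents() releases the whole chain in one go via Chunk::chop().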

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
}

Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
  set_size_in_bytes(a->size_in_bytes());
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm = _hwm;
  copy->_max = _max;
  copy->_first = _first;
  copy->set_size_in_bytes(size_in_bytes());
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  _first->chop();
  reset();
}


// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {     // While there are earlier Chunks in the list
    sum += k->length();     // Total size of this Chunk
    k = k->next();          // Bump along to next Chunk
  }
  return sum;               // Return total consumed space.
}


// Grow a new Chunk
void* Arena::grow( size_t x ) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL)
    vm_exit_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");

  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}


// Reallocate storage in Arena.
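// Shrinking always succeeds in place; the freed tail is only reclaimed when
// old_ptr is the most recent allocation in the current chunk
// (c_old + old_size == _hwm). Growing in place is likewise only possible for
// the most recent allocation; otherwise the data is copied to a fresh
// allocation and the old space is abandoned (Afree() mostly keeps the
// statistics accurate).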
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size);
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
    _hwm = c_old+corrected_new_size;         // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size);
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
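// With UseMallocOnly every arena allocation is turned into an os::malloc()
// call, and the returned pointer is additionally recorded in the arena itself
// (via internal_malloc_4) so that free_malloced_objects() can release the
// malloced blocks when the arena or an enclosing ResourceMark unwinds.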
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

size_t Arena::_bytes_allocated = 0;

AllocStats::AllocStats() {
  start_mallocs = os::num_mallocs;
  start_frees = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_res_bytes = Arena::_bytes_allocated;
}

int     AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
size_t  AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
size_t  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
int     AllocStats::num_frees()      { return os::num_frees - start_frees; }
void    AllocStats::print() {
  tty->print("%d mallocs (%ldK), %d frees, %ldK resrc",
             num_mallocs(), alloc_bytes()/K, num_frees(), resource_bytes()/K);
}


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product