New file: src/share/vm/memory/allocation.cpp

/*
 * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_allocation.cpp.incl"

void* CHeapObj::operator new(size_t size) {
  return (void *) AllocateHeap(size, "CHeapObj-new");
}

void CHeapObj::operator delete(void* p) {
  FreeHeap(p);
}

void* StackObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)   { ShouldNotCallThis(); }
void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)  { ShouldNotCallThis(); }

void* ResourceObj::operator new(size_t size, allocation_type type) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    break;
   case RESOURCE_AREA:
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  // Set allocation type in the resource object for assertion checks.
  DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
  return res;
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  FreeHeap(p);
}
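
// Illustrative sketch of the two ResourceObj allocation modes above (MyObj is
// a hypothetical ResourceObj subclass; the RESOURCE_AREA form assumes a live
// ResourceMark on the current thread):
//
//   MyObj* r = new (RESOURCE_AREA) MyObj();  // reclaimed when the mark unwinds
//   MyObj* c = new (C_HEAP)        MyObj();  // caller must delete explicitly
//   delete c;  // OK: allocated_on_C_heap() holds in debug builds
//
// The enumerators may need ResourceObj:: qualification at the use site,
// depending on how allocation_type is scoped in allocation.hpp.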
"" : name); 1.70 +} 1.71 + 1.72 + 1.73 +void trace_heap_free(void* p) { 1.74 + // A lock is not needed here - tty uses a lock internally 1.75 + tty->print_cr("Heap free " INTPTR_FORMAT, p); 1.76 +} 1.77 + 1.78 +bool warn_new_operator = false; // see vm_main 1.79 + 1.80 +//-------------------------------------------------------------------------------------- 1.81 +// ChunkPool implementation 1.82 + 1.83 +// MT-safe pool of chunks to reduce malloc/free thrashing 1.84 +// NB: not using Mutex because pools are used before Threads are initialized 1.85 +class ChunkPool { 1.86 + Chunk* _first; // first cached Chunk; its first word points to next chunk 1.87 + size_t _num_chunks; // number of unused chunks in pool 1.88 + size_t _num_used; // number of chunks currently checked out 1.89 + const size_t _size; // size of each chunk (must be uniform) 1.90 + 1.91 + // Our three static pools 1.92 + static ChunkPool* _large_pool; 1.93 + static ChunkPool* _medium_pool; 1.94 + static ChunkPool* _small_pool; 1.95 + 1.96 + // return first element or null 1.97 + void* get_first() { 1.98 + Chunk* c = _first; 1.99 + if (_first) { 1.100 + _first = _first->next(); 1.101 + _num_chunks--; 1.102 + } 1.103 + return c; 1.104 + } 1.105 + 1.106 + public: 1.107 + // All chunks in a ChunkPool has the same size 1.108 + ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } 1.109 + 1.110 + // Allocate a new chunk from the pool (might expand the pool) 1.111 + void* allocate(size_t bytes) { 1.112 + assert(bytes == _size, "bad size"); 1.113 + void* p = NULL; 1.114 + { ThreadCritical tc; 1.115 + _num_used++; 1.116 + p = get_first(); 1.117 + if (p == NULL) p = os::malloc(bytes); 1.118 + } 1.119 + if (p == NULL) 1.120 + vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); 1.121 + 1.122 + return p; 1.123 + } 1.124 + 1.125 + // Return a chunk to the pool 1.126 + void free(Chunk* chunk) { 1.127 + assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size"); 1.128 + ThreadCritical tc; 1.129 + _num_used--; 1.130 + 1.131 + // Add chunk to list 1.132 + chunk->set_next(_first); 1.133 + _first = chunk; 1.134 + _num_chunks++; 1.135 + } 1.136 + 1.137 + // Prune the pool 1.138 + void free_all_but(size_t n) { 1.139 + // if we have more than n chunks, free all of them 1.140 + ThreadCritical tc; 1.141 + if (_num_chunks > n) { 1.142 + // free chunks at end of queue, for better locality 1.143 + Chunk* cur = _first; 1.144 + for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next(); 1.145 + 1.146 + if (cur != NULL) { 1.147 + Chunk* next = cur->next(); 1.148 + cur->set_next(NULL); 1.149 + cur = next; 1.150 + 1.151 + // Free all remaining chunks 1.152 + while(cur != NULL) { 1.153 + next = cur->next(); 1.154 + os::free(cur); 1.155 + _num_chunks--; 1.156 + cur = next; 1.157 + } 1.158 + } 1.159 + } 1.160 + } 1.161 + 1.162 + // Accessors to preallocated pool's 1.163 + static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; } 1.164 + static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; } 1.165 + static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; } 1.166 + 1.167 + static void initialize() { 1.168 + _large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size()); 1.169 + _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size()); 1.170 + _small_pool = new ChunkPool(Chunk::init_size + 

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;


void chunkpool_init() {
  ChunkPool::initialize();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000,   // cleaning interval in ms
         BlocksToKeep     = 5      // # of extra blocks to keep
       };

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::small_pool()->free_all_but(BlocksToKeep);
    ChunkPool::medium_pool()->free_all_but(BlocksToKeep);
    ChunkPool::large_pool()->free_all_but(BlocksToKeep);
  }
};

//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk), but for the arena allocations
  // to come out aligned as expected, the Chunk overhead must itself be padded
  // to the arena alignment; if sizeof(Chunk) is not already that size, align it.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}

void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   default:                 os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;           // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
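  // Round init_size up to a multiple of the pointer size: add the alignment
  // minus one, then mask it off. E.g. with 8-byte pointers, round_size is 7
  // and an init_size of 10 becomes (10 + 7) & ~7 = 16.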
  init_size = (init_size + round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
}

Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
  set_size_in_bytes(a->size_in_bytes());
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;
  copy->set_size_in_bytes(size_in_bytes());
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  _first->chop();
  reset();
}


// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max - _hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Walk the Chunks before the current one
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}


// Grow a new Chunk
void* Arena::grow( size_t x ) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL)
    vm_exit_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");

  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}



// Reallocate storage in Arena.
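// Three cases, in order of preference: shrink in place (returning the tail
// to the arena only if the block is the most recent allocation), grow in
// place when the block ends exactly at _hwm and the enlarged block still
// fits below _max, otherwise allocate a new block and copy.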
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size);
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr, old_size);   // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&             // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {  // Still fits where it sits
    _hwm = c_old+corrected_new_size;          // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size);
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old, old_size);       // Mostly done to keep stats accurate
  return new_ptr;
}


// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check if in the current chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check every chunk in the Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
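  // (each such allocation burns one pointer-sized slot in the resource area;
  // free_malloced_objects() below walks those slots and os::free()s every
  // saved pointer when the enclosing ResourceMark or Arena goes away)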
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif


//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size) {
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

size_t Arena::_bytes_allocated = 0;

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

int    AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
size_t AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
size_t AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
int    AllocStats::num_frees()      { return os::num_frees - start_frees; }
void   AllocStats::print() {
  tty->print("%d mallocs (%ldK), %d frees, %ldK resrc",
             num_mallocs(), alloc_bytes()/K, num_frees(), resource_bytes()/K);
}


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}
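
// Note on the parameters below: hwm and max delimit the pointer slots in the
// chunk that was current when the mark was taken, while hwm2 is the current
// high-water mark; any chunks in between are fully occupied by slots.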
void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product
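
// Typical usage sketch (illustrative only, assuming the Arena API above and
// HotSpot's ResourceMark, which records the arena position and rolls it back
// on destruction):
//
//   {
//     ResourceMark rm;                          // snapshot hwm/chunk
//     Arena* ar = Thread::current()->resource_area();
//     char* buf = (char*) ar->Amalloc(64);      // bump-pointer allocation
//     buf = (char*) ar->Arealloc(buf, 64, 128); // may grow in place
//     assert(ar->contains(buf), "allocated in this arena");
//   }                                           // rm unwinds; buf is dead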