--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/allocation.cpp	Wed Apr 27 01:25:04 2016 +0800
@@ -0,0 +1,804 @@
+/*
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/genCollectedHeap.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "memory/resourceArea.hpp"
+#include "memory/universe.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/os.hpp"
+#include "runtime/task.hpp"
+#include "runtime/threadCritical.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/ostream.hpp"
+
+#ifdef TARGET_OS_FAMILY_linux
+# include "os_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "os_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "os_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "os_bsd.inline.hpp"
+#endif
+
+void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
+void  StackObj::operator delete(void* p)          { ShouldNotCallThis(); }
+void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
+void  StackObj::operator delete [](void* p)       { ShouldNotCallThis(); }
+
+void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
+void  _ValueObj::operator delete(void* p)          { ShouldNotCallThis(); }
+void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
+void  _ValueObj::operator delete [](void* p)       { ShouldNotCallThis(); }
+
+void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
+                                 size_t word_size, bool read_only,
+                                 MetaspaceObj::Type type, TRAPS) throw() {
+  // Klass has its own operator new
+  return Metaspace::allocate(loader_data, word_size, read_only,
+                             type, CHECK_NULL);
+}
+
+bool MetaspaceObj::is_shared() const {
+  return MetaspaceShared::is_in_shared_space(this);
+}
+
+bool MetaspaceObj::is_metaspace_object() const {
+  return Metaspace::contains((void*)this);
+}
+
+void MetaspaceObj::print_address_on(outputStream* st) const {
+  st->print(" {" INTPTR_FORMAT "}", p2i(this));
+}
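+
+// Illustrative caller sketch (MyMetadata is hypothetical): metadata classes
+// deriving from MetaspaceObj allocate through the placement new above, e.g.
+//
+//   MyMetadata* m = new (loader_data, word_size, /*read_only=*/false,
+//                        MetaspaceObj::ConstantPoolType, THREAD) MyMetadata();
+//
+// On failure Metaspace::allocate() sets a pending exception and the
+// CHECK_NULL macro makes the operator return NULL.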
+
+void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
+  address res;
+  switch (type) {
+   case C_HEAP:
+    res = (address)AllocateHeap(size, flags, CALLER_PC);
+    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
+    break;
+   case RESOURCE_AREA:
+    // new(size) sets allocation type RESOURCE_AREA.
+    res = (address)operator new(size);
+    break;
+   default:
+    ShouldNotReachHere();
+  }
+  return res;
+}
+
+void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
+  return (address) operator new(size, type, flags);
+}
+
+void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
+                                allocation_type type, MEMFLAGS flags) throw() {
+  // Should only be called with std::nothrow; use the other operator new() otherwise.
+  address res;
+  switch (type) {
+   case C_HEAP:
+    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
+    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
+    break;
+   case RESOURCE_AREA:
+    // new(size) sets allocation type RESOURCE_AREA.
+    res = (address)operator new(size, std::nothrow);
+    break;
+   default:
+    ShouldNotReachHere();
+  }
+  return res;
+}
+
+void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
+                                   allocation_type type, MEMFLAGS flags) throw() {
+  return (address)operator new(size, nothrow_constant, type, flags);
+}
+
+void ResourceObj::operator delete(void* p) {
+  assert(((ResourceObj *)p)->allocated_on_C_heap(),
+         "delete only allowed for C_HEAP objects");
+  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
+  FreeHeap(p);
+}
+
+void ResourceObj::operator delete [](void* p) {
+  operator delete(p);
+}
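+
+// Illustrative usage sketch (MyObj is a hypothetical ResourceObj subclass):
+// the allocation_type argument selects where the instance lives.
+//
+//   MyObj* a = new (ResourceObj::RESOURCE_AREA, mtInternal) MyObj(); // reclaimed by ResourceMark
+//   MyObj* b = new (ResourceObj::C_HEAP, mtInternal) MyObj();       // must be deleted explicitly
+//   delete b;    // fine: b is C_HEAP-allocated
+//   // delete a; // would trip the assert above: delete is C_HEAP-only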
+
+#ifdef ASSERT
+void ResourceObj::set_allocation_type(address res, allocation_type type) {
+  // Set allocation type in the resource object
+  uintptr_t allocation = (uintptr_t)res;
+  assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res)));
+  assert(type <= allocation_mask, "incorrect allocation type");
+  ResourceObj* resobj = (ResourceObj *)res;
+  resobj->_allocation_t[0] = ~(allocation + type);
+  if (type != STACK_OR_EMBEDDED) {
+    // Called from operator new() and CollectionSetChooser(),
+    // set verification value.
+    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
+  }
+}
+
+ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
+  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
+  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
+}
+
+bool ResourceObj::is_type_set() const {
+  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
+  return get_allocation_type() == type &&
+         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
+}
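+
+// Worked example of the encoding above: for a mask-aligned object address a
+// (the first assert guarantees (a & allocation_mask) == 0) and type t,
+// _allocation_t[0] = ~(a + t), so
+//   (~_allocation_t[0]) & allocation_mask  == t   (get_allocation_type)
+//   ~(_allocation_t[0] | allocation_mask)  == a   (must equal 'this')
+// A copied or stale header breaks one of these identities and is caught by
+// the asserts in get_allocation_type() and in the constructors below.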
+
+ResourceObj::ResourceObj() { // default constructor
+  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
+    // Operator new() is not called for allocations
+    // on stack and for embedded objects.
+    set_allocation_type((address)this, STACK_OR_EMBEDDED);
+  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
+    // For some reason we got a value which resembles
+    // an embedded or stack object (operator new() does not
+    // set such type). Keep it since it is a valid value
+    // (even if it was garbage).
+    // Ignore garbage in other fields.
+  } else if (is_type_set()) {
+    // Operator new() was called and type was set.
+    assert(!allocated_on_stack(),
+           err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
+                   p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
+  } else {
+    // Operator new() was not called.
+    // Assume that it is embedded or stack object.
+    set_allocation_type((address)this, STACK_OR_EMBEDDED);
+  }
+  _allocation_t[1] = 0; // Zap verification value
+}
+
+ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
+  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
+  // Note: garbage may resemble a valid value.
+  assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
+         err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
+                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
+  set_allocation_type((address)this, STACK_OR_EMBEDDED);
+  _allocation_t[1] = 0; // Zap verification value
+}
+
+ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
+  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
+  assert(allocated_on_stack(),
+         err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
+                 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
+  // Keep current _allocation_t value;
+  return *this;
+}
+
+ResourceObj::~ResourceObj() {
+  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
+  if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
+    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
+  }
+}
+#endif // ASSERT
+
+
+void trace_heap_malloc(size_t size, const char* name, void* p) {
+  // A lock is not needed here - tty uses a lock internally
+  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
+}
+
+
+void trace_heap_free(void* p) {
+  // A lock is not needed here - tty uses a lock internally
+  tty->print_cr("Heap free " INTPTR_FORMAT, p2i(p));
+}
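+
+// With -XX:+PrintMallocFree (debug builds) these produce paired lines on
+// tty; the values below are illustrative:
+//   Heap malloc 0x00007f3a5c01b230 104 Arena-new
+//   Heap free 0x00007f3a5c01b230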
+
+//--------------------------------------------------------------------------------------
+// ChunkPool implementation
+
+// MT-safe pool of chunks to reduce malloc/free thrashing
+// NB: not using Mutex because pools are used before Threads are initialized
+class ChunkPool: public CHeapObj<mtInternal> {
+  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
+  size_t       _num_chunks;   // number of unused chunks in pool
+  size_t       _num_used;     // number of chunks currently checked out
+  const size_t _size;         // size of each chunk (must be uniform)
+
+  // Our four static pools
+  static ChunkPool* _large_pool;
+  static ChunkPool* _medium_pool;
+  static ChunkPool* _small_pool;
+  static ChunkPool* _tiny_pool;
+
+  // return first element or null
+  void* get_first() {
+    Chunk* c = _first;
+    if (_first) {
+      _first = _first->next();
+      _num_chunks--;
+    }
+    return c;
+  }
+
+ public:
+  // All chunks in a ChunkPool have the same size
+  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
+
+  // Allocate a new chunk from the pool (might expand the pool)
+  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
+    assert(bytes == _size, "bad size");
+    void* p = NULL;
+    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
+    // should be done outside ThreadCritical lock due to NMT
+    { ThreadCritical tc;
+      _num_used++;
+      p = get_first();
+    }
+    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
+    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
+    }
+    return p;
+  }
+
+  // Return a chunk to the pool
+  void free(Chunk* chunk) {
+    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
+    ThreadCritical tc;
+    _num_used--;
+
+    // Add chunk to list
+    chunk->set_next(_first);
+    _first = chunk;
+    _num_chunks++;
+  }
+
+  // Prune the pool
+  void free_all_but(size_t n) {
+    Chunk* cur = NULL;
+    Chunk* next;
+    {
+      // if we have more than n chunks, keep the first n and unlink the rest
+      ThreadCritical tc;
+      if (_num_chunks > n) {
+        // free chunks at end of queue, for better locality
+        cur = _first;
+        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
+
+        if (cur != NULL) {
+          next = cur->next();
+          cur->set_next(NULL);
+          cur = next;
+
+          _num_chunks = n;
+        }
+      }
+    }
+
+    // Free all remaining chunks, outside of ThreadCritical
+    // to avoid deadlock with NMT
+    while(cur != NULL) {
+      next = cur->next();
+      os::free(cur, mtChunk);
+      cur = next;
+    }
+  }
+
+  // Accessors to the preallocated pools
+  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
+  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
+  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
+  static ChunkPool* tiny_pool()   { assert(_tiny_pool   != NULL, "must be initialized"); return _tiny_pool;   }
+
+  static void initialize() {
+    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
+    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
+    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
+    _tiny_pool   = new ChunkPool(Chunk::tiny_size   + Chunk::aligned_overhead_size());
+  }
+
+  static void clean() {
+    enum { BlocksToKeep = 5 };
+    _tiny_pool->free_all_but(BlocksToKeep);
+    _small_pool->free_all_but(BlocksToKeep);
+    _medium_pool->free_all_but(BlocksToKeep);
+    _large_pool->free_all_but(BlocksToKeep);
+  }
+};
+
+ChunkPool* ChunkPool::_large_pool  = NULL;
+ChunkPool* ChunkPool::_medium_pool = NULL;
+ChunkPool* ChunkPool::_small_pool  = NULL;
+ChunkPool* ChunkPool::_tiny_pool   = NULL;
+
+void chunkpool_init() {
+  ChunkPool::initialize();
+}
+
+void
+Chunk::clean_chunk_pool() {
+  ChunkPool::clean();
+}
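+
+// The four pools above each cache chunks of exactly one size class; the
+// sizes are fixed in initialize():
+//   _tiny_pool   : Chunk::tiny_size   + aligned_overhead_size()
+//   _small_pool  : Chunk::init_size   + aligned_overhead_size()
+//   _medium_pool : Chunk::medium_size + aligned_overhead_size()
+//   _large_pool  : Chunk::size        + aligned_overhead_size()
+// Chunks of any other length bypass the pools entirely and go straight to
+// os::malloc/os::free (see Chunk::operator new and operator delete below).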
+
+
+//--------------------------------------------------------------------------------------
+// ChunkPoolCleaner implementation
+//
+
+class ChunkPoolCleaner : public PeriodicTask {
+  enum { CleaningInterval = 5000 };      // cleaning interval in ms
+
+ public:
+   ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
+   void task() {
+     ChunkPool::clean();
+   }
+};
+
+//--------------------------------------------------------------------------------------
+// Chunk implementation
+
+void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
+  // requested_size is equal to sizeof(Chunk), but in order for the arena
+  // allocations to come out aligned as expected, the overhead must be padded
+  // to the expected arena alignment; if sizeof(Chunk) is not already a
+  // proper multiple we must align it.
+  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
+  size_t bytes = ARENA_ALIGN(requested_size) + length;
+  switch (length) {
+   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::tiny_size:   return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
+   default: {
+     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
+     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
+     }
+     return p;
+   }
+  }
+}
+
+void Chunk::operator delete(void* p) {
+  Chunk* c = (Chunk*)p;
+  switch (c->length()) {
+   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
+   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
+   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
+   case Chunk::tiny_size:   ChunkPool::tiny_pool()->free(c);   break;
+   default:                 os::free(c, mtChunk);
+  }
+}
+
+Chunk::Chunk(size_t length) : _len(length) {
+  _next = NULL;         // Chain on the linked list
+}
+
+
+void Chunk::chop() {
+  Chunk *k = this;
+  while( k ) {
+    Chunk *tmp = k->next();
+    // clear out this chunk (to detect allocation bugs)
+    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
+    delete k;                   // Free chunk (was malloc'd)
+    k = tmp;
+  }
+}
+
+void Chunk::next_chop() {
+  _next->chop();
+  _next = NULL;
+}
+
+
+void Chunk::start_chunk_pool_cleaner_task() {
+#ifdef ASSERT
+  static bool task_created = false;
+  assert(!task_created, "should not start chunk pool cleaner twice");
+  task_created = true;
+#endif
+  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
+  cleaner->enroll();
+}
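+
+// Memory layout sketch for a single Chunk allocation (illustrative): the
+// header is padded to ARENA_ALIGN so the payload handed out to the arena
+// stays aligned:
+//
+//   |<- aligned_overhead_size() ->|<------------ _len ------------>|
+//   +-----------------------------+---------------------------------+
+//   | Chunk header (_next, _len)  | payload: bottom() ... top()     |
+//   +-----------------------------+---------------------------------+
+//
+// This is why Chunk::operator new above allocates
+// ARENA_ALIGN(requested_size) + length bytes in one block.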
+
+//------------------------------Arena------------------------------------------
+NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
+
+Arena::Arena(size_t init_size) {
+  size_t round_size = (sizeof (char *)) - 1;
+  init_size = (init_size+round_size) & ~round_size;
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
+  _hwm = _chunk->bottom();      // Save the cached hwm, max
+  _max = _chunk->top();
+  set_size_in_bytes(init_size);
+  NOT_PRODUCT(Atomic::inc(&_instance_count);)
+}
+
+Arena::Arena() {
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
+  _hwm = _chunk->bottom();      // Save the cached hwm, max
+  _max = _chunk->top();
+  set_size_in_bytes(Chunk::init_size);
+  NOT_PRODUCT(Atomic::inc(&_instance_count);)
+}
+
+Arena *Arena::move_contents(Arena *copy) {
+  copy->destruct_contents();
+  copy->_chunk = _chunk;
+  copy->_hwm   = _hwm;
+  copy->_max   = _max;
+  copy->_first = _first;
+
+  // Work around a rare race condition in which native memory tracking
+  // could otherwise double-count the arena size.
+  size_t size = size_in_bytes();
+  set_size_in_bytes(0);
+  copy->set_size_in_bytes(size);
+  // Destroy original arena
+  reset();
+  return copy;            // Return Arena with contents
+}
+
+Arena::~Arena() {
+  destruct_contents();
+  NOT_PRODUCT(Atomic::dec(&_instance_count);)
+}
+
+void* Arena::operator new(size_t size) throw() {
+  assert(false, "Use dynamic memory type binding");
+  return NULL;
+}
+
+void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
+  assert(false, "Use dynamic memory type binding");
+  return NULL;
+}
+
+  // dynamic memory type binding
+void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
+#ifdef ASSERT
+  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
+  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
+  return p;
+#else
+  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
+#endif
+}
+
+void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
+#ifdef ASSERT
+  void* p = os::malloc(size, flags|otArena, CALLER_PC);
+  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
+  return p;
+#else
+  return os::malloc(size, flags|otArena, CALLER_PC);
+#endif
+}
+
+void Arena::operator delete(void* p) {
+  FreeHeap(p);
+}
+
+// Destroy this arena's contents and reset to empty
+void Arena::destruct_contents() {
+  if (UseMallocOnly && _first != NULL) {
+    char* end = _first->next() ? _first->top() : _hwm;
+    free_malloced_objects(_first, _first->bottom(), end, _hwm);
+  }
+  // reset size before chop to avoid a rare race condition
+  // that can otherwise let total arena memory exceed total chunk memory
+  set_size_in_bytes(0);
+  _first->chop();
+  reset();
+}
+
+// This is a high-traffic method, but many calls don't
+// actually change the size
+void Arena::set_size_in_bytes(size_t size) {
+  if (_size_in_bytes != size) {
+    _size_in_bytes = size;
+    MemTracker::record_arena_size((address)this, size);
+  }
+}
+
+// Total of all Chunks in arena
+size_t Arena::used() const {
+  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
+  register Chunk *k = _first;
+  while( k != _chunk) {         // Walk all Chunks before the current one
+    sum += k->length();         // Total size of this Chunk
+    k = k->next();              // Bump along to next Chunk
+  }
+  return sum;                   // Return total consumed space.
+}
+
+void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
+  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
+}
+
+// Grow a new Chunk
+void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
+  // Get the minimal required size: at least a standard Chunk, or even
+  // bigger for giant objects
+  size_t len = MAX2(x, (size_t) Chunk::size);
+
+  Chunk *k = _chunk;            // Get filled-up chunk address
+  _chunk = new (alloc_failmode, len) Chunk(len);
+
+  if (_chunk == NULL) {
+    _chunk = k;                 // restore the previous value of _chunk
+    return NULL;
+  }
+  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
+  else _first = _chunk;
+  _hwm  = _chunk->bottom();     // Save the cached hwm, max
+  _max  = _chunk->top();
+  set_size_in_bytes(size_in_bytes() + len);
+  void* result = _hwm;
+  _hwm += x;
+  return result;
+}
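+
+// Illustrative usage sketch: an Arena is created with an explicit memory
+// type and satisfies allocations by bumping _hwm, calling grow() when the
+// current chunk is exhausted.
+//
+//   Arena* a = new (mtInternal) Arena();  // dynamic memory type binding
+//   void*  p = a->Amalloc(64);            // bump-pointer allocation
+//   p = a->Arealloc(p, 64, 128);          // see Arealloc() below
+//   delete a;                             // chops every chunk at once
+//
+// Amalloc()/Arealloc() are declared in allocation.hpp; individual
+// allocations are normally not freed one by one.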
+
+
+
+// Reallocate storage in Arena.
+void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
+  assert(new_size >= 0, "bad size");
+  if (new_size == 0) return NULL;
+#ifdef ASSERT
+  if (UseMallocOnly) {
+    // always allocate a new object (otherwise we'll free this one twice)
+    char* copy = (char*)Amalloc(new_size, alloc_failmode);
+    if (copy == NULL) {
+      return NULL;
+    }
+    size_t n = MIN2(old_size, new_size);
+    if (n > 0) memcpy(copy, old_ptr, n);
+    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
+    return copy;
+  }
+#endif
+  char *c_old = (char*)old_ptr; // Handy name
+  // Stupid fast special case
+  if( new_size <= old_size ) {  // Shrink in-place
+    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
+      _hwm = c_old+new_size;    // Adjust hwm
+    return c_old;
+  }
+
+  // make sure that new_size is legal
+  size_t corrected_new_size = ARENA_ALIGN(new_size);
+
+  // See if we can resize in-place
+  if( (c_old+old_size == _hwm) &&            // Adjusting recent thing
+      (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
+    _hwm = c_old+corrected_new_size;         // Adjust hwm
+    return c_old;               // Return old pointer
+  }
+
+  // Oops, got to relocate guts
+  void *new_ptr = Amalloc(new_size, alloc_failmode);
+  if (new_ptr == NULL) {
+    return NULL;
+  }
+  memcpy( new_ptr, c_old, old_size );
+  Afree(c_old,old_size);        // Mostly done to keep stats accurate
+  return new_ptr;
+}
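+
+// Worked example of the three paths above (sizes illustrative), assuming
+// c_old+64 == _hwm, i.e. the block is the most recent allocation:
+//   Arealloc(c_old, 64, 32)   shrinks in place and pulls _hwm back;
+//   Arealloc(c_old, 64, 128)  grows in place when c_old + ARENA_ALIGN(128)
+//                             still fits below _max;
+//   otherwise a fresh block is Amalloc'ed, the old 64 bytes are copied and
+//   the old block is Afree'd (mostly for the statistics).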
+
+
+// Determine if pointer belongs to this Arena or not.
+bool Arena::contains( const void *ptr ) const {
+#ifdef ASSERT
+  if (UseMallocOnly) {
+    // really slow, but not easy to make fast
+    if (_chunk == NULL) return false;
+    char** bottom = (char**)_chunk->bottom();
+    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
+      if (*p == ptr) return true;
+    }
+    for (Chunk *c = _first; c != NULL; c = c->next()) {
+      if (c == _chunk) continue;  // current chunk has been processed
+      char** bottom = (char**)c->bottom();
+      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
+        if (*p == ptr) return true;
+      }
+    }
+    return false;
+  }
+#endif
+  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
+    return true;                // Check for in this chunk
+  for (Chunk *c = _first; c; c = c->next()) {
+    if (c == _chunk) continue;  // current chunk has been processed
+    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
+      return true;              // Check for every chunk in Arena
+    }
+  }
+  return false;                 // Not in any Chunk, so not in Arena
+}
+
+
+#ifdef ASSERT
+void* Arena::malloc(size_t size) {
+  assert(UseMallocOnly, "shouldn't call");
+  // use malloc, but save pointer in res. area for later freeing
+  char** save = (char**)internal_malloc_4(sizeof(char*));
+  return (*save = (char*)os::malloc(size, mtChunk));
+}
+
+// for debugging with UseMallocOnly
+void* Arena::internal_malloc_4(size_t x) {
+  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
+  check_for_overflow(x, "Arena::internal_malloc_4");
+  if (_hwm + x > _max) {
+    return grow(x);
+  } else {
+    char *old = _hwm;
+    _hwm += x;
+    return old;
+  }
+}
+#endif
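+
+// Under the debug-only UseMallocOnly flag the arena does not carve objects
+// out of chunk memory: Arena::malloc() above stores each os::malloc'ed
+// pointer in a char* slot inside the resource area. That is why contains()
+// compares the stored values *p against the pointer instead of doing range
+// checks, and why free_malloced_objects() (in the non-product section
+// below) walks the same slots to release every block.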
+
+
+//--------------------------------------------------------------------------------------
+// Non-product code
+
+#ifndef PRODUCT
+// The global operator new should never be called since it will usually indicate
+// a memory leak. Use CHeapObj as the base class of such objects to make it explicit
+// that they're allocated on the C heap.
+// Commented out in product version to avoid conflicts with third-party C++ native code.
+// In the debug version on certain platforms, such as Mac OS X (Darwin), global new is
+// called from JDK source (e.g. Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair)
+// and causes data corruption. Define ALLOW_OPERATOR_NEW_USAGE for platforms on which
+// the global operator new is allowed.
+//
+#ifndef ALLOW_OPERATOR_NEW_USAGE
+void* operator new(size_t size) throw() {
+  assert(false, "Should not call global operator new");
+  return 0;
+}
+
+void* operator new [](size_t size) throw() {
+  assert(false, "Should not call global operator new[]");
+  return 0;
+}
+
+void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
+  assert(false, "Should not call global operator new");
+  return 0;
+}
+
+void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
+  assert(false, "Should not call global operator new[]");
+  return 0;
+}
+
+void operator delete(void* p) {
+  assert(false, "Should not call global delete");
+}
+
+void operator delete [](void* p) {
+  assert(false, "Should not call global delete []");
+}
+#endif // ALLOW_OPERATOR_NEW_USAGE
+
+void AllocatedObj::print() const       { print_on(tty); }
+void AllocatedObj::print_value() const { print_value_on(tty); }
+
+void AllocatedObj::print_on(outputStream* st) const {
+  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
+}
+
+void AllocatedObj::print_value_on(outputStream* st) const {
+  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
+}
+
+julong Arena::_bytes_allocated = 0;
+
+void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }
+
+AllocStats::AllocStats() {
+  start_mallocs      = os::num_mallocs;
+  start_frees        = os::num_frees;
+  start_malloc_bytes = os::alloc_bytes;
+  start_mfree_bytes  = os::free_bytes;
+  start_res_bytes    = Arena::_bytes_allocated;
+}
+
+julong  AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
+julong  AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
+julong  AllocStats::num_frees()      { return os::num_frees - start_frees; }
+julong  AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
+julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
+void    AllocStats::print() {
+  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
+                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
+                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
+}
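+
+// Illustrative usage sketch: AllocStats snapshots the global counters at
+// construction time, so wrapping a region of interest yields deltas:
+//
+//   AllocStats stats;   // snapshot os::num_mallocs, os::alloc_bytes, ...
+//   run_workload();     // hypothetical workload
+//   stats.print();      // e.g. "42 mallocs (1MB), 40 frees (0MB), 2MB resrc"
+//
+// The printed numbers are deltas since construction, not process totals.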
+
+
+// debugging code
+inline void Arena::free_all(char** start, char** end) {
+  for (char** p = start; p < end; p++) if (*p) os::free(*p);
+}
+
+void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
+  assert(UseMallocOnly, "should not call");
+  // free all objects malloced since resource mark was created; resource area
+  // contains their addresses
+  if (chunk->next()) {
+    // this chunk is full, and some others too
+    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
+      char* top = c->top();
+      if (c->next() == NULL) {
+        top = hwm2;     // last chunk is only used up to hwm2
+        assert(c->contains(hwm2), "bad hwm2");
+      }
+      free_all((char**)c->bottom(), (char**)top);
+    }
+    assert(chunk->contains(hwm), "bad hwm");
+    assert(chunk->contains(max), "bad max");
+    free_all((char**)hwm, (char**)max);
+  } else {
+    // this chunk was partially used
+    assert(chunk->contains(hwm), "bad hwm");
+    assert(chunk->contains(hwm2), "bad hwm2");
+    free_all((char**)hwm, (char**)hwm2);
+  }
+}
+
+
+ReallocMark::ReallocMark() {
+#ifdef ASSERT
+  Thread *thread = ThreadLocalStorage::get_thread_slow();
+  _nesting = thread->resource_area()->nesting();
+#endif
+}
+
+void ReallocMark::check() {
+#ifdef ASSERT
+  if (_nesting != Thread::current()->resource_area()->nesting()) {
+    fatal("allocation bug: array could grow within nested ResourceMark");
+  }
+#endif
+}
+
+#endif // Non-product