src/share/vm/memory/allocation.cpp

author:      zgu
date:        Wed, 27 Aug 2014 08:19:12 -0400
changeset:   7074:833b0f92429a
parent:      6695:09619752c16d
children:    7535:7ae4e26cb1e0, 8316:626f594dffa6
permissions: -rw-r--r--

8046598: Scalable Native memory tracking development
Summary: Enhance scalability of native memory tracking
Reviewed-by: coleenp, ctornqvi, gtriantafill

duke@435 1 /*
drchase@6680 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "memory/allocation.hpp"
stefank@2314 27 #include "memory/allocation.inline.hpp"
coleenp@4037 28 #include "memory/genCollectedHeap.hpp"
coleenp@4037 29 #include "memory/metaspaceShared.hpp"
stefank@2314 30 #include "memory/resourceArea.hpp"
coleenp@4037 31 #include "memory/universe.hpp"
zgu@3900 32 #include "runtime/atomic.hpp"
stefank@2314 33 #include "runtime/os.hpp"
stefank@2314 34 #include "runtime/task.hpp"
stefank@2314 35 #include "runtime/threadCritical.hpp"
zgu@3900 36 #include "services/memTracker.hpp"
stefank@2314 37 #include "utilities/ostream.hpp"
zgu@3900 38
stefank@2314 39 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 40 # include "os_linux.inline.hpp"
stefank@2314 41 #endif
stefank@2314 42 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 43 # include "os_solaris.inline.hpp"
stefank@2314 44 #endif
stefank@2314 45 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 46 # include "os_windows.inline.hpp"
stefank@2314 47 #endif
goetz@6461 48 #ifdef TARGET_OS_FAMILY_aix
goetz@6461 49 # include "os_aix.inline.hpp"
goetz@6461 50 #endif
never@3156 51 #ifdef TARGET_OS_FAMILY_bsd
never@3156 52 # include "os_bsd.inline.hpp"
never@3156 53 #endif
duke@435 54
coleenp@5614 55 void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
coleenp@5614 56 void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
coleenp@5614 57 void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
coleenp@5614 58 void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }
minqi@5103 59
coleenp@5614 60 void* _ValueObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
coleenp@5614 61 void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }
coleenp@5614 62 void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
coleenp@5614 63 void _ValueObj::operator delete [](void* p) { ShouldNotCallThis(); }
duke@435 64
coleenp@4037 65 void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
iklam@5208 66 size_t word_size, bool read_only,
coleenp@5614 67 MetaspaceObj::Type type, TRAPS) throw() {
coleenp@4037 68 // Klass has its own operator new
coleenp@4037 69 return Metaspace::allocate(loader_data, word_size, read_only,
iklam@5208 70 type, CHECK_NULL);
coleenp@4037 71 }
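// Illustrative sketch (not part of the original source): metadata classes
// reach this placement new through call sites like the hypothetical one
// below; the object lands in the loader's metaspace rather than on the C
// heap, and allocation failure propagates through TRAPS/CHECK_NULL:
//
//   MyMetadata* m = new (loader_data, word_size, /*read_only*/ false,
//                        MetaspaceObj::SymbolType, CHECK_NULL) MyMetadata();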
coleenp@4037 72
coleenp@4037 73 bool MetaspaceObj::is_shared() const {
coleenp@4037 74 return MetaspaceShared::is_in_shared_space(this);
coleenp@4037 75 }
coleenp@4037 76
coleenp@4295 77 bool MetaspaceObj::is_metaspace_object() const {
coleenp@6678 78 return Metaspace::contains((void*)this);
coleenp@4295 79 }
coleenp@4295 80
coleenp@4037 81 void MetaspaceObj::print_address_on(outputStream* st) const {
drchase@6680 82 st->print(" {" INTPTR_FORMAT "}", p2i(this));
coleenp@4037 83 }
coleenp@4037 84
coleenp@5614 85 void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
duke@435 86 address res;
duke@435 87 switch (type) {
duke@435 88 case C_HEAP:
zgu@3900 89 res = (address)AllocateHeap(size, flags, CALLER_PC);
kvn@2040 90 DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
duke@435 91 break;
duke@435 92 case RESOURCE_AREA:
kvn@2043 93 // new(size) sets allocation type RESOURCE_AREA.
duke@435 94 res = (address)operator new(size);
duke@435 95 break;
duke@435 96 default:
duke@435 97 ShouldNotReachHere();
duke@435 98 }
duke@435 99 return res;
duke@435 100 }
duke@435 101
coleenp@5614 102 void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
minqi@5103 103 return (address) operator new(size, type, flags);
minqi@5103 104 }
minqi@5103 105
nloodin@4183 106 void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
coleenp@5614 107 allocation_type type, MEMFLAGS flags) throw() {
nloodin@4183 108 // Should only be called with std::nothrow; use the other operator new() otherwise.
nloodin@4183 109 address res;
nloodin@4183 110 switch (type) {
nloodin@4183 111 case C_HEAP:
nloodin@4183 112 res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
nloodin@4183 113 DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
nloodin@4183 114 break;
nloodin@4183 115 case RESOURCE_AREA:
nloodin@4183 116 // new(size) sets allocation type RESOURCE_AREA.
nloodin@4183 117 res = (address)operator new(size, std::nothrow);
nloodin@4183 118 break;
nloodin@4183 119 default:
nloodin@4183 120 ShouldNotReachHere();
nloodin@4183 121 }
nloodin@4183 122 return res;
nloodin@4183 123 }
nloodin@4183 124
minqi@5103 125 void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
coleenp@5614 126 allocation_type type, MEMFLAGS flags) throw() {
minqi@5103 127 return (address)operator new(size, nothrow_constant, type, flags);
minqi@5103 128 }
nloodin@4183 129
duke@435 130 void ResourceObj::operator delete(void* p) {
duke@435 131 assert(((ResourceObj *)p)->allocated_on_C_heap(),
duke@435 132 "delete only allowed for C_HEAP objects");
kvn@2357 133 DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
duke@435 134 FreeHeap(p);
duke@435 135 }
duke@435 136
minqi@5103 137 void ResourceObj::operator delete [](void* p) {
minqi@5103 138 operator delete(p);
minqi@5103 139 }
minqi@5103 140
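// Illustrative usage sketch (not in the original source; MyNode is a
// hypothetical subclass): a ResourceObj may live in the resource area,
// reclaimed wholesale when the enclosing ResourceMark unwinds, or on the
// C heap, where it must be deleted explicitly:
//
//   class MyNode : public ResourceObj { };
//   MyNode* a = new (ResourceObj::RESOURCE_AREA, mtInternal) MyNode();
//   MyNode* b = new (ResourceObj::C_HEAP, mtInternal) MyNode();
//   delete b;  // only legal for C_HEAP objects (asserted in operator delete)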
kvn@2040 141 #ifdef ASSERT
kvn@2040 142 void ResourceObj::set_allocation_type(address res, allocation_type type) {
kvn@2040 143 // Set allocation type in the resource object
kvn@2040 144 uintptr_t allocation = (uintptr_t)res;
drchase@6680 145 assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res)));
kvn@2040 146 assert(type <= allocation_mask, "incorrect allocation type");
kvn@2357 147 ResourceObj* resobj = (ResourceObj *)res;
kvn@2357 148 resobj->_allocation_t[0] = ~(allocation + type);
kvn@2357 149 if (type != STACK_OR_EMBEDDED) {
kvn@2357 150 // Called from operator new() and CollectionSetChooser(),
kvn@2357 151 // set verification value.
kvn@2357 152 resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
kvn@2357 153 }
kvn@2040 154 }
kvn@2040 155
kvn@2043 156 ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
kvn@2357 157 assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
kvn@2357 158 return (allocation_type)((~_allocation_t[0]) & allocation_mask);
kvn@2357 159 }
kvn@2357 160
kvn@2357 161 bool ResourceObj::is_type_set() const {
kvn@2357 162 allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
kvn@2357 163 return get_allocation_type() == type &&
kvn@2357 164 (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
kvn@2040 165 }
kvn@2040 166
kvn@2043 167 ResourceObj::ResourceObj() { // default constructor
kvn@2357 168 if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
kvn@2357 169 // Operator new() is not called for allocations
kvn@2357 170 // on stack and for embedded objects.
kvn@2040 171 set_allocation_type((address)this, STACK_OR_EMBEDDED);
kvn@2357 172 } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
kvn@2357 173 // For some reason we got a value which resembles
kvn@2357 174 // an embedded or stack object (operator new() does not
kvn@2357 175 // set such a type). Keep it since it is a valid value
kvn@2357 176 // (even if it was garbage).
kvn@2357 177 // Ignore garbage in other fields.
kvn@2357 178 } else if (is_type_set()) {
kvn@2357 179 // Operator new() was called and type was set.
kvn@2357 180 assert(!allocated_on_stack(),
kvn@2357 181 err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
drchase@6680 182 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
kvn@2040 183 } else {
kvn@2357 184 // Operator new() was not called.
kvn@2357 185 // Assume that it is embedded or stack object.
kvn@2357 186 set_allocation_type((address)this, STACK_OR_EMBEDDED);
kvn@2040 187 }
kvn@2357 188 _allocation_t[1] = 0; // Zap verification value
kvn@2040 189 }
kvn@2040 190
kvn@2043 191 ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
kvn@2040 192 // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
kvn@2357 193 // Note: garbage may resemble a valid value.
kvn@2357 194 assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
kvn@2357 195 err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
drchase@6680 196 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
kvn@2040 197 set_allocation_type((address)this, STACK_OR_EMBEDDED);
kvn@2357 198 _allocation_t[1] = 0; // Zap verification value
kvn@2040 199 }
kvn@2040 200
kvn@2040 201 ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
kvn@2040 202 // Used in InlineTree::ok_to_inline() for WarmCallInfo.
kvn@2357 203 assert(allocated_on_stack(),
kvn@2357 204 err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
drchase@6680 205 p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]));
kvn@2357 206 // Keep current _allocation_t value;
kvn@2040 207 return *this;
kvn@2040 208 }
kvn@2040 209
kvn@2040 210 ResourceObj::~ResourceObj() {
kvn@2043 211 // allocated_on_C_heap() also checks that the encoded (in _allocation_t[0]) address == this.
kvn@2357 212 if (!allocated_on_C_heap()) { // operator delete() will zap _allocation_t[0] for C_HEAP objects.
kvn@2357 213 _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
kvn@2040 214 }
kvn@2040 215 }
kvn@2040 216 #endif // ASSERT
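// Worked example of the debug-only encoding above (illustrative, with a
// made-up address): for an object at 0x1000 allocated as C_HEAP,
// set_allocation_type() stores _allocation_t[0] = ~(0x1000 + C_HEAP).
// Since objects are at least 4-byte aligned, the low allocation_mask bits
// of the address are zero, so
//   (~_allocation_t[0]) & allocation_mask    recovers the type, and
//   ~(_allocation_t[0] | allocation_mask)    recovers the address,
// which is compared against 'this' to catch misplaced or corrupted objects.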
kvn@2040 217
kvn@2040 218
duke@435 219 void trace_heap_malloc(size_t size, const char* name, void* p) {
duke@435 220 // A lock is not needed here - tty uses a lock internally
drchase@6680 221 tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p2i(p), size, name == NULL ? "" : name);
duke@435 222 }
duke@435 223
duke@435 224
duke@435 225 void trace_heap_free(void* p) {
duke@435 226 // A lock is not needed here - tty uses a lock internally
drchase@6680 227 tty->print_cr("Heap free " INTPTR_FORMAT, p2i(p));
duke@435 228 }
duke@435 229
duke@435 230 //--------------------------------------------------------------------------------------
duke@435 231 // ChunkPool implementation
duke@435 232
duke@435 233 // MT-safe pool of chunks to reduce malloc/free thrashing
duke@435 234 // NB: not using Mutex because pools are used before Threads are initialized
zgu@3900 235 class ChunkPool: public CHeapObj<mtInternal> {
duke@435 236 Chunk* _first; // first cached Chunk; its first word points to next chunk
duke@435 237 size_t _num_chunks; // number of unused chunks in pool
duke@435 238 size_t _num_used; // number of chunks currently checked out
duke@435 239 const size_t _size; // size of each chunk (must be uniform)
duke@435 240
iklam@5368 241 // Our four static pools
duke@435 242 static ChunkPool* _large_pool;
duke@435 243 static ChunkPool* _medium_pool;
duke@435 244 static ChunkPool* _small_pool;
iklam@5368 245 static ChunkPool* _tiny_pool;
duke@435 246
duke@435 247 // Return the first cached chunk, or NULL if the pool is empty
duke@435 248 void* get_first() {
duke@435 249 Chunk* c = _first;
duke@435 250 if (_first) {
duke@435 251 _first = _first->next();
duke@435 252 _num_chunks--;
duke@435 253 }
duke@435 254 return c;
duke@435 255 }
duke@435 256
duke@435 257 public:
duke@435 258 // All chunks in a ChunkPool have the same size
duke@435 259 ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
duke@435 260
duke@435 261 // Allocate a new chunk from the pool (might expand the pool)
hseigel@5241 262 _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
duke@435 263 assert(bytes == _size, "bad size");
duke@435 264 void* p = NULL;
zgu@3900 265 // No VM lock can be taken inside a ThreadCritical lock, so os::malloc
zgu@3900 266 // should be done outside the ThreadCritical lock because of NMT
duke@435 267 { ThreadCritical tc;
duke@435 268 _num_used++;
duke@435 269 p = get_first();
duke@435 270 }
zgu@3900 271 if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
hseigel@5241 272 if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
ccheung@4993 273 vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
hseigel@5241 274 }
duke@435 275 return p;
duke@435 276 }
duke@435 277
duke@435 278 // Return a chunk to the pool
duke@435 279 void free(Chunk* chunk) {
duke@435 280 assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
duke@435 281 ThreadCritical tc;
duke@435 282 _num_used--;
duke@435 283
duke@435 284 // Add chunk to list
duke@435 285 chunk->set_next(_first);
duke@435 286 _first = chunk;
duke@435 287 _num_chunks++;
duke@435 288 }
duke@435 289
duke@435 290 // Prune the pool
duke@435 291 void free_all_but(size_t n) {
zgu@3900 292 Chunk* cur = NULL;
zgu@3900 293 Chunk* next;
zgu@3900 294 {
duke@435 295 // if we have more than n chunks, keep n of them and free the rest
duke@435 296 ThreadCritical tc;
duke@435 297 if (_num_chunks > n) {
duke@435 298 // free chunks at end of queue, for better locality
zgu@3900 299 cur = _first;
duke@435 300 for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
duke@435 301
duke@435 302 if (cur != NULL) {
zgu@3900 303 next = cur->next();
duke@435 304 cur->set_next(NULL);
duke@435 305 cur = next;
duke@435 306
zgu@3900 307 _num_chunks = n;
zgu@3900 308 }
zgu@3900 309 }
zgu@3900 310 }
zgu@3900 311
zgu@3900 312 // Free all remaining chunks, outside of ThreadCritical
zgu@3900 313 // to avoid deadlock with NMT
duke@435 314 while(cur != NULL) {
duke@435 315 next = cur->next();
zgu@3900 316 os::free(cur, mtChunk);
duke@435 317 cur = next;
duke@435 318 }
duke@435 319 }
duke@435 320
duke@435 321 // Accessors to preallocated pools
duke@435 322 static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; }
duke@435 323 static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
duke@435 324 static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; }
iklam@5368 325 static ChunkPool* tiny_pool() { assert(_tiny_pool != NULL, "must be initialized"); return _tiny_pool; }
duke@435 326
duke@435 327 static void initialize() {
duke@435 328 _large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size());
duke@435 329 _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
duke@435 330 _small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size());
iklam@5368 331 _tiny_pool = new ChunkPool(Chunk::tiny_size + Chunk::aligned_overhead_size());
duke@435 332 }
bobv@2036 333
bobv@2036 334 static void clean() {
bobv@2036 335 enum { BlocksToKeep = 5 };
iklam@5368 336 _tiny_pool->free_all_but(BlocksToKeep);
bobv@2036 337 _small_pool->free_all_but(BlocksToKeep);
bobv@2036 338 _medium_pool->free_all_but(BlocksToKeep);
bobv@2036 339 _large_pool->free_all_but(BlocksToKeep);
bobv@2036 340 }
duke@435 341 };
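// Illustrative note (not part of the original source): these pools are
// reached only through Chunk::operator new/delete below. A request for one
// of the four fixed chunk sizes is recycled through its pool; any other
// size goes straight to os::malloc/os::free. Allocating a standard arena
// chunk, for example, effectively performs:
//
//   void* p = ChunkPool::large_pool()->allocate(
//                 Chunk::size + Chunk::aligned_overhead_size(),
//                 AllocFailStrategy::EXIT_OOM);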
duke@435 342
duke@435 343 ChunkPool* ChunkPool::_large_pool = NULL;
duke@435 344 ChunkPool* ChunkPool::_medium_pool = NULL;
duke@435 345 ChunkPool* ChunkPool::_small_pool = NULL;
iklam@5368 346 ChunkPool* ChunkPool::_tiny_pool = NULL;
duke@435 347
duke@435 348 void chunkpool_init() {
duke@435 349 ChunkPool::initialize();
duke@435 350 }
duke@435 351
bobv@2036 352 void
bobv@2036 353 Chunk::clean_chunk_pool() {
bobv@2036 354 ChunkPool::clean();
bobv@2036 355 }
bobv@2036 356
duke@435 357
duke@435 358 //--------------------------------------------------------------------------------------
duke@435 359 // ChunkPoolCleaner implementation
bobv@2036 360 //
duke@435 361
duke@435 362 class ChunkPoolCleaner : public PeriodicTask {
bobv@2036 363 enum { CleaningInterval = 5000 }; // cleaning interval in ms
duke@435 364
duke@435 365 public:
duke@435 366 ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
duke@435 367 void task() {
bobv@2036 368 ChunkPool::clean();
duke@435 369 }
duke@435 370 };
duke@435 371
duke@435 372 //--------------------------------------------------------------------------------------
duke@435 373 // Chunk implementation
duke@435 374
coleenp@5614 375 void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
duke@435 376 // requested_size is equal to sizeof(Chunk), but in order for the arena
duke@435 377 // allocations to come out aligned as expected, the size must be rounded
minqi@5103 378 // up to the expected arena alignment.
duke@435 379 // We expect requested_size == sizeof(Chunk); if it is not a properly aligned size, we must align it.
duke@435 380 assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
duke@435 381 size_t bytes = ARENA_ALIGN(requested_size) + length;
duke@435 382 switch (length) {
hseigel@5241 383 case Chunk::size: return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
hseigel@5241 384 case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
hseigel@5241 385 case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
iklam@5368 386 case Chunk::tiny_size: return ChunkPool::tiny_pool()->allocate(bytes, alloc_failmode);
duke@435 387 default: {
hseigel@5241 388 void* p = os::malloc(bytes, mtChunk, CALLER_PC);
hseigel@5241 389 if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
ccheung@4993 390 vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
hseigel@5241 391 }
duke@435 392 return p;
duke@435 393 }
duke@435 394 }
duke@435 395 }
duke@435 396
duke@435 397 void Chunk::operator delete(void* p) {
duke@435 398 Chunk* c = (Chunk*)p;
duke@435 399 switch (c->length()) {
duke@435 400 case Chunk::size: ChunkPool::large_pool()->free(c); break;
duke@435 401 case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
duke@435 402 case Chunk::init_size: ChunkPool::small_pool()->free(c); break;
iklam@5368 403 case Chunk::tiny_size: ChunkPool::tiny_pool()->free(c); break;
zgu@3900 404 default: os::free(c, mtChunk);
duke@435 405 }
duke@435 406 }
duke@435 407
duke@435 408 Chunk::Chunk(size_t length) : _len(length) {
duke@435 409 _next = NULL; // Chain on the linked list
duke@435 410 }
duke@435 411
duke@435 412
duke@435 413 void Chunk::chop() {
duke@435 414 Chunk *k = this;
duke@435 415 while( k ) {
duke@435 416 Chunk *tmp = k->next();
duke@435 417 // clear out this chunk (to detect allocation bugs)
duke@435 418 if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
duke@435 419 delete k; // Free chunk (was malloc'd)
duke@435 420 k = tmp;
duke@435 421 }
duke@435 422 }
duke@435 423
duke@435 424 void Chunk::next_chop() {
duke@435 425 _next->chop();
duke@435 426 _next = NULL;
duke@435 427 }
duke@435 428
duke@435 429
duke@435 430 void Chunk::start_chunk_pool_cleaner_task() {
duke@435 431 #ifdef ASSERT
duke@435 432 static bool task_created = false;
duke@435 433 assert(!task_created, "should not start chunk pool cleaner twice");
duke@435 434 task_created = true;
duke@435 435 #endif
duke@435 436 ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
duke@435 437 cleaner->enroll();
duke@435 438 }
duke@435 439
duke@435 440 //------------------------------Arena------------------------------------------
zgu@7074 441 Arena::Arena(MEMFLAGS flag, size_t init_size) : _flags(flag), _size_in_bytes(0) {
duke@435 442 size_t round_size = (sizeof (char *)) - 1;
duke@435 443 init_size = (init_size+round_size) & ~round_size;
hseigel@5241 444 _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
duke@435 445 _hwm = _chunk->bottom(); // Save the cached hwm, max
duke@435 446 _max = _chunk->top();
zgu@7074 447 MemTracker::record_new_arena(flag);
duke@435 448 set_size_in_bytes(init_size);
duke@435 449 }
duke@435 450
zgu@7074 451 Arena::Arena(MEMFLAGS flag) : _flags(flag), _size_in_bytes(0) {
hseigel@5241 452 _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
duke@435 453 _hwm = _chunk->bottom(); // Save the cached hwm, max
duke@435 454 _max = _chunk->top();
zgu@7074 455 MemTracker::record_new_arena(flag);
duke@435 456 set_size_in_bytes(Chunk::init_size);
duke@435 457 }
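// Illustrative usage sketch (not in the original source): an Arena is a
// bump-pointer allocator over a chain of Chunks, tagged with a MEMFLAGS
// category so NMT can attribute its memory:
//
//   Arena* ar = new (mtInternal) Arena(mtInternal);
//   void* p = ar->Amalloc(64);   // Amalloc is declared in allocation.hpp
//   delete ar;                   // destruct_contents() frees every chunk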
duke@435 458
duke@435 459 Arena *Arena::move_contents(Arena *copy) {
duke@435 460 copy->destruct_contents();
duke@435 461 copy->_chunk = _chunk;
duke@435 462 copy->_hwm = _hwm;
duke@435 463 copy->_max = _max;
duke@435 464 copy->_first = _first;
zgu@4193 465
zgu@4193 466 // Work around a rare race condition which could cause native memory
zgu@4193 467 // tracking to double-count the arena size
zgu@4193 468 size_t size = size_in_bytes();
zgu@4193 469 set_size_in_bytes(0);
zgu@4193 470 copy->set_size_in_bytes(size);
duke@435 471 // Destroy original arena
duke@435 472 reset();
duke@435 473 return copy; // Return Arena with contents
duke@435 474 }
duke@435 475
duke@435 476 Arena::~Arena() {
duke@435 477 destruct_contents();
zgu@7074 478 MemTracker::record_arena_free(_flags);
zgu@3900 479 }
zgu@3900 480
coleenp@5614 481 void* Arena::operator new(size_t size) throw() {
zgu@3900 482 assert(false, "Use dynamic memory type binding");
zgu@3900 483 return NULL;
zgu@3900 484 }
zgu@3900 485
coleenp@5614 486 void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) throw() {
zgu@3900 487 assert(false, "Use dynamic memory type binding");
zgu@3900 488 return NULL;
zgu@3900 489 }
zgu@3900 490
zgu@3900 491 // dynamic memory type binding
coleenp@5614 492 void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
zgu@3900 493 #ifdef ASSERT
zgu@7074 494 void* p = (void*)AllocateHeap(size, flags, CALLER_PC);
zgu@3900 495 if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
zgu@3900 496 return p;
zgu@3900 497 #else
zgu@7074 498 return (void *) AllocateHeap(size, flags, CALLER_PC);
zgu@3900 499 #endif
zgu@3900 500 }
zgu@3900 501
coleenp@5614 502 void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
zgu@3900 503 #ifdef ASSERT
zgu@7074 504 void* p = os::malloc(size, flags, CALLER_PC);
zgu@3900 505 if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
zgu@3900 506 return p;
zgu@3900 507 #else
zgu@7074 508 return os::malloc(size, flags, CALLER_PC);
zgu@3900 509 #endif
zgu@3900 510 }
zgu@3900 511
zgu@3900 512 void Arena::operator delete(void* p) {
zgu@3900 513 FreeHeap(p);
duke@435 514 }
duke@435 515
duke@435 516 // Destroy this arena's contents and reset to empty
duke@435 517 void Arena::destruct_contents() {
duke@435 518 if (UseMallocOnly && _first != NULL) {
duke@435 519 char* end = _first->next() ? _first->top() : _hwm;
duke@435 520 free_malloced_objects(_first, _first->bottom(), end, _hwm);
duke@435 521 }
zgu@4193 522 // Reset the size before chopping to avoid a rare race condition
zgu@4193 523 // that can make the total arena memory exceed the total chunk memory
zgu@4193 524 set_size_in_bytes(0);
duke@435 525 _first->chop();
duke@435 526 reset();
duke@435 527 }
duke@435 528
zgu@3900 529 // This is a high-traffic method, but many calls actually don't
zgu@3900 530 // change the size
zgu@3900 531 void Arena::set_size_in_bytes(size_t size) {
zgu@3900 532 if (_size_in_bytes != size) {
zgu@7074 533 long delta = (long)(size - size_in_bytes());
zgu@3900 534 _size_in_bytes = size;
zgu@7074 535 MemTracker::record_arena_size_change(delta, _flags);
zgu@3900 536 }
zgu@3900 537 }
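// Illustrative note (not in the original source): NMT accumulates these
// signed per-flag deltas, so an arena growing from 4K to 12K reports a
// +8K change for _flags rather than re-reporting its full size each time.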
duke@435 538
duke@435 539 // Total of all Chunks in arena
duke@435 540 size_t Arena::used() const {
duke@435 541 size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
duke@435 542 register Chunk *k = _first;
duke@435 543 while( k != _chunk) { // Whilst we have Chunks in a row
duke@435 544 sum += k->length(); // Total size of this Chunk
duke@435 545 k = k->next(); // Bump along to next Chunk
duke@435 546 }
duke@435 547 return sum; // Return total consumed space.
duke@435 548 }
duke@435 549
kamg@2589 550 void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
ccheung@4993 551 vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
kamg@2589 552 }
duke@435 553
duke@435 554 // Grow a new Chunk
nloodin@4183 555 void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
duke@435 556 // Get minimal required size. Either real big, or even bigger for giant objs
duke@435 557 size_t len = MAX2(x, (size_t) Chunk::size);
duke@435 558
duke@435 559 Chunk *k = _chunk; // Get filled-up chunk address
hseigel@5241 560 _chunk = new (alloc_failmode, len) Chunk(len);
duke@435 561
kamg@2589 562 if (_chunk == NULL) {
poonam@6695 563 _chunk = k; // restore the previous value of _chunk
nloodin@4183 564 return NULL;
kamg@2589 565 }
duke@435 566 if (k) k->set_next(_chunk); // Append new chunk to end of linked list
duke@435 567 else _first = _chunk;
duke@435 568 _hwm = _chunk->bottom(); // Save the cached hwm, max
duke@435 569 _max = _chunk->top();
duke@435 570 set_size_in_bytes(size_in_bytes() + len);
duke@435 571 void* result = _hwm;
duke@435 572 _hwm += x;
duke@435 573 return result;
duke@435 574 }
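// Simplified paraphrase (not the original code) of the fast-path caller,
// Arena::Amalloc in allocation.hpp, showing when grow() is reached:
//
//   void* Arena::Amalloc(size_t x, AllocFailType mode) {
//     x = ARENA_ALIGN(x);                          // keep results aligned
//     if (_hwm + x > _max) return grow(x, mode);   // spill into a new chunk
//     char* old = _hwm;
//     _hwm += x;                                   // bump-pointer fast path
//     return old;
//   }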
duke@435 575
duke@435 576
duke@435 577
duke@435 578 // Reallocate storage in Arena.
nloodin@4183 579 void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
duke@435 580 assert(new_size >= 0, "bad size");
duke@435 581 if (new_size == 0) return NULL;
duke@435 582 #ifdef ASSERT
duke@435 583 if (UseMallocOnly) {
duke@435 584 // always allocate a new object (otherwise we'll free this one twice)
nloodin@4183 585 char* copy = (char*)Amalloc(new_size, alloc_failmode);
nloodin@4183 586 if (copy == NULL) {
nloodin@4183 587 return NULL;
nloodin@4183 588 }
duke@435 589 size_t n = MIN2(old_size, new_size);
duke@435 590 if (n > 0) memcpy(copy, old_ptr, n);
duke@435 591 Afree(old_ptr,old_size); // Mostly done to keep stats accurate
duke@435 592 return copy;
duke@435 593 }
duke@435 594 #endif
duke@435 595 char *c_old = (char*)old_ptr; // Handy name
duke@435 596 // Stupid fast special case
duke@435 597 if( new_size <= old_size ) { // Shrink in-place
duke@435 598 if( c_old+old_size == _hwm) // Attempt to free the excess bytes
duke@435 599 _hwm = c_old+new_size; // Adjust hwm
duke@435 600 return c_old;
duke@435 601 }
duke@435 602
duke@435 603 // make sure that new_size is legal
duke@435 604 size_t corrected_new_size = ARENA_ALIGN(new_size);
duke@435 605
duke@435 606 // See if we can resize in-place
duke@435 607 if( (c_old+old_size == _hwm) && // Adjusting recent thing
duke@435 608 (c_old+corrected_new_size <= _max) ) { // Still fits where it sits
duke@435 609 _hwm = c_old+corrected_new_size; // Adjust hwm
duke@435 610 return c_old; // Return old pointer
duke@435 611 }
duke@435 612
duke@435 613 // Oops, got to relocate guts
nloodin@4183 614 void *new_ptr = Amalloc(new_size, alloc_failmode);
nloodin@4183 615 if (new_ptr == NULL) {
nloodin@4183 616 return NULL;
nloodin@4183 617 }
duke@435 618 memcpy( new_ptr, c_old, old_size );
duke@435 619 Afree(c_old,old_size); // Mostly done to keep stats accurate
duke@435 620 return new_ptr;
duke@435 621 }
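// Illustrative usage (not in the original source; 'ar' is an assumed
// Arena*): the most recent allocation can often be grown in place:
//
//   char* p = (char*) ar->Amalloc(16);
//   p = (char*) ar->Arealloc(p, 16, 48);  // extends in place while p is
//                                         // still at the high-water mark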
duke@435 622
duke@435 623
duke@435 624 // Determine if pointer belongs to this Arena or not.
duke@435 625 bool Arena::contains( const void *ptr ) const {
duke@435 626 #ifdef ASSERT
duke@435 627 if (UseMallocOnly) {
duke@435 628 // really slow, but not easy to make fast
duke@435 629 if (_chunk == NULL) return false;
duke@435 630 char** bottom = (char**)_chunk->bottom();
duke@435 631 for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
duke@435 632 if (*p == ptr) return true;
duke@435 633 }
duke@435 634 for (Chunk *c = _first; c != NULL; c = c->next()) {
duke@435 635 if (c == _chunk) continue; // current chunk has been processed
duke@435 636 char** bottom = (char**)c->bottom();
duke@435 637 for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
duke@435 638 if (*p == ptr) return true;
duke@435 639 }
duke@435 640 }
duke@435 641 return false;
duke@435 642 }
duke@435 643 #endif
duke@435 644 if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
duke@435 645 return true; // Check for in this chunk
duke@435 646 for (Chunk *c = _first; c; c = c->next()) {
duke@435 647 if (c == _chunk) continue; // current chunk has been processed
duke@435 648 if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
duke@435 649 return true; // Check for every chunk in Arena
duke@435 650 }
duke@435 651 }
duke@435 652 return false; // Not in any Chunk, so not in Arena
duke@435 653 }
duke@435 654
duke@435 655
duke@435 656 #ifdef ASSERT
duke@435 657 void* Arena::malloc(size_t size) {
duke@435 658 assert(UseMallocOnly, "shouldn't call");
duke@435 659 // use malloc, but save pointer in res. area for later freeing
duke@435 660 char** save = (char**)internal_malloc_4(sizeof(char*));
zgu@3900 661 return (*save = (char*)os::malloc(size, mtChunk));
duke@435 662 }
duke@435 663
duke@435 664 // for debugging with UseMallocOnly
duke@435 665 void* Arena::internal_malloc_4(size_t x) {
duke@435 666 assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
kamg@2589 667 check_for_overflow(x, "Arena::internal_malloc_4");
duke@435 668 if (_hwm + x > _max) {
duke@435 669 return grow(x);
duke@435 670 } else {
duke@435 671 char *old = _hwm;
duke@435 672 _hwm += x;
duke@435 673 return old;
duke@435 674 }
duke@435 675 }
duke@435 676 #endif
duke@435 677
duke@435 678
duke@435 679 //--------------------------------------------------------------------------------------
duke@435 680 // Non-product code
duke@435 681
duke@435 682 #ifndef PRODUCT
duke@435 683 // The global operator new should never be called since it will usually indicate
duke@435 684 // a memory leak. Use CHeapObj as the base class of such objects to make it explicit
duke@435 685 // that they're allocated on the C heap.
duke@435 686 // Commented out in product version to avoid conflicts with third-party C++ native code.
minqi@5103 687 // On certain platforms, such as Mac OS X (Darwin), in debug builds, new is called
minqi@5103 688 // from JDK source code and causes data corruption, e.g. in
minqi@5103 689 // Java_sun_security_ec_ECKeyPairGenerator_generateECKeyPair.
minqi@5103 690 // Define ALLOW_OPERATOR_NEW_USAGE for platforms on which the global operator new is allowed.
minqi@5103 691 //
minqi@5103 692 #ifndef ALLOW_OPERATOR_NEW_USAGE
coleenp@5614 693 void* operator new(size_t size) throw() {
minqi@5103 694 assert(false, "Should not call global operator new");
minqi@5103 695 return 0;
duke@435 696 }
minqi@5103 697
coleenp@5614 698 void* operator new [](size_t size) throw() {
minqi@5103 699 assert(false, "Should not call global operator new[]");
minqi@5103 700 return 0;
minqi@5103 701 }
minqi@5103 702
coleenp@5614 703 void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
minqi@5103 704 assert(false, "Should not call global operator new");
minqi@5103 705 return 0;
minqi@5103 706 }
minqi@5103 707
coleenp@5614 708 void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
minqi@5103 709 assert(false, "Should not call global operator new[]");
minqi@5103 710 return 0;
minqi@5103 711 }
minqi@5103 712
minqi@5103 713 void operator delete(void* p) {
minqi@5103 714 assert(false, "Should not call global delete");
minqi@5103 715 }
minqi@5103 716
minqi@5103 717 void operator delete [](void* p) {
minqi@5103 718 assert(false, "Should not call global delete []");
minqi@5103 719 }
minqi@5103 720 #endif // ALLOW_OPERATOR_NEW_USAGE
duke@435 721
duke@435 722 void AllocatedObj::print() const { print_on(tty); }
duke@435 723 void AllocatedObj::print_value() const { print_value_on(tty); }
duke@435 724
duke@435 725 void AllocatedObj::print_on(outputStream* st) const {
drchase@6680 726 st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
duke@435 727 }
duke@435 728
duke@435 729 void AllocatedObj::print_value_on(outputStream* st) const {
drchase@6680 730 st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
duke@435 731 }
duke@435 732
kvn@2557 733 julong Arena::_bytes_allocated = 0;
kvn@2557 734
kvn@2557 735 void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }
duke@435 736
duke@435 737 AllocStats::AllocStats() {
kvn@2557 738 start_mallocs = os::num_mallocs;
kvn@2557 739 start_frees = os::num_frees;
duke@435 740 start_malloc_bytes = os::alloc_bytes;
kvn@2557 741 start_mfree_bytes = os::free_bytes;
kvn@2557 742 start_res_bytes = Arena::_bytes_allocated;
duke@435 743 }
duke@435 744
kvn@2557 745 julong AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
kvn@2557 746 julong AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
kvn@2557 747 julong AllocStats::num_frees() { return os::num_frees - start_frees; }
kvn@2557 748 julong AllocStats::free_bytes() { return os::free_bytes - start_mfree_bytes; }
kvn@2557 749 julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
duke@435 750 void AllocStats::print() {
kvn@2557 751 tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
kvn@2557 752 UINT64_FORMAT" frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
kvn@2557 753 num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
duke@435 754 }
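// Illustrative usage (not in the original source): AllocStats snapshots the
// global allocation counters at construction so deltas over a scope can be
// printed in non-product builds:
//
//   AllocStats stats;    // records num_mallocs, alloc_bytes, frees, ...
//   /* ... work to be measured ... */
//   stats.print();       // prints the deltas accumulated since construction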
duke@435 755
duke@435 756
duke@435 757 // debugging code
duke@435 758 inline void Arena::free_all(char** start, char** end) {
duke@435 759 for (char** p = start; p < end; p++) if (*p) os::free(*p);
duke@435 760 }
duke@435 761
duke@435 762 void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
duke@435 763 assert(UseMallocOnly, "should not call");
duke@435 764 // free all objects malloced since resource mark was created; resource area
duke@435 765 // contains their addresses
duke@435 766 if (chunk->next()) {
duke@435 767 // this chunk is full, and some others too
duke@435 768 for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
duke@435 769 char* top = c->top();
duke@435 770 if (c->next() == NULL) {
duke@435 771 top = hwm2; // last chunk is only used up to hwm2
duke@435 772 assert(c->contains(hwm2), "bad hwm2");
duke@435 773 }
duke@435 774 free_all((char**)c->bottom(), (char**)top);
duke@435 775 }
duke@435 776 assert(chunk->contains(hwm), "bad hwm");
duke@435 777 assert(chunk->contains(max), "bad max");
duke@435 778 free_all((char**)hwm, (char**)max);
duke@435 779 } else {
duke@435 780 // this chunk was partially used
duke@435 781 assert(chunk->contains(hwm), "bad hwm");
duke@435 782 assert(chunk->contains(hwm2), "bad hwm2");
duke@435 783 free_all((char**)hwm, (char**)hwm2);
duke@435 784 }
duke@435 785 }
duke@435 786
duke@435 787
duke@435 788 ReallocMark::ReallocMark() {
duke@435 789 #ifdef ASSERT
duke@435 790 Thread *thread = ThreadLocalStorage::get_thread_slow();
duke@435 791 _nesting = thread->resource_area()->nesting();
duke@435 792 #endif
duke@435 793 }
duke@435 794
duke@435 795 void ReallocMark::check() {
duke@435 796 #ifdef ASSERT
duke@435 797 if (_nesting != Thread::current()->resource_area()->nesting()) {
duke@435 798 fatal("allocation bug: array could grow within nested ResourceMark");
duke@435 799 }
duke@435 800 #endif
duke@435 801 }
duke@435 802
duke@435 803 #endif // Non-product
