src/share/vm/memory/allocation.cpp

author:      ptisnovs
date:        Thu, 19 Aug 2010 14:23:59 -0400
changeset:   2099:f8c5d1bdaad4
parent:      2044:f4f596978298
child:       2100:ebfb7c68865e
permissions: -rw-r--r--

6885308: The incorrect -XX:StackRedPages, -XX:StackShadowPages, -XX:StackYellowPages could cause VM crash
Summary: Test minimal stack sizes given (also fixed linux compilation error)
Reviewed-by: never, phh, coleenp

/*
 * Copyright (c) 1997, 2005, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_allocation.cpp.incl"

void* CHeapObj::operator new(size_t size) {
  return (void *) AllocateHeap(size, "CHeapObj-new");
}

void CHeapObj::operator delete(void* p) {
  FreeHeap(p);
}

void* StackObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; }
void  StackObj::operator delete(void* p)   { ShouldNotCallThis(); }
void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }
void  _ValueObj::operator delete(void* p)  { ShouldNotCallThis(); }

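// Usage sketch: these base classes route ordinary new/delete through the
// VM's own allocator. For example, given a hypothetical subclass:
//
//   class MyVMTable : public CHeapObj { };
//   MyVMTable* t = new MyVMTable();   // allocated via AllocateHeap()
//   delete t;                         // released via FreeHeap()
//
// StackObj and _ValueObj instances must live on the stack or be embedded in
// other objects, so their heap operators deliberately trap.
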
void* ResourceObj::operator new(size_t size, allocation_type type) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
  assert(type <= allocation_mask, "incorrect allocation type");
  ((ResourceObj *)res)->_allocation = ~(allocation + type);
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation) & allocation_mask);
}

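// How the encoding above works: the object's own address is combined with the
// allocation type in the two low bits and the result bitwise-negated, so
// ~(_allocation | allocation_mask) reconstructs the address for the sanity
// check and (~_allocation) & allocation_mask recovers the type. Leftover
// stack garbage is unlikely to decode back to the object's own address, which
// is what lets the default constructor below detect stack/embedded instances.
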
ResourceObj::ResourceObj() { // default constructor
  if (~(_allocation | allocation_mask) != (uintptr_t)this) {
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) {
    // For some reason we got a value which looks like an allocation on stack.
    // Pass if it is really allocated on stack.
    assert(Thread::current()->on_local_stack((address)this), "should be on stack");
  } else {
    assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
           "allocation_type should be set by operator new()");
  }
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
  set_allocation_type((address)this, STACK_OR_EMBEDDED);
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
  assert(allocated_on_stack(), "copy only into local");
  // Keep current _allocation value;
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::delete() zaps _allocation for C_heap.
    _allocation = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT


void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
}


void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main

//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }

  // Allocate a new chunk from the pool (might expand the pool)
  void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
      if (p == NULL) p = os::malloc(bytes);
    }
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }

  // Prune the pool
  void free_all_but(size_t n) {
    // if we have more than n chunks, keep only the first n and free the rest
    ThreadCritical tc;
    if (_num_chunks > n) {
      // free chunks at end of queue, for better locality
      Chunk* cur = _first;
      for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

      if (cur != NULL) {
        Chunk* next = cur->next();
        cur->set_next(NULL);
        cur = next;

        // Free all remaining chunks
        while (cur != NULL) {
          next = cur->next();
          os::free(cur);
          _num_chunks--;
          cur = next;
        }
      }
    }
  }

  // Accessors to preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }

  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};
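
// Pool lifecycle sketch: allocate() hands out a cached chunk when one is
// available and falls back to os::malloc() otherwise; free() pushes the chunk
// back on the front of the list, so the most recently used chunks are reused
// first; free_all_but(n) trims the cold tail of the list. The critical
// sections use ThreadCritical rather than Mutex because, as noted above,
// these pools are exercised before the thread infrastructure is initialized.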

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}


//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };   // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};

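// A single ChunkPoolCleaner is enrolled by Chunk::start_chunk_pool_cleaner_task()
// below; once enrolled, the periodic-task machinery invokes task() roughly
// every CleaningInterval milliseconds to trim the chunk pools.
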
//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk), but for the arena allocations
  // to come out aligned as expected the overhead must be rounded up to the
  // expected arena alignment; if sizeof(Chunk) is not already a multiple of
  // that alignment we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p = os::malloc(bytes);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}

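// Note the symmetry with operator delete below: only the three standard
// lengths are served from the pools, so delete must dispatch on the chunk's
// recorded length to return it to the pool it came from (or to os::free()).
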
void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c);  break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c);  break;
   default:                 os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;   // Chain on the linked list
}


void Chunk::chop() {
  Chunk *k = this;
  while (k) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;   // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}


void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}

//------------------------------Arena------------------------------------------

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size + round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
}

Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
  set_size_in_bytes(a->size_in_bytes());
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;
  copy->set_size_in_bytes(size_in_bytes());
  // Destroy original arena
  reset();
  return copy;   // Return Arena with contents
}
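
// move_contents() transfers this arena's chunk list and bump pointers into
// 'copy' (after destroying whatever 'copy' previously held) and then calls
// reset() so this arena is left empty; the caller continues with 'copy'.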

Arena::~Arena() {
  destruct_contents();
}

// Destroy this arena's contents and reset to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  _first->chop();
  reset();
}


// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max - _hwm);   // Size leftover in this Chunk
  register Chunk *k = _first;
  while (k != _chunk) {         // While there are full Chunks before the current one
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}


// Grow a new Chunk
void* Arena::grow(size_t x) {
  // Get minimal required size. Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL)
    vm_exit_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");

  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
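
// Arena allocation is bump-pointer: the common case (the inline Amalloc fast
// path in the header) just advances _hwm within the current chunk; grow() is
// the slow path that links in a fresh chunk of at least Chunk::size bytes and
// carves the requested x bytes out of it.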


// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size);
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr, old_size);   // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr;    // Handy name
  // Stupid fast special case
  if (new_size <= old_size) {      // Shrink in-place
    if (c_old + old_size == _hwm)  // Attempt to free the excess bytes
      _hwm = c_old + new_size;     // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if ((c_old + old_size == _hwm) &&             // Adjusting recent thing
      (c_old + corrected_new_size <= _max)) {   // Still fits where it sits
    _hwm = c_old + corrected_new_size;          // Adjust hwm
    return c_old;                               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size);
  memcpy(new_ptr, c_old, old_size);
  Afree(c_old, old_size);   // Mostly done to keep stats accurate
  return new_ptr;
}
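
// Arealloc handles three cases: shrinking (done in place, giving bytes back
// to the arena only when the block is the most recent allocation), growing a
// block that is both most recent and still fits below _max (also in place),
// and otherwise relocating the contents into a fresh Amalloc'd block.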


// Determine if pointer belongs to this Arena or not.
bool Arena::contains(const void *ptr) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;   // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if ((void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm)
    return true;                   // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;     // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;                 // Check for every chunk in Arena
    }
  }
  return false;                    // Not in any Chunk, so not in Arena
}


#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert((x & (sizeof(char*) - 1)) == 0, "misaligned size");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif

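// Under UseMallocOnly each arena allocation is an individual os::malloc()
// block whose address is recorded in the arena itself, presumably so that
// malloc-level debugging tools can check each object individually;
// free_malloced_objects() below walks the recorded addresses to release them.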

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak. Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size) {
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif

void AllocatedObj::print()       const { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

size_t Arena::_bytes_allocated = 0;

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

int    AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
size_t AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
size_t AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
int    AllocStats::num_frees()      { return os::num_frees - start_frees; }
void AllocStats::print() {
  tty->print("%d mallocs (%ldK), %d frees, %ldK resrc",
             num_mallocs(), alloc_bytes()/K, num_frees(), resource_bytes()/K);
}
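
// Usage sketch (illustrative): construct an AllocStats at the start of a
// phase and print it at the end to see that phase's allocation traffic.
//
//   AllocStats stats;          // snapshots the os:: and Arena counters
//   /* ... do some work ... */
//   stats.print();             // mallocs/frees/resource bytes since then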


// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}


ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
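
// ReallocMark records the resource-area nesting depth at construction; if a
// growable array later reallocates after an inner ResourceMark has been
// pushed, check() detects the depth mismatch and stops the VM, since storage
// grown inside the inner scope would be released when that mark is popped.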

#endif // Non-product
