src/share/vm/memory/allocation.cpp

author:      kamg
date:        Wed, 02 Mar 2011 08:18:35 -0500
changeset:   2589:4a9604cd7c5f
parent:      2557:f7de3327c683
child:       2834:2a3da7eaf4a6
permissions: -rw-r--r--

6878713: Verifier heap corruption, relating to backward jsrs
Summary: Added overflow detection in arena Amalloc methods
Reviewed-by: coleenp, phh
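
The overflow detection itself sits in the arena's bump-pointer allocation paths: in this
file, Arena::internal_malloc_4() calls check_for_overflow() before advancing _hwm, and per
the summary the same guard was added to the Amalloc methods, which are defined inline in
memory/allocation.hpp rather than here. As a rough sketch of what such a check looks like
(assuming it is an Arena helper that reuses signal_out_of_memory(), defined later in this
file; the exact body lives in allocation.hpp and is not shown on this page):

  // Sketch only: refuse a request whose size would wrap the bump pointer (_hwm)
  // around the top of the address space before the pointer is advanced.
  void Arena::check_for_overflow(size_t request, const char* whence) const {
    if ((uintptr_t)_hwm + request < (uintptr_t)_hwm) {  // unsigned wrap-around
      signal_out_of_memory(request, whence);
    }
  }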

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/ostream.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
void* CHeapObj::operator new(size_t size){
  return (void *) AllocateHeap(size, "CHeapObj-new");
}

void CHeapObj::operator delete(void* p){
  FreeHeap(p);
}

void* StackObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
void  StackObj::operator delete(void* p)   { ShouldNotCallThis(); };
void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); };
void* ResourceObj::operator new(size_t size, allocation_type type) {
  address res;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}
#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
    // Set allocation type in the resource object
    uintptr_t allocation = (uintptr_t)res;
    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
    assert(type <= allocation_mask, "incorrect allocation type");
    ResourceObj* resobj = (ResourceObj *)res;
    resobj->_allocation_t[0] = ~(allocation + type);
    if (type != STACK_OR_EMBEDDED) {
      // Called from operator new() and CollectionSetChooser(),
      // set verification value.
      resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
    }
}

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
    assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
    return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
    allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
    return get_allocation_type()  == type &&
           (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}
ResourceObj::ResourceObj() { // default constructor
    if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
      // Operator new() is not called for allocations
      // on stack and for embedded objects.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
      // For some reason we got a value which resembles
      // an embedded or stack object (operator new() does not
      // set such a type). Keep it since it is a valid value
      // (even if it was garbage).
      // Ignore garbage in other fields.
    } else if (is_type_set()) {
      // Operator new() was called and type was set.
      assert(!allocated_on_stack(),
             err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                     this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    } else {
      // Operator new() was not called.
      // Assume that it is an embedded or stack object.
      set_allocation_type((address)this, STACK_OR_EMBEDDED);
    }
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
    // Note: garbage may resemble a valid value.
    assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
           err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
    _allocation_t[1] = 0; // Zap verification value
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
    assert(allocated_on_stack(),
           err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
                   this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
    // Keep the current _allocation_t value.
    return *this;
}

ResourceObj::~ResourceObj() {
    // allocated_on_C_heap() also checks that the encoded (in _allocation_t) address == this.
    if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation_t for C_heap.
      _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
    }
}
#endif // ASSERT
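
// An illustrative sketch (hypothetical subclass "Foo", not part of this changeset) of how
// the allocation-type tracking above is exercised:
//
//   class Foo : public ResourceObj { };
//
//   Foo* a = new (ResourceObj::C_HEAP) Foo();  // tagged C_HEAP; the caller must delete it
//   Foo* b = new Foo();                        // resource area; reclaimed when the ResourceMark unwinds
//   Foo  c;                                    // default constructor tags it STACK_OR_EMBEDDED
//
//   delete a;   // OK: allocated_on_C_heap() holds
//   delete b;   // would trip the assert in ResourceObj::operator delete()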
void trace_heap_malloc(size_t size, const char* name, void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name);
}

void trace_heap_free(void* p) {
  // A lock is not needed here - tty uses a lock internally
  tty->print_cr("Heap free   " INTPTR_FORMAT, p);
}

bool warn_new_operator = false; // see vm_main
//--------------------------------------------------------------------------------------
// ChunkPool implementation

// MT-safe pool of chunks to reduce malloc/free thrashing
// NB: not using Mutex because pools are used before Threads are initialized
class ChunkPool {
  Chunk*       _first;        // first cached Chunk; its first word points to next chunk
  size_t       _num_chunks;   // number of unused chunks in pool
  size_t       _num_used;     // number of chunks currently checked out
  const size_t _size;         // size of each chunk (must be uniform)

  // Our three static pools
  static ChunkPool* _large_pool;
  static ChunkPool* _medium_pool;
  static ChunkPool* _small_pool;

  // return first element or null
  void* get_first() {
    Chunk* c = _first;
    if (_first) {
      _first = _first->next();
      _num_chunks--;
    }
    return c;
  }

 public:
  // All chunks in a ChunkPool have the same size
  ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
  // Allocate a new chunk from the pool (might expand the pool)
  void* allocate(size_t bytes) {
    assert(bytes == _size, "bad size");
    void* p = NULL;
    { ThreadCritical tc;
      _num_used++;
      p = get_first();
      if (p == NULL) p = os::malloc(bytes);
    }
    if (p == NULL)
      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");

    return p;
  }

  // Return a chunk to the pool
  void free(Chunk* chunk) {
    assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size");
    ThreadCritical tc;
    _num_used--;

    // Add chunk to list
    chunk->set_next(_first);
    _first = chunk;
    _num_chunks++;
  }
  // Prune the pool
  void free_all_but(size_t n) {
    // if we have more than n chunks, keep the first n and free the rest
    ThreadCritical tc;
    if (_num_chunks > n) {
      // free chunks at end of queue, for better locality
      Chunk* cur = _first;
      for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();

      if (cur != NULL) {
        Chunk* next = cur->next();
        cur->set_next(NULL);
        cur = next;

        // Free all remaining chunks
        while(cur != NULL) {
          next = cur->next();
          os::free(cur);
          _num_chunks--;
          cur = next;
        }
      }
    }
  }

  // Accessors to the preallocated pools
  static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
  static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; }
  static ChunkPool* small_pool()  { assert(_small_pool  != NULL, "must be initialized"); return _small_pool;  }
  static void initialize() {
    _large_pool  = new ChunkPool(Chunk::size        + Chunk::aligned_overhead_size());
    _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size());
    _small_pool  = new ChunkPool(Chunk::init_size   + Chunk::aligned_overhead_size());
  }

  static void clean() {
    enum { BlocksToKeep = 5 };
    _small_pool->free_all_but(BlocksToKeep);
    _medium_pool->free_all_but(BlocksToKeep);
    _large_pool->free_all_but(BlocksToKeep);
  }
};

ChunkPool* ChunkPool::_large_pool  = NULL;
ChunkPool* ChunkPool::_medium_pool = NULL;
ChunkPool* ChunkPool::_small_pool  = NULL;

void chunkpool_init() {
  ChunkPool::initialize();
}

void
Chunk::clean_chunk_pool() {
  ChunkPool::clean();
}
//--------------------------------------------------------------------------------------
// ChunkPoolCleaner implementation
//

class ChunkPoolCleaner : public PeriodicTask {
  enum { CleaningInterval = 5000 };      // cleaning interval in ms

 public:
  ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {}
  void task() {
    ChunkPool::clean();
  }
};
//--------------------------------------------------------------------------------------
// Chunk implementation

void* Chunk::operator new(size_t requested_size, size_t length) {
  // requested_size is equal to sizeof(Chunk), but for the arena allocations to
  // come out aligned as expected the size must be aligned to the expected arena
  // alignment. We expect requested_size == sizeof(Chunk); if that is not already
  // the proper (aligned) size we must align it here.
  assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
  size_t bytes = ARENA_ALIGN(requested_size) + length;
  switch (length) {
   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   default: {
     void *p =  os::malloc(bytes);
     if (p == NULL)
       vm_exit_out_of_memory(bytes, "Chunk::new");
     return p;
   }
  }
}
void Chunk::operator delete(void* p) {
  Chunk* c = (Chunk*)p;
  switch (c->length()) {
   case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   default:                 os::free(c);
  }
}

Chunk::Chunk(size_t length) : _len(length) {
  _next = NULL;         // Chain on the linked list
}

void Chunk::chop() {
  Chunk *k = this;
  while( k ) {
    Chunk *tmp = k->next();
    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length());
    delete k;                   // Free chunk (was malloc'd)
    k = tmp;
  }
}

void Chunk::next_chop() {
  _next->chop();
  _next = NULL;
}
void Chunk::start_chunk_pool_cleaner_task() {
#ifdef ASSERT
  static bool task_created = false;
  assert(!task_created, "should not start chunk pool cleaner twice");
  task_created = true;
#endif
  ChunkPoolCleaner* cleaner = new ChunkPoolCleaner();
  cleaner->enroll();
}
//------------------------------Arena------------------------------------------

Arena::Arena(size_t init_size) {
  size_t round_size = (sizeof (char *)) - 1;
  init_size = (init_size+round_size) & ~round_size;
  _first = _chunk = new (init_size) Chunk(init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(init_size);
}

Arena::Arena() {
  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
  _hwm = _chunk->bottom();      // Save the cached hwm, max
  _max = _chunk->top();
  set_size_in_bytes(Chunk::init_size);
}

Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
  set_size_in_bytes(a->size_in_bytes());
}

Arena *Arena::move_contents(Arena *copy) {
  copy->destruct_contents();
  copy->_chunk = _chunk;
  copy->_hwm   = _hwm;
  copy->_max   = _max;
  copy->_first = _first;
  copy->set_size_in_bytes(size_in_bytes());
  // Destroy original arena
  reset();
  return copy;            // Return Arena with contents
}

Arena::~Arena() {
  destruct_contents();
}
// Destroy this arena's contents and reset it to empty
void Arena::destruct_contents() {
  if (UseMallocOnly && _first != NULL) {
    char* end = _first->next() ? _first->top() : _hwm;
    free_malloced_objects(_first, _first->bottom(), end, _hwm);
  }
  _first->chop();
  reset();
}
// Total of all Chunks in arena
size_t Arena::used() const {
  size_t sum = _chunk->length() - (_max-_hwm); // Size leftover in this Chunk
  register Chunk *k = _first;
  while( k != _chunk) {         // Whilst have Chunks in a row
    sum += k->length();         // Total size of this Chunk
    k = k->next();              // Bump along to next Chunk
  }
  return sum;                   // Return total consumed space.
}

void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
  vm_exit_out_of_memory(sz, whence);
}

// Grow a new Chunk
void* Arena::grow( size_t x ) {
  // Get minimal required size.  Either real big, or even bigger for giant objs
  size_t len = MAX2(x, (size_t) Chunk::size);

  Chunk *k = _chunk;            // Get filled-up chunk address
  _chunk = new (len) Chunk(len);

  if (_chunk == NULL) {
    signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
  }

  if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
  else _first = _chunk;
  _hwm  = _chunk->bottom();     // Save the cached hwm, max
  _max =  _chunk->top();
  set_size_in_bytes(size_in_bytes() + len);
  void* result = _hwm;
  _hwm += x;
  return result;
}
// Reallocate storage in Arena.
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  assert(new_size >= 0, "bad size");
  if (new_size == 0) return NULL;
#ifdef ASSERT
  if (UseMallocOnly) {
    // always allocate a new object  (otherwise we'll free this one twice)
    char* copy = (char*)Amalloc(new_size);
    size_t n = MIN2(old_size, new_size);
    if (n > 0) memcpy(copy, old_ptr, n);
    Afree(old_ptr,old_size);    // Mostly done to keep stats accurate
    return copy;
  }
#endif
  char *c_old = (char*)old_ptr; // Handy name
  // Stupid fast special case
  if( new_size <= old_size ) {  // Shrink in-place
    if( c_old+old_size == _hwm) // Attempt to free the excess bytes
      _hwm = c_old+new_size;    // Adjust hwm
    return c_old;
  }

  // make sure that new_size is legal
  size_t corrected_new_size = ARENA_ALIGN(new_size);

  // See if we can resize in-place
  if( (c_old+old_size == _hwm) &&       // Adjusting recent thing
      (c_old+corrected_new_size <= _max) ) {      // Still fits where it sits
    _hwm = c_old+corrected_new_size;      // Adjust hwm
    return c_old;               // Return old pointer
  }

  // Oops, got to relocate guts
  void *new_ptr = Amalloc(new_size);
  memcpy( new_ptr, c_old, old_size );
  Afree(c_old,old_size);        // Mostly done to keep stats accurate
  return new_ptr;
}
// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
#ifdef ASSERT
  if (UseMallocOnly) {
    // really slow, but not easy to make fast
    if (_chunk == NULL) return false;
    char** bottom = (char**)_chunk->bottom();
    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    for (Chunk *c = _first; c != NULL; c = c->next()) {
      if (c == _chunk) continue;  // current chunk has been processed
      char** bottom = (char**)c->bottom();
      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
        if (*p == ptr) return true;
      }
    }
    return false;
  }
#endif
  if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
    return true;                // Check for in this chunk
  for (Chunk *c = _first; c; c = c->next()) {
    if (c == _chunk) continue;  // current chunk has been processed
    if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) {
      return true;              // Check for every chunk in Arena
    }
  }
  return false;                 // Not in any Chunk, so not in Arena
}
#ifdef ASSERT
void* Arena::malloc(size_t size) {
  assert(UseMallocOnly, "shouldn't call");
  // use malloc, but save pointer in res. area for later freeing
  char** save = (char**)internal_malloc_4(sizeof(char*));
  return (*save = (char*)os::malloc(size));
}

// for debugging with UseMallocOnly
void* Arena::internal_malloc_4(size_t x) {
  assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
  check_for_overflow(x, "Arena::internal_malloc_4");
  if (_hwm + x > _max) {
    return grow(x);
  } else {
    char *old = _hwm;
    _hwm += x;
    return old;
  }
}
#endif
//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
// The global operator new should never be called since it will usually indicate
// a memory leak.  Use CHeapObj as the base class of such objects to make it explicit
// that they're allocated on the C heap.
// Commented out in product version to avoid conflicts with third-party C++ native code.
// %% note this is causing a problem on solaris debug build. the global
// new is being called from jdk source and causing data corruption.
// src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew
// define CATCH_OPERATOR_NEW_USAGE if you want to use this.
#ifdef CATCH_OPERATOR_NEW_USAGE
void* operator new(size_t size){
  static bool warned = false;
  if (!warned && warn_new_operator)
    warning("should not call global (default) operator new");
  warned = true;
  return (void *) AllocateHeap(size, "global operator new");
}
#endif
void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this);
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", this);
}

julong Arena::_bytes_allocated = 0;

void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); }

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong  AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; }
julong  AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; }
julong  AllocStats::num_frees()   { return os::num_frees - start_frees; }
julong  AllocStats::free_bytes()  { return os::free_bytes - start_mfree_bytes; }
julong  AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void    AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT" frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}
// debugging code
inline void Arena::free_all(char** start, char** end) {
  for (char** p = start; p < end; p++) if (*p) os::free(*p);
}

void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
  assert(UseMallocOnly, "should not call");
  // free all objects malloced since resource mark was created; resource area
  // contains their addresses
  if (chunk->next()) {
    // this chunk is full, and some others too
    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
      char* top = c->top();
      if (c->next() == NULL) {
        top = hwm2;     // last chunk is only used up to hwm2
        assert(c->contains(hwm2), "bad hwm2");
      }
      free_all((char**)c->bottom(), (char**)top);
    }
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(max), "bad max");
    free_all((char**)hwm, (char**)max);
  } else {
    // this chunk was partially used
    assert(chunk->contains(hwm), "bad hwm");
    assert(chunk->contains(hwm2), "bad hwm2");
    free_all((char**)hwm, (char**)hwm2);
  }
}
ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = ThreadLocalStorage::get_thread_slow();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}

#endif // Non-product
