src/share/vm/memory/allocation.cpp

changeset 3900:d2a62e0f25eb
parent    3156:f08d439fab8c
child     4037:da91efe96a93
     1.1 --- a/src/share/vm/memory/allocation.cpp	Wed Jun 27 15:23:36 2012 +0200
     1.2 +++ b/src/share/vm/memory/allocation.cpp	Thu Jun 28 17:03:16 2012 -0400
     1.3 @@ -26,10 +26,13 @@
     1.4  #include "memory/allocation.hpp"
     1.5  #include "memory/allocation.inline.hpp"
     1.6  #include "memory/resourceArea.hpp"
     1.7 +#include "runtime/atomic.hpp"
     1.8  #include "runtime/os.hpp"
     1.9  #include "runtime/task.hpp"
    1.10  #include "runtime/threadCritical.hpp"
    1.11 +#include "services/memTracker.hpp"
    1.12  #include "utilities/ostream.hpp"
    1.13 +
    1.14  #ifdef TARGET_OS_FAMILY_linux
    1.15  # include "os_linux.inline.hpp"
    1.16  #endif
    1.17 @@ -43,32 +46,16 @@
    1.18  # include "os_bsd.inline.hpp"
    1.19  #endif
    1.20  
    1.21 -void* CHeapObj::operator new(size_t size){
    1.22 -  return (void *) AllocateHeap(size, "CHeapObj-new");
    1.23 -}
    1.24 -
    1.25 -void* CHeapObj::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
    1.26 -  char* p = (char*) os::malloc(size);
    1.27 -#ifdef ASSERT
    1.28 -  if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
    1.29 -#endif
    1.30 -  return p;
    1.31 -}
    1.32 -
    1.33 -void CHeapObj::operator delete(void* p){
    1.34 - FreeHeap(p);
    1.35 -}
    1.36 -
    1.37  void* StackObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
    1.38  void  StackObj::operator delete(void* p)   { ShouldNotCallThis(); };
    1.39  void* _ValueObj::operator new(size_t size)  { ShouldNotCallThis(); return 0; };
    1.40  void  _ValueObj::operator delete(void* p)   { ShouldNotCallThis(); };
    1.41  
    1.42 -void* ResourceObj::operator new(size_t size, allocation_type type) {
    1.43 +void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
    1.44    address res;
    1.45    switch (type) {
    1.46     case C_HEAP:
    1.47 -    res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    1.48 +    res = (address)AllocateHeap(size, flags, CALLER_PC);
    1.49      DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    1.50      break;
    1.51     case RESOURCE_AREA:
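
The CHeapObj operators removed above move into the allocation headers, where CHeapObj becomes a class template parameterized by an NMT memory type, and ResourceObj::operator new now takes an explicit MEMFLAGS argument so C-heap allocations can be attributed by NMT. A minimal usage sketch of the new placement form (ExampleNode and the mtInternal choice are illustrative, not part of this changeset):

  // Allocate a ResourceObj subtype on the C heap, tagged for NMT.
  ExampleNode* n = new (ResourceObj::C_HEAP, mtInternal) ExampleNode();
  // ... use n ...
  delete n;   // only the C_HEAP case may be deleted; resource-area copies are not freed this way
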
    1.52 @@ -184,7 +171,7 @@
    1.53  
    1.54  // MT-safe pool of chunks to reduce malloc/free thrashing
    1.55  // NB: not using Mutex because pools are used before Threads are initialized
    1.56 -class ChunkPool {
    1.57 +class ChunkPool: public CHeapObj<mtInternal> {
    1.58    Chunk*       _first;        // first cached Chunk; its first word points to next chunk
    1.59    size_t       _num_chunks;   // number of unused chunks in pool
    1.60    size_t       _num_used;     // number of chunks currently checked out
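
With CHeapObj templatized, a C-heap-allocated class now declares its NMT category at the point of inheritance, exactly as ChunkPool does above with mtInternal. A hedged sketch of the declaration pattern (MyPool is a placeholder name):

  // The template argument tells NMT how to categorize this object's malloc'd storage;
  // operator new/delete are inherited from CHeapObj<mtInternal>.
  class MyPool : public CHeapObj<mtInternal> {
    // ...
  };
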
    1.61 @@ -210,14 +197,16 @@
    1.62     ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
    1.63  
    1.64    // Allocate a new chunk from the pool (might expand the pool)
    1.65 -  void* allocate(size_t bytes) {
    1.66 +  _NOINLINE_ void* allocate(size_t bytes) {
    1.67      assert(bytes == _size, "bad size");
    1.68      void* p = NULL;
    1.69 +    // No VM lock can be taken inside ThreadCritical lock, so os::malloc
    1.70 +    // should be done outside ThreadCritical lock due to NMT
    1.71      { ThreadCritical tc;
    1.72        _num_used++;
    1.73        p = get_first();
    1.74 -      if (p == NULL) p = os::malloc(bytes);
    1.75      }
    1.76 +    if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
    1.77      if (p == NULL)
    1.78        vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
    1.79  
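
The reordering above is the point of this hunk: NMT records every os::malloc, and that recording may need a VM lock, which must not be taken while ThreadCritical is held. The critical section is therefore reduced to pool bookkeeping and the free-list pop, and the fallback malloc runs after the lock is released. A condensed sketch of the same shape (stripped of the diff noise; not a verbatim quote of the new method):

  void* p = NULL;
  {
    ThreadCritical tc;      // short critical section: bookkeeping + free-list pop only
    _num_used++;
    p = get_first();        // may be NULL if the pool has no cached chunk
  }                         // ThreadCritical released here
  if (p == NULL) {
    p = os::malloc(bytes, mtChunk, CURRENT_PC);   // NMT recording happens outside the lock
  }
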
    1.80 @@ -238,28 +227,34 @@
    1.81  
    1.82    // Prune the pool
    1.83    void free_all_but(size_t n) {
    1.84 +    Chunk* cur = NULL;
    1.85 +    Chunk* next;
    1.86 +    {
    1.87      // if we have more than n chunks, free all of them
    1.88      ThreadCritical tc;
    1.89      if (_num_chunks > n) {
    1.90        // free chunks at end of queue, for better locality
    1.91 -      Chunk* cur = _first;
    1.92 +        cur = _first;
    1.93        for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next();
    1.94  
    1.95        if (cur != NULL) {
    1.96 -        Chunk* next = cur->next();
    1.97 +          next = cur->next();
    1.98          cur->set_next(NULL);
    1.99          cur = next;
   1.100  
   1.101 -        // Free all remaining chunks
   1.102 +          _num_chunks = n;
   1.103 +        }
   1.104 +      }
   1.105 +    }
   1.106 +
   1.107 +    // Free all remaining chunks, outside of ThreadCritical
   1.108 +    // to avoid deadlock with NMT
   1.109          while(cur != NULL) {
   1.110            next = cur->next();
   1.111 -          os::free(cur);
   1.112 -          _num_chunks--;
   1.113 +      os::free(cur, mtChunk);
   1.114            cur = next;
   1.115          }
   1.116        }
   1.117 -    }
   1.118 -  }
   1.119  
   1.120    // Accessors to preallocated pool's
   1.121    static ChunkPool* large_pool()  { assert(_large_pool  != NULL, "must be initialized"); return _large_pool;  }
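
free_all_but() applies the complementary pattern: under ThreadCritical it only walks to the n-th cached chunk, cuts the list there, and updates _num_chunks; the detached tail is then released with os::free (also recorded by NMT) after the lock is dropped. A condensed sketch of the unlink-then-free shape (detach_tail_after is a hypothetical helper standing in for the inline walk above):

  Chunk* cur = NULL;
  {
    ThreadCritical tc;
    cur = detach_tail_after(n);   // unlink surplus chunks, set _num_chunks = n
  }
  while (cur != NULL) {           // outside ThreadCritical, so NMT cannot deadlock
    Chunk* next = cur->next();
    os::free(cur, mtChunk);
    cur = next;
  }
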
   1.122 @@ -323,7 +318,7 @@
   1.123     case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
   1.124     case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
   1.125     default: {
   1.126 -     void *p =  os::malloc(bytes);
   1.127 +     void *p =  os::malloc(bytes, mtChunk, CALLER_PC);
   1.128       if (p == NULL)
   1.129         vm_exit_out_of_memory(bytes, "Chunk::new");
   1.130       return p;
   1.131 @@ -337,7 +332,7 @@
   1.132     case Chunk::size:        ChunkPool::large_pool()->free(c); break;
   1.133     case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break;
   1.134     case Chunk::init_size:   ChunkPool::small_pool()->free(c); break;
   1.135 -   default:                 os::free(c);
   1.136 +   default:                 os::free(c, mtChunk);
   1.137    }
   1.138  }
   1.139  
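
Both Chunk::operator new and Chunk::operator delete now pass mtChunk so that chunk memory which bypasses the pools is still categorized by NMT, and malloc sites additionally pass a caller pc (CALLER_PC or CURRENT_PC) for call-site attribution. A sketch of the resulting call shape; the exact defaulting of the pc argument in os::malloc is an assumption here, not quoted from os.hpp:

  void* p = os::malloc(bytes, mtChunk, CALLER_PC);  // category + caller pc for NMT
  // ...
  os::free(p, mtChunk);                             // freed under the same category
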
   1.140 @@ -374,6 +369,7 @@
   1.141  }
   1.142  
   1.143  //------------------------------Arena------------------------------------------
   1.144 +NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
   1.145  
   1.146  Arena::Arena(size_t init_size) {
   1.147    size_t round_size = (sizeof (char *)) - 1;
   1.148 @@ -382,6 +378,7 @@
   1.149    _hwm = _chunk->bottom();      // Save the cached hwm, max
   1.150    _max = _chunk->top();
   1.151    set_size_in_bytes(init_size);
   1.152 +  NOT_PRODUCT(Atomic::inc(&_instance_count);)
   1.153  }
   1.154  
   1.155  Arena::Arena() {
   1.156 @@ -389,12 +386,15 @@
   1.157    _hwm = _chunk->bottom();      // Save the cached hwm, max
   1.158    _max = _chunk->top();
   1.159    set_size_in_bytes(Chunk::init_size);
   1.160 +  NOT_PRODUCT(Atomic::inc(&_instance_count);)
   1.161  }
   1.162  
   1.163  Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) {
   1.164    set_size_in_bytes(a->size_in_bytes());
   1.165 +  NOT_PRODUCT(Atomic::inc(&_instance_count);)
   1.166  }
   1.167  
   1.168 +
   1.169  Arena *Arena::move_contents(Arena *copy) {
   1.170    copy->destruct_contents();
   1.171    copy->_chunk = _chunk;
   1.172 @@ -409,6 +409,42 @@
   1.173  
   1.174  Arena::~Arena() {
   1.175    destruct_contents();
   1.176 +  NOT_PRODUCT(Atomic::dec(&_instance_count);)
   1.177 +}
   1.178 +
   1.179 +void* Arena::operator new(size_t size) {
   1.180 +  assert(false, "Use dynamic memory type binding");
   1.181 +  return NULL;
   1.182 +}
   1.183 +
   1.184 +void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
   1.185 +  assert(false, "Use dynamic memory type binding");
   1.186 +  return NULL;
   1.187 +}
   1.188 +
   1.189 +  // dynamic memory type binding
   1.190 +void* Arena::operator new(size_t size, MEMFLAGS flags) {
   1.191 +#ifdef ASSERT
   1.192 +  void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
   1.193 +  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   1.194 +  return p;
   1.195 +#else
   1.196 +  return (void *) AllocateHeap(size, flags|otArena, CALLER_PC);
   1.197 +#endif
   1.198 +}
   1.199 +
   1.200 +void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
   1.201 +#ifdef ASSERT
   1.202 +  void* p = os::malloc(size, flags|otArena, CALLER_PC);
   1.203 +  if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   1.204 +  return p;
   1.205 +#else
   1.206 +  return os::malloc(size, flags|otArena, CALLER_PC);
   1.207 +#endif
   1.208 +}
   1.209 +
   1.210 +void Arena::operator delete(void* p) {
   1.211 +  FreeHeap(p);
   1.212  }
   1.213  
   1.214  // Destroy this arenas contents and reset to empty
   1.215 @@ -421,6 +457,14 @@
   1.216    reset();
   1.217  }
   1.218  
   1.219 +// This is high traffic method, but many calls actually don't
   1.220 +// change the size
   1.221 +void Arena::set_size_in_bytes(size_t size) {
   1.222 +  if (_size_in_bytes != size) {
   1.223 +    _size_in_bytes = size;
   1.224 +    MemTracker::record_arena_size((address)this, size);
   1.225 +  }
   1.226 +}
   1.227  
   1.228  // Total of all Chunks in arena
   1.229  size_t Arena::used() const {
   1.230 @@ -448,7 +492,6 @@
   1.231    if (_chunk == NULL) {
   1.232      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
   1.233    }
   1.234 -
   1.235    if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
   1.236    else _first = _chunk;
   1.237    _hwm  = _chunk->bottom();     // Save the cached hwm, max
   1.238 @@ -538,7 +581,7 @@
   1.239    assert(UseMallocOnly, "shouldn't call");
   1.240    // use malloc, but save pointer in res. area for later freeing
   1.241    char** save = (char**)internal_malloc_4(sizeof(char*));
   1.242 -  return (*save = (char*)os::malloc(size));
   1.243 +  return (*save = (char*)os::malloc(size, mtChunk));
   1.244  }
   1.245  
   1.246  // for debugging with UseMallocOnly
