src/share/vm/services/memBaseline.cpp

author:      zgu
date:        Thu, 28 Jun 2012 17:03:16 -0400
changeset:   3900 (d2a62e0f25eb)
child:       4193 (716c64bda5ba)
permissions: -rw-r--r--

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain
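
A usage note for context (hedged; the spellings below match later NMT releases and may differ slightly in this early phase): native memory tracking is enabled with the -XX:NativeMemoryTracking=summary (or =detail) VM option, and the DCmd introduced under 7151532 is reached through the jcmd tool, for example:

    jcmd <pid> VM.native_memory summary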

/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"
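
// Mapping from memory type flags (MEMFLAGS) to the human-readable names
// used when NMT usage data is reported, one entry per tracked type.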
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtNone,       "Unknown"}  // This can happen when type-tagging records lag
                             // behind
};

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}
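
// clear() releases the callsite arrays before resetting all counters;
// reset() alone (below) empties the arrays but keeps them allocated for reuse.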
void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  reset();
}

void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;
  _number_of_threads = 0;  // recounted by each call to baseline_vm_summary()

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
  }
}
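
// The summary pass below relies on the record layout produced by the snapshot
// code: an arena's size record, when present, immediately follows its arena
// record in the array (asserted inside the loop).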

// baseline malloc'd memory records, generate overall summary and summaries by
// memory type
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
  MemPointerRecord* mptr = (MemPointerRecord*)mItr.current();
  size_t used_arena_size = 0;
  int index;
  while (mptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(mptr->flags()));
    size_t size = mptr->size();
    _total_malloced += size;
    _malloc_data[index].inc(size);
    if (MemPointerRecord::is_arena_record(mptr->flags())) {
      // see if an arena size record is present
      MemPointerRecord* next_p = (MemPointerRecord*)mItr.peek_next();
      if (next_p != NULL && MemPointerRecord::is_arena_size_record(next_p->flags())) {
        assert(next_p->is_size_record_of_arena(mptr), "arena records do not match");
        size = next_p->size();
        _arena_data[index].inc(size);
        used_arena_size += size;
        mItr.next();
      }
    }
    mptr = (MemPointerRecord*)mItr.next();
  }

  // subtract used arena size to get the size of arena chunks in the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we don't know how many chunks are in the free list, so just set the
  // counter to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory type
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
  VMMemRegion* vptr = (VMMemRegion*)vItr.current();
  int index;
  while (vptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(vptr->flags()));

    // we use the number of thread stacks to count threads
    if (IS_MEMORY_TYPE(vptr->flags(), mtThreadStack)) {
      _number_of_threads ++;
    }
    _total_vm_reserved += vptr->reserved_size();
    _total_vm_committed += vptr->committed_size();
    _vm_data[index].inc(vptr->reserved_size(), vptr->committed_size());
    vptr = (VMMemRegion*)vItr.next();
  }
  return true;
}
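
// The two detail passes below assume their input arrays have been sorted by
// callsite pc (see baseline() further down), so that all records from the
// same callsite are adjacent and can be aggregated in a single pass.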

// baseline malloc'd memory by callsite; only callsites with more than 1KB
// of outstanding allocations are stored.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
  MemPointerRecordEx* mptr = (MemPointerRecordEx*)mItr.current();
  MallocCallsitePointer mp;

  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  // only record callsites whose total allocation exceeds 1 KB
  while (mptr != NULL) {
    if (!MemPointerRecord::is_arena_size_record(mptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(mptr->flags(), mtThreadStack)) {
        if (mp.addr() != mptr->pc()) {
          // moving on to a new callsite; flush the previous one if it
          // accumulated more than 1 KB
          if ((mp.amount()/K) > 0) {
            if (!_malloc_cs->append(&mp)) {
              return false;
            }
          }
          mp = MallocCallsitePointer(mptr->pc());
        }
        mp.inc(mptr->size());
      }
    }
    mptr = (MemPointerRecordEx*)mItr.next();
  }

  // flush the last callsite
  if (mp.addr() != 0 && (mp.amount()/K) > 0) {
    if (!_malloc_cs->append(&mp)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsite
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer vp;
  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
  VMMemRegionEx* vptr = (VMMemRegionEx*)vItr.current();

  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  while (vptr != NULL) {
    if (vp.addr() != vptr->pc()) {
      // moving on to a new callsite; flush the previous one, skipping the
      // initial, still-empty record
      if (vp.addr() != 0 && !_vm_cs->append(&vp)) {
        return false;
      }
      vp = VMCallsitePointer(vptr->pc());
    }
    vp.inc(vptr->size(), vptr->committed_size());
    vptr = (VMMemRegionEx*)vItr.next();
  }
  // flush the last callsite
  if (vp.addr() != 0) {
    if (!_vm_cs->append(&vp)) {
      return false;
    }
  }
  return true;
}

// baseline a snapshot. If summary_only is false, memory usage aggregated by
// callsite is also baselined.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  MutexLockerEx snapshot_locker(snapshot._lock, true);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
               baseline_vm_summary(snapshot._vm_ptrs);
  _number_of_classes = SystemDictionary::number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    // sort by callsite pc for the detail passes, then restore address order
    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_pc);
    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_pc);
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
                 baseline_vm_details(snapshot._vm_ptrs);
    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_addr);
    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_addr);
  }
  return _baselined;
}
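
// Illustrative call sequence (a sketch, not code from this changeset; it
// assumes a MemTracker accessor that exposes the current MemSnapshot):
//
//   MemSnapshot* snapshot = MemTracker::get_snapshot();   // assumed accessor
//   MemBaseline baseline;
//   if (snapshot != NULL && baseline.baseline(*snapshot, false)) {
//     // baseline now holds summary and per-callsite data for reporting
//   }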

int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, "no type");
  return NULL;
}

MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;
  _number_of_threads = other._number_of_threads;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */
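
// All comparators below follow the qsort convention (negative/zero/positive).
// The *_sort_by_size variants compare their operands in reverse (p2 before p1)
// so that larger amounts sort first; the pc/addr variants sort ascending.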

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}

// sort snapshot mmap'd records in callsite pc order
int MemBaseline::vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const VMMemRegionEx* mp1 = (const VMMemRegionEx*)p1;
  const VMMemRegionEx* mp2 = (const VMMemRegionEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined mmap'd records in size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}

// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}

// sort snapshot mmap'd records in memory block address order
int MemBaseline::vm_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMMemRegion* mp1 = (const VMMemRegion*)p1;
  const VMMemRegion* mp2 = (const VMMemRegion*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}
