src/share/vm/services/memBaseline.cpp

changeset 3900:d2a62e0f25eb
child 4193:716c64bda5ba
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/memBaseline.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -0,0 +1,387 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "memory/allocation.hpp"
+#include "services/memBaseline.hpp"
+#include "services/memTracker.hpp"
+
+MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
+  {mtJavaHeap,   "Java Heap"},
+  {mtClass,      "Class"},
+  {mtThreadStack,"Thread Stack"},
+  {mtThread,     "Thread"},
+  {mtCode,       "Code"},
+  {mtGC,         "GC"},
+  {mtCompiler,   "Compiler"},
+  {mtInternal,   "Internal"},
+  {mtOther,      "Other"},
+  {mtSymbol,     "Symbol"},
+  {mtNMT,        "Memory Tracking"},
+  {mtChunk,      "Pooled Free Chunks"},
+  {mtNone,       "Unknown"}  // this can happen when type-tagging records lag
+                             // behind the allocation records
+};
+
+MemBaseline::MemBaseline() {
+  _baselined = false;
+
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
+    _vm_data[index].set_type(MemType2NameMap[index]._flag);
+    _arena_data[index].set_type(MemType2NameMap[index]._flag);
+  }
+
+  _malloc_cs = NULL;
+  _vm_cs = NULL;
+
+  _number_of_classes = 0;
+  _number_of_threads = 0;
+}
+
+
+void MemBaseline::clear() {
+  if (_malloc_cs != NULL) {
+    delete _malloc_cs;
+    _malloc_cs = NULL;
+  }
+
+  if (_vm_cs != NULL) {
+    delete _vm_cs;
+    _vm_cs = NULL;
+  }
+
+  reset();
+}
+
+
+void MemBaseline::reset() {
+  _baselined = false;
+  _total_vm_reserved = 0;
+  _total_vm_committed = 0;
+  _total_malloced = 0;
+  _number_of_classes = 0;
+
+  if (_malloc_cs != NULL) _malloc_cs->clear();
+  if (_vm_cs != NULL) _vm_cs->clear();
+
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    _malloc_data[index].clear();
+    _vm_data[index].clear();
+    _arena_data[index].clear();
+  }
+}
+
+MemBaseline::~MemBaseline() {
+  if (_malloc_cs != NULL) {
+    delete _malloc_cs;
+  }
+
+  if (_vm_cs != NULL) {
+    delete _vm_cs;
+  }
+}
+
+// baseline malloc'd memory records, generating an overall summary and
+// summaries by memory type
+bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
+  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
+  MemPointerRecord* mptr = (MemPointerRecord*)mItr.current();
+  size_t used_arena_size = 0;
+  int index;
+  while (mptr != NULL) {
+    index = flag2index(FLAGS_TO_MEMORY_TYPE(mptr->flags()));
+    size_t size = mptr->size();
+    _total_malloced += size;
+    _malloc_data[index].inc(size);
+    if (MemPointerRecord::is_arena_record(mptr->flags())) {
+      // see if an arena size record is present
+      MemPointerRecord* next_p = (MemPointerRecord*)mItr.peek_next();
+      if (next_p != NULL && MemPointerRecord::is_arena_size_record(next_p->flags())) {
+        assert(next_p->is_size_record_of_arena(mptr), "arena records do not match");
+        size = next_p->size();
+        _arena_data[index].inc(size);
+        used_arena_size += size;
+        mItr.next();
+      }
+    }
+    mptr = (MemPointerRecord*)mItr.next();
+  }
+
+  // subtract the used arena size to get the size of arena chunks on the
+  // free list
+  index = flag2index(mtChunk);
+  _malloc_data[index].reduce(used_arena_size);
+  // we really don't know how many chunks are on the free list, so just set
+  // the counter to 0
+  _malloc_data[index].overwrite_counter(0);
+
+  return true;
+}
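+
+// Worked example of the arena pairing above (hypothetical values), assuming
+// the malloc records are sorted by address so an arena's size record, when
+// one exists, directly follows the arena record itself:
+//   record 1: arena object @ 0x1000, size 64   -> _malloc_data[i].inc(64)
+//   record 2: arena size   @ 0x1008, size 4096 -> _arena_data[i].inc(4096),
+//                                                  used_arena_size += 4096
+// the size record is consumed with mItr.next(), so the outer loop skips it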
+
+// baseline mmap'd memory records, generating an overall summary and
+// summaries by memory type
+bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
+  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
+  VMMemRegion* vptr = (VMMemRegion*)vItr.current();
+  int index;
+  while (vptr != NULL) {
+    index = flag2index(FLAGS_TO_MEMORY_TYPE(vptr->flags()));
+
+    // we use the number of thread stacks to count threads
+    if (IS_MEMORY_TYPE(vptr->flags(), mtThreadStack)) {
+      _number_of_threads ++;
+    }
+    _total_vm_reserved += vptr->reserved_size();
+    _total_vm_committed += vptr->committed_size();
+    _vm_data[index].inc(vptr->reserved_size(), vptr->committed_size());
+    vptr = (VMMemRegion*)vItr.next();
+  }
+  return true;
+}
+
+// baseline malloc'd memory by callsite; only callsites with at least 1KB
+// of total allocation are stored
+bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
+  assert(MemTracker::track_callsite(), "detail tracking is off");
+
+  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
+  MemPointerRecordEx* mptr = (MemPointerRecordEx*)mItr.current();
+  MallocCallsitePointer mp;
+
+  if (_malloc_cs == NULL) {
+    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
+    // out of native memory
+    if (_malloc_cs == NULL) {
+      return false;
+    }
+  } else {
+    _malloc_cs->clear();
+  }
+
+  // only baseline callsites whose accumulated total reaches 1KB
+  while (mptr != NULL) {
+    if (!MemPointerRecord::is_arena_size_record(mptr->flags())) {
+      // skip thread stacks
+      if (!IS_MEMORY_TYPE(mptr->flags(), mtThreadStack)) {
+        if (mp.addr() != mptr->pc()) {
+          if ((mp.amount()/K) > 0) {
+            if (!_malloc_cs->append(&mp)) {
+              return false;
+            }
+          }
+          mp = MallocCallsitePointer(mptr->pc());
+        }
+        mp.inc(mptr->size());
+      }
+    }
+    mptr = (MemPointerRecordEx*)mItr.next();
+  }
+
+  if (mp.addr() != 0 && (mp.amount()/K) > 0) {
+    if (!_malloc_cs->append(&mp)) {
+      return false;
+    }
+  }
+  return true;
+}
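+
+// Note on the 1KB threshold above: (mp.amount() / K) > 0 relies on integer
+// division, so a callsite is recorded only once its accumulated total reaches
+// K bytes; e.g. amount = 1023 gives 1023/K == 0 (skipped), while
+// amount = 1024 gives 1 (recorded).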
+
+// baseline mmap'd memory by callsite
+bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
+  assert(MemTracker::track_callsite(), "detail tracking is off");
+
+  VMCallsitePointer vp;
+  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
+  VMMemRegionEx* vptr = (VMMemRegionEx*)vItr.current();
+
+  if (_vm_cs == NULL) {
+    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
+    // out of native memory
+    if (_vm_cs == NULL) {
+      return false;
+    }
+  } else {
+    _vm_cs->clear();
+  }
+
+  while (vptr != NULL) {
+    if (vp.addr() != vptr->pc()) {
+      // flush the previous callsite, but not the initial empty one
+      if (vp.addr() != 0) {
+        if (!_vm_cs->append(&vp)) {
+          return false;
+        }
+      }
+      vp = VMCallsitePointer(vptr->pc());
+    }
+    vp.inc(vptr->size(), vptr->committed_size());
+    vptr = (VMMemRegionEx*)vItr.next();
+  }
+  if (vp.addr() != 0) {
+    if (!_vm_cs->append(&vp)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// baseline a snapshot; if summary_only is false, memory usage aggregated by
+// callsite is also baselined
+bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
+  MutexLockerEx snapshot_locker(snapshot._lock, true);
+  reset();
+  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
+               baseline_vm_summary(snapshot._vm_ptrs);
+  _number_of_classes = SystemDictionary::number_of_classes();
+
+  if (!summary_only && MemTracker::track_callsite() && _baselined) {
+    // sort by callsite pc for per-callsite aggregation, then restore
+    // the original address order
+    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_pc);
+    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_pc);
+    _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
+                 baseline_vm_details(snapshot._vm_ptrs);
+    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_addr);
+    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_addr);
+  }
+  return _baselined;
+}
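+
+// A minimal usage sketch (hypothetical caller, for illustration only; the
+// real drivers are MemTracker and the NMT diagnostic commands):
+//
+//   MemBaseline baseline;
+//   MemSnapshot* snapshot = MemTracker::get_snapshot();  // assumed accessor
+//   if (snapshot != NULL && baseline.baseline(*snapshot, false)) {
+//     // summary and per-callsite data are now available for reporting
+//   }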
+
+
+int MemBaseline::flag2index(MEMFLAGS flag) const {
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    if (MemType2NameMap[index]._flag == flag) {
+      return index;
+    }
+  }
+  assert(false, "no type");
+  return -1;
+}
+
+const char* MemBaseline::type2name(MEMFLAGS type) {
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    if (MemType2NameMap[index]._flag == type) {
+      return MemType2NameMap[index]._name;
+    }
+  }
+  assert(false, "no type");
+  return NULL;
+}
+
+
+MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
+  _total_malloced = other._total_malloced;
+  _total_vm_reserved = other._total_vm_reserved;
+  _total_vm_committed = other._total_vm_committed;
+
+  _baselined = other._baselined;
+  _number_of_classes = other._number_of_classes;
+
+  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
+    _malloc_data[index] = other._malloc_data[index];
+    _vm_data[index] = other._vm_data[index];
+    _arena_data[index] = other._arena_data[index];
+  }
+
+  if (MemTracker::track_callsite()) {
+    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
+    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
+           "not properly baselined");
+    _malloc_cs->clear();
+    _vm_cs->clear();
+    int index;
+    for (index = 0; index < other._malloc_cs->length(); index ++) {
+      _malloc_cs->append(other._malloc_cs->at(index));
+    }
+
+    for (index = 0; index < other._vm_cs->length(); index ++) {
+      _vm_cs->append(other._vm_cs->at(index));
+    }
+  }
+  return *this;
+}
+
+/* compare functions for sorting */
+
+// sort snapshot malloc'd records in callsite pc order
+int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
+  assert(MemTracker::track_callsite(), "Just check");
+  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
+  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
+  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
+}
+
+// sort baselined malloc'd records in descending size order
+int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
+  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
+  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
+}
+
+// sort baselined malloc'd records in callsite pc order
+int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
+  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
+  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+}
+
+// sort snapshot mmap'd records in callsite pc order
+int MemBaseline::vm_sort_by_pc(const void* p1, const void* p2) {
+  assert(MemTracker::track_callsite(), "Just check");
+  const VMMemRegionEx* mp1 = (const VMMemRegionEx*)p1;
+  const VMMemRegionEx* mp2 = (const VMMemRegionEx*)p2;
+  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
+}
+
+// sort baselined mmap'd records in descending reserved-size order
+int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
+  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
+  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
+}
+
+// sort baselined mmap'd records in callsite pc order
+int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
+  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
+  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+}
+
+
+// sort snapshot malloc'd records in memory block address order
+int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
+  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
+  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+  assert(delta != 0, "dup pointer");
+  return delta;
+}
+
+// sort snapshot mmap'd records in memory block address order
+int MemBaseline::vm_sort_by_addr(const void* p1, const void* p2) {
+  assert(MemTracker::is_on(), "Just check");
+  const VMMemRegion* mp1 = (const VMMemRegion*)p1;
+  const VMMemRegion* mp2 = (const VMMemRegion*)p2;
+  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
+  assert(delta != 0, "dup pointer");
+  return delta;
+}
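+
+// For reference: UNSIGNED_COMPARE(a, b) is assumed to act as a qsort-style
+// comparator over unsigned values (negative when a < b, zero when equal,
+// positive when a > b), so passing the operands in reversed order, as the
+// *_sort_by_size functions above do, produces a descending sort.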
