src/share/vm/services/memBaseline.cpp

changeset   f90c822e73f8
child       6876:710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/services/memBaseline.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,492 @@
     1.4 +/*
     1.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +#include "precompiled.hpp"
    1.28 +#include "memory/allocation.hpp"
    1.29 +#include "runtime/safepoint.hpp"
    1.30 +#include "runtime/thread.inline.hpp"
    1.31 +#include "services/memBaseline.hpp"
    1.32 +#include "services/memTracker.hpp"
    1.33 +
    1.34 +
    1.35 +MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
    1.36 +  {mtJavaHeap,   "Java Heap"},
    1.37 +  {mtClass,      "Class"},
    1.38 +  {mtThreadStack,"Thread Stack"},
    1.39 +  {mtThread,     "Thread"},
    1.40 +  {mtCode,       "Code"},
    1.41 +  {mtGC,         "GC"},
    1.42 +  {mtCompiler,   "Compiler"},
    1.43 +  {mtInternal,   "Internal"},
    1.44 +  {mtOther,      "Other"},
    1.45 +  {mtSymbol,     "Symbol"},
    1.46 +  {mtNMT,        "Memory Tracking"},
    1.47 +  {mtTracing,    "Tracing"},
    1.48 +  {mtChunk,      "Pooled Free Chunks"},
    1.49 +  {mtClassShared,"Shared spaces for classes"},
    1.50 +  {mtTest,       "Test"},
    1.51 +  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
    1.52 +                             // behind
    1.53 +};
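          +// Note: the order of entries in the table above defines the index returned by
          +// flag2index(); the table is sized to NUMBER_OF_MEMORY_TYPE, one entry per
          +// memory type a record can carry.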
    1.54 +
    1.55 +MemBaseline::MemBaseline() {
    1.56 +  _baselined = false;
    1.57 +
    1.58 +  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    1.59 +    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    1.60 +    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    1.61 +    _arena_data[index].set_type(MemType2NameMap[index]._flag);
    1.62 +  }
    1.63 +
    1.64 +  _malloc_cs = NULL;
    1.65 +  _vm_cs = NULL;
    1.66 +  _vm_map = NULL;
    1.67 +
    1.68 +  _number_of_classes = 0;
    1.69 +  _number_of_threads = 0;
    1.70 +}
    1.71 +
    1.72 +
    1.73 +void MemBaseline::clear() {
    1.74 +  if (_malloc_cs != NULL) {
    1.75 +    delete _malloc_cs;
    1.76 +    _malloc_cs = NULL;
    1.77 +  }
    1.78 +
    1.79 +  if (_vm_cs != NULL) {
    1.80 +    delete _vm_cs;
    1.81 +    _vm_cs = NULL;
    1.82 +  }
    1.83 +
    1.84 +  if (_vm_map != NULL) {
    1.85 +    delete _vm_map;
    1.86 +    _vm_map = NULL;
    1.87 +  }
    1.88 +
    1.89 +  reset();
    1.90 +}
    1.91 +
    1.92 +
    1.93 +void MemBaseline::reset() {
    1.94 +  _baselined = false;
    1.95 +  _total_vm_reserved = 0;
    1.96 +  _total_vm_committed = 0;
    1.97 +  _total_malloced = 0;
    1.98 +  _number_of_classes = 0;
    1.99 +
   1.100 +  if (_malloc_cs != NULL) _malloc_cs->clear();
   1.101 +  if (_vm_cs != NULL) _vm_cs->clear();
   1.102 +  if (_vm_map != NULL) _vm_map->clear();
   1.103 +
   1.104 +  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   1.105 +    _malloc_data[index].clear();
   1.106 +    _vm_data[index].clear();
   1.107 +    _arena_data[index].clear();
   1.108 +  }
   1.109 +}
   1.110 +
   1.111 +MemBaseline::~MemBaseline() {
   1.112 +  clear();
   1.113 +}
   1.114 +
   1.115 +// baseline malloc'd memory records, generate overall summary and summaries by
   1.116 +// memory types
   1.117 +bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
   1.118 +  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
   1.119 +  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
   1.120 +  size_t used_arena_size = 0;
   1.121 +  int index;
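          +  // An "arena record" tracks the arena object itself (a malloc'd block), while an
          +  // "arena memory record" tracks the memory used by the arena's chunks; when
          +  // present, the memory record immediately follows its owning arena record.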
   1.122 +  while (malloc_ptr != NULL) {
   1.123 +    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
   1.124 +    size_t size = malloc_ptr->size();
   1.125 +    if (malloc_ptr->is_arena_memory_record()) {
   1.126 +      // We do have anonymous arenas, they are either used as value objects,
   1.127 +      // which are embedded inside other objects, or used as stack objects.
   1.128 +      _arena_data[index].inc(size);
   1.129 +      used_arena_size += size;
   1.130 +    } else {
   1.131 +      _total_malloced += size;
   1.132 +      _malloc_data[index].inc(size);
   1.133 +      if (malloc_ptr->is_arena_record()) {
    1.134 +        // see if the corresponding arena memory record is present
   1.135 +        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
   1.136 +        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
   1.137 +          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
   1.138 +             "Arena records do not match");
   1.139 +          size = next_malloc_ptr->size();
   1.140 +          _arena_data[index].inc(size);
   1.141 +          used_arena_size += size;
   1.142 +          malloc_itr.next();
   1.143 +        }
   1.144 +      }
   1.145 +    }
   1.146 +    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   1.147 +  }
   1.148 +
    1.149 +  // subtract the used arena size to get the size of arena chunks in the free list
    1.150 +  index = flag2index(mtChunk);
    1.151 +  _malloc_data[index].reduce(used_arena_size);
    1.152 +  // we really don't know how many chunks are in the free list, so just set the
    1.153 +  // count to 0
   1.154 +  _malloc_data[index].overwrite_counter(0);
   1.155 +
   1.156 +  return true;
   1.157 +}
   1.158 +
    1.159 +// check if there is a safepoint in progress; if so, block the thread
    1.160 +// for the safepoint
   1.161 +void MemBaseline::check_safepoint(JavaThread* thr) {
   1.162 +  if (SafepointSynchronize::is_synchronizing()) {
   1.163 +    // grab and drop the SR_lock to honor the safepoint protocol
   1.164 +    MutexLocker ml(thr->SR_lock());
   1.165 +  }
   1.166 +}
   1.167 +
   1.168 +// baseline mmap'd memory records, generate overall summary and summaries by
   1.169 +// memory types
   1.170 +bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
   1.171 +  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
   1.172 +  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
   1.173 +  int index;
   1.174 +  while (vm_ptr != NULL) {
   1.175 +    if (vm_ptr->is_reserved_region()) {
   1.176 +      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
    1.177 +      // we use the number of thread stacks to count threads
    1.178 +      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
    1.179 +        _number_of_threads ++;
    1.180 +      }
   1.181 +      _total_vm_reserved += vm_ptr->size();
   1.182 +      _vm_data[index].inc(vm_ptr->size(), 0);
   1.183 +    } else {
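          +      // committed regions are assumed to follow the reserved region they belong
          +      // to, so 'index' still holds that reserved region's type index here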
   1.184 +      _total_vm_committed += vm_ptr->size();
   1.185 +      _vm_data[index].inc(0, vm_ptr->size());
   1.186 +    }
   1.187 +    vm_ptr = (VMMemRegion*)vm_itr.next();
   1.188 +  }
   1.189 +  return true;
   1.190 +}
   1.191 +
    1.192 +// baseline malloc'd memory by callsite; only callsites with more than 1KB of
    1.193 +// outstanding allocations are stored.
   1.194 +bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
   1.195 +  assert(MemTracker::track_callsite(), "detail tracking is off");
   1.196 +
   1.197 +  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
   1.198 +  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
   1.199 +  MallocCallsitePointer malloc_callsite;
   1.200 +
    1.201 +  // initialize malloc callsite array
   1.202 +  if (_malloc_cs == NULL) {
   1.203 +    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
   1.204 +    // out of native memory
   1.205 +    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
   1.206 +      return false;
   1.207 +    }
   1.208 +  } else {
   1.209 +    _malloc_cs->clear();
   1.210 +  }
   1.211 +
   1.212 +  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
   1.213 +
   1.214 +  // sort into callsite pc order. Details are aggregated by callsites
   1.215 +  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
   1.216 +  bool ret = true;
   1.217 +
    1.218 +  // baseline callsites whose outstanding allocations total more than 1KB
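          +  // the records are now in callsite pc order, so all records from one callsite
          +  // are adjacent; accumulate into malloc_callsite and flush it to _malloc_cs
          +  // whenever the pc changes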
   1.219 +  while (malloc_ptr != NULL) {
   1.220 +    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
   1.221 +      // skip thread stacks
   1.222 +      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
   1.223 +        if (malloc_callsite.addr() != malloc_ptr->pc()) {
   1.224 +          if ((malloc_callsite.amount()/K) > 0) {
   1.225 +            if (!_malloc_cs->append(&malloc_callsite)) {
   1.226 +              ret = false;
   1.227 +              break;
   1.228 +            }
   1.229 +          }
   1.230 +          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
   1.231 +        }
   1.232 +        malloc_callsite.inc(malloc_ptr->size());
   1.233 +      }
   1.234 +    }
   1.235 +    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   1.236 +  }
   1.237 +
   1.238 +  // restore to address order. Snapshot malloc data is maintained in memory
   1.239 +  // address order.
   1.240 +  malloc_data->sort((FN_SORT)malloc_sort_by_addr);
   1.241 +
    1.242 +  if (!ret) {
    1.243 +    return false;
    1.244 +  }
   1.245 +  // deal with last record
   1.246 +  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
   1.247 +    if (!_malloc_cs->append(&malloc_callsite)) {
   1.248 +      return false;
   1.249 +    }
   1.250 +  }
   1.251 +  return true;
   1.252 +}
   1.253 +
   1.254 +// baseline mmap'd memory by callsites
   1.255 +bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
   1.256 +  assert(MemTracker::track_callsite(), "detail tracking is off");
   1.257 +
   1.258 +  VMCallsitePointer  vm_callsite;
   1.259 +  VMCallsitePointer* cur_callsite = NULL;
   1.260 +  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
   1.261 +  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
   1.262 +
   1.263 +  // initialize virtual memory map array
   1.264 +  if (_vm_map == NULL) {
   1.265 +    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    1.266 +    if (_vm_map == NULL || _vm_map->out_of_memory()) {
    1.267 +      return false;
    1.268 +    }
   1.269 +  } else {
   1.270 +    _vm_map->clear();
   1.271 +  }
   1.272 +
   1.273 +  // initialize virtual memory callsite array
   1.274 +  if (_vm_cs == NULL) {
   1.275 +    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
   1.276 +    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
   1.277 +      return false;
   1.278 +    }
   1.279 +  } else {
   1.280 +    _vm_cs->clear();
   1.281 +  }
   1.282 +
   1.283 +  // consolidate virtual memory data
   1.284 +  VMMemRegionEx*     reserved_rec = NULL;
   1.285 +  VMMemRegionEx*     committed_rec = NULL;
   1.286 +
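          +  // cur_callsite points at vm_callsite once the first reserved region is seen;
          +  // the accumulated callsite data is flushed to _vm_cs when the next reserved
          +  // region starts, and once more after the loop for the last callsite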
    1.287 +  // vm_ptr records arrive in increasing base address order
   1.288 +  while (vm_ptr != NULL) {
   1.289 +    if (vm_ptr->is_reserved_region()) {
   1.290 +      // consolidate reserved memory regions for virtual memory map.
   1.291 +      // The criteria for consolidation is:
   1.292 +      // 1. two adjacent reserved memory regions
   1.293 +      // 2. belong to the same memory type
   1.294 +      // 3. reserved from the same callsite
    1.295 +      if (reserved_rec == NULL ||
    1.296 +          reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
    1.297 +          FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
    1.298 +          reserved_rec->pc() != vm_ptr->pc()) {
    1.299 +        if (!_vm_map->append(vm_ptr)) {
    1.300 +          return false;
    1.301 +        }
    1.302 +        // inserted reserved region, we need the pointer to the element in virtual
    1.303 +        // memory map array.
    1.304 +        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
    1.305 +      } else {
    1.306 +        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
    1.307 +      }
    1.308 +
    1.309 +      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    1.310 +        return false;
    1.311 +      }
   1.312 +      vm_callsite = VMCallsitePointer(vm_ptr->pc());
   1.313 +      cur_callsite = &vm_callsite;
   1.314 +      vm_callsite.inc(vm_ptr->size(), 0);
   1.315 +    } else {
   1.316 +      // consolidate committed memory regions for virtual memory map
    1.317 +      // The criteria are:
    1.318 +      // 1. two adjacent committed memory regions
    1.319 +      // 2. committed from the same callsite
    1.320 +      if (committed_rec == NULL ||
    1.321 +          committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
    1.322 +          committed_rec->pc() != vm_ptr->pc()) {
    1.323 +        if (!_vm_map->append(vm_ptr)) {
    1.324 +          return false;
    1.325 +        }
    1.326 +        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
    1.327 +      } else {
    1.328 +        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
    1.329 +      }
   1.330 +      vm_callsite.inc(0, vm_ptr->size());
   1.331 +    }
   1.332 +    vm_ptr = (VMMemRegionEx*)vm_itr.next();
   1.333 +  }
   1.334 +  // deal with last record
   1.335 +  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
   1.336 +    return false;
   1.337 +  }
   1.338 +
   1.339 +  // sort it into callsite pc order. Details are aggregated by callsites
   1.340 +  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
   1.341 +
    1.342 +  // walk the array to consolidate records by pc
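          +  // equal-pc entries are merged into the first occurrence and the duplicates are
          +  // removed in place, so _vm_cs ends up with one entry per callsite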
   1.343 +  MemPointerArrayIteratorImpl itr(_vm_cs);
   1.344 +  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
   1.345 +  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
   1.346 +  while (next_rec != NULL) {
   1.347 +    assert(callsite_rec != NULL, "Sanity check");
   1.348 +    if (next_rec->addr() == callsite_rec->addr()) {
   1.349 +      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
   1.350 +      itr.remove();
   1.351 +      next_rec = (VMCallsitePointer*)itr.current();
   1.352 +    } else {
   1.353 +      callsite_rec = next_rec;
   1.354 +      next_rec = (VMCallsitePointer*)itr.next();
   1.355 +    }
   1.356 +  }
   1.357 +
   1.358 +  return true;
   1.359 +}
   1.360 +
   1.361 +// baseline a snapshot. If summary_only = false, memory usages aggregated by
   1.362 +// callsites are also baselined.
   1.363 +// The method call can be lengthy, especially when detail tracking info is
   1.364 +// requested. So the method checks for safepoint explicitly.
   1.365 +bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
   1.366 +  Thread* THREAD = Thread::current();
   1.367 +  assert(THREAD->is_Java_thread(), "must be a JavaThread");
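          +  // the snapshot lock is held for the entire baseline pass, so the snapshot
          +  // cannot change while the summaries and callsite details are computed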
   1.368 +  MutexLocker snapshot_locker(snapshot._lock);
   1.369 +  reset();
   1.370 +  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
   1.371 +  if (_baselined) {
   1.372 +    check_safepoint((JavaThread*)THREAD);
   1.373 +    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
   1.374 +  }
   1.375 +  _number_of_classes = snapshot.number_of_classes();
   1.376 +
   1.377 +  if (!summary_only && MemTracker::track_callsite() && _baselined) {
   1.378 +    check_safepoint((JavaThread*)THREAD);
   1.379 +    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs);
   1.380 +    if (_baselined) {
   1.381 +      check_safepoint((JavaThread*)THREAD);
   1.382 +      _baselined =  baseline_vm_details(snapshot._vm_ptrs);
   1.383 +    }
   1.384 +  }
   1.385 +  return _baselined;
   1.386 +}
   1.387 +
   1.388 +
   1.389 +int MemBaseline::flag2index(MEMFLAGS flag) const {
   1.390 +  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   1.391 +    if (MemType2NameMap[index]._flag == flag) {
   1.392 +      return index;
   1.393 +    }
   1.394 +  }
   1.395 +  assert(false, "no type");
   1.396 +  return -1;
   1.397 +}
   1.398 +
   1.399 +const char* MemBaseline::type2name(MEMFLAGS type) {
   1.400 +  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   1.401 +    if (MemType2NameMap[index]._flag == type) {
   1.402 +      return MemType2NameMap[index]._name;
   1.403 +    }
   1.404 +  }
   1.405 +  assert(false, err_msg("bad type %x", type));
   1.406 +  return NULL;
   1.407 +}
   1.408 +
   1.409 +
   1.410 +MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
   1.411 +  _total_malloced = other._total_malloced;
   1.412 +  _total_vm_reserved = other._total_vm_reserved;
   1.413 +  _total_vm_committed = other._total_vm_committed;
   1.414 +
   1.415 +  _baselined = other._baselined;
   1.416 +  _number_of_classes = other._number_of_classes;
   1.417 +
   1.418 +  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   1.419 +    _malloc_data[index] = other._malloc_data[index];
   1.420 +    _vm_data[index] = other._vm_data[index];
   1.421 +    _arena_data[index] = other._arena_data[index];
   1.422 +  }
   1.423 +
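          +  // callsite arrays are copied element by element rather than by pointer, so
          +  // this baseline does not share backing arrays with 'other'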
   1.424 +  if (MemTracker::track_callsite()) {
   1.425 +    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
   1.426 +    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
   1.427 +           "not properly baselined");
   1.428 +    _malloc_cs->clear();
   1.429 +    _vm_cs->clear();
   1.430 +    int index;
   1.431 +    for (index = 0; index < other._malloc_cs->length(); index ++) {
   1.432 +      _malloc_cs->append(other._malloc_cs->at(index));
   1.433 +    }
   1.434 +
   1.435 +    for (index = 0; index < other._vm_cs->length(); index ++) {
   1.436 +      _vm_cs->append(other._vm_cs->at(index));
   1.437 +    }
   1.438 +  }
   1.439 +  return *this;
   1.440 +}
   1.441 +
   1.442 +/* compare functions for sorting */
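          +//
          +// All comparators follow qsort conventions (negative/zero/positive). The size
          +// comparators reverse their arguments so that larger amounts sort first; the
          +// pc/address comparators sort in ascending order.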
   1.443 +
   1.444 +// sort snapshot malloc'd records in callsite pc order
   1.445 +int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
    1.446 +  assert(MemTracker::track_callsite(), "Just check");
   1.447 +  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
   1.448 +  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
   1.449 +  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
   1.450 +}
   1.451 +
    1.452 +// sort baselined malloc'd records in descending size order
   1.453 +int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
   1.454 +  assert(MemTracker::is_on(), "Just check");
   1.455 +  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
   1.456 +  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
   1.457 +  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
   1.458 +}
   1.459 +
   1.460 +// sort baselined malloc'd records in callsite pc order
   1.461 +int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
   1.462 +  assert(MemTracker::is_on(), "Just check");
   1.463 +  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
   1.464 +  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
   1.465 +  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
   1.466 +}
   1.467 +
   1.468 +
    1.469 +// sort baselined mmap'd records in descending reserved size order
   1.470 +int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
   1.471 +  assert(MemTracker::is_on(), "Just check");
   1.472 +  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
   1.473 +  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
   1.474 +  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
   1.475 +}
   1.476 +
   1.477 +// sort baselined mmap'd records in callsite pc order
   1.478 +int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
   1.479 +  assert(MemTracker::is_on(), "Just check");
   1.480 +  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
   1.481 +  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
   1.482 +  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
   1.483 +}
   1.484 +
   1.485 +
   1.486 +// sort snapshot malloc'd records in memory block address order
   1.487 +int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
   1.488 +  assert(MemTracker::is_on(), "Just check");
   1.489 +  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
   1.490 +  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
   1.491 +  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
   1.492 +  assert(p1 == p2 || delta != 0, "dup pointer");
   1.493 +  return delta;
   1.494 +}
   1.495 +
