src/share/vm/services/memBaseline.cpp

changeset 7074:833b0f92429a
parent    5375:72fce0b2d341
child     7080:dd3939fe8424
     1.1 --- a/src/share/vm/services/memBaseline.cpp	Wed Aug 27 09:36:55 2014 +0200
     1.2 +++ b/src/share/vm/services/memBaseline.cpp	Wed Aug 27 08:19:12 2014 -0400
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -22,471 +22,301 @@
    1.11   *
    1.12   */
    1.13  #include "precompiled.hpp"
    1.14 +
    1.15  #include "memory/allocation.hpp"
    1.16  #include "runtime/safepoint.hpp"
    1.17  #include "runtime/thread.inline.hpp"
    1.18  #include "services/memBaseline.hpp"
    1.19  #include "services/memTracker.hpp"
    1.20  
    1.21 -
    1.22 -MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
    1.23 -  {mtJavaHeap,   "Java Heap"},
    1.24 -  {mtClass,      "Class"},
    1.25 -  {mtThreadStack,"Thread Stack"},
    1.26 -  {mtThread,     "Thread"},
    1.27 -  {mtCode,       "Code"},
    1.28 -  {mtGC,         "GC"},
    1.29 -  {mtCompiler,   "Compiler"},
    1.30 -  {mtInternal,   "Internal"},
    1.31 -  {mtOther,      "Other"},
    1.32 -  {mtSymbol,     "Symbol"},
    1.33 -  {mtNMT,        "Memory Tracking"},
    1.34 -  {mtTracing,    "Tracing"},
    1.35 -  {mtChunk,      "Pooled Free Chunks"},
    1.36 -  {mtClassShared,"Shared spaces for classes"},
    1.37 -  {mtTest,       "Test"},
    1.38 -  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
    1.39 -                             // behind
    1.40 -};
    1.41 -
    1.42 -MemBaseline::MemBaseline() {
    1.43 -  _baselined = false;
    1.44 -
    1.45 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    1.46 -    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    1.47 -    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    1.48 -    _arena_data[index].set_type(MemType2NameMap[index]._flag);
    1.49 -  }
    1.50 -
    1.51 -  _malloc_cs = NULL;
    1.52 -  _vm_cs = NULL;
    1.53 -  _vm_map = NULL;
    1.54 -
    1.55 -  _number_of_classes = 0;
    1.56 -  _number_of_threads = 0;
    1.57 -}
    1.58 -
    1.59 -
    1.60 -void MemBaseline::clear() {
    1.61 -  if (_malloc_cs != NULL) {
    1.62 -    delete _malloc_cs;
    1.63 -    _malloc_cs = NULL;
    1.64 -  }
    1.65 -
    1.66 -  if (_vm_cs != NULL) {
    1.67 -    delete _vm_cs;
    1.68 -    _vm_cs = NULL;
    1.69 -  }
    1.70 -
    1.71 -  if (_vm_map != NULL) {
    1.72 -    delete _vm_map;
    1.73 -    _vm_map = NULL;
    1.74 -  }
    1.75 -
    1.76 -  reset();
    1.77 -}
    1.78 -
    1.79 -
    1.80 -void MemBaseline::reset() {
    1.81 -  _baselined = false;
    1.82 -  _total_vm_reserved = 0;
    1.83 -  _total_vm_committed = 0;
    1.84 -  _total_malloced = 0;
    1.85 -  _number_of_classes = 0;
    1.86 -
    1.87 -  if (_malloc_cs != NULL) _malloc_cs->clear();
    1.88 -  if (_vm_cs != NULL) _vm_cs->clear();
    1.89 -  if (_vm_map != NULL) _vm_map->clear();
    1.90 -
    1.91 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    1.92 -    _malloc_data[index].clear();
    1.93 -    _vm_data[index].clear();
    1.94 -    _arena_data[index].clear();
    1.95 +/*
     1.96 + * Sizes are sorted in descending order for reporting
    1.97 + */
    1.98 +int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
    1.99 +  if (s1.size() == s2.size()) {
   1.100 +    return 0;
   1.101 +  } else if (s1.size() > s2.size()) {
   1.102 +    return -1;
   1.103 +  } else {
   1.104 +    return 1;
   1.105    }
   1.106  }
   1.107  
   1.108 -MemBaseline::~MemBaseline() {
   1.109 -  clear();
   1.110 +
   1.111 +int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
   1.112 +  const VirtualMemoryAllocationSite& s2) {
   1.113 +  if (s1.reserved() == s2.reserved()) {
   1.114 +    return 0;
   1.115 +  } else if (s1.reserved() > s2.reserved()) {
   1.116 +    return -1;
   1.117 +  } else {
   1.118 +    return 1;
   1.119 +  }
   1.120  }
   1.121  
   1.122 -// baseline malloc'd memory records, generate overall summary and summaries by
   1.123 -// memory types
   1.124 -bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
   1.125 -  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
   1.126 -  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
   1.127 -  size_t used_arena_size = 0;
   1.128 -  int index;
   1.129 -  while (malloc_ptr != NULL) {
   1.130 -    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
   1.131 -    size_t size = malloc_ptr->size();
   1.132 -    if (malloc_ptr->is_arena_memory_record()) {
   1.133 -      // We do have anonymous arenas, they are either used as value objects,
   1.134 -      // which are embedded inside other objects, or used as stack objects.
   1.135 -      _arena_data[index].inc(size);
   1.136 -      used_arena_size += size;
    1.137 +// Sort into allocation site address order for baseline comparison
   1.138 +int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
   1.139 +  return s1.call_stack()->compare(*s2.call_stack());
   1.140 +}
   1.141 +
   1.142 +
   1.143 +int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
   1.144 +  const VirtualMemoryAllocationSite& s2) {
   1.145 +  return s1.call_stack()->compare(*s2.call_stack());
   1.146 +}
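
All four comparators follow the usual qsort-style contract (negative, zero, positive). The size variants deliberately invert the sense, returning -1 when the first entry is larger, so that a SortedLinkedList built on them keeps the biggest consumers at the head for reporting. A minimal standalone illustration of that inversion (compare_size_descending is a hypothetical name, not part of this change):

    // Returns < 0 when v1 should precede v2; larger values sort first.
    int compare_size_descending(const size_t& v1, const size_t& v2) {
      if (v1 == v2) return 0;
      return (v1 > v2) ? -1 : 1;
    }
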
   1.147 +
   1.148 +/*
   1.149 + * Walker to walk malloc allocation site table
   1.150 + */
   1.151 +class MallocAllocationSiteWalker : public MallocSiteWalker {
   1.152 + private:
   1.153 +  SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
   1.154 +                 _malloc_sites;
   1.155 +  size_t         _count;
   1.156 +
    1.157 +  // Entries in MallocSiteTable with size == 0 and count == 0
    1.158 +  // indicate that the malloc site is no longer there.
   1.159 + public:
   1.160 +  MallocAllocationSiteWalker(Arena* arena) : _count(0), _malloc_sites(arena) {
   1.161 +  }
   1.162 +
   1.163 +  inline size_t count() const { return _count; }
   1.164 +
   1.165 +  LinkedList<MallocSite>* malloc_sites() {
   1.166 +    return &_malloc_sites;
   1.167 +  }
   1.168 +
   1.169 +  bool do_malloc_site(const MallocSite* site) {
   1.170 +    if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
   1.171 +      if (_malloc_sites.add(*site) != NULL) {
   1.172 +        _count++;
   1.173 +        return true;
   1.174 +      } else {
   1.175 +        return false;  // OOM
   1.176 +      }
   1.177      } else {
   1.178 -      _total_malloced += size;
   1.179 -      _malloc_data[index].inc(size);
   1.180 -      if (malloc_ptr->is_arena_record()) {
   1.181 -        // see if arena memory record present
   1.182 -        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
   1.183 -        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
   1.184 -          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
   1.185 -             "Arena records do not match");
   1.186 -          size = next_malloc_ptr->size();
   1.187 -          _arena_data[index].inc(size);
   1.188 -          used_arena_size += size;
   1.189 -          malloc_itr.next();
   1.190 -        }
   1.191 +      // malloc site does not meet threshold, ignore and continue
   1.192 +      return true;
   1.193 +    }
   1.194 +  }
   1.195 +};
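
A sketch of how this walker is driven, mirroring the call made in baseline_allocation_sites() further down (error handling elided; the arena is assumed to be NMT-tagged, as in MemBaseline::baseline()):

    Arena* arena = new (std::nothrow, mtNMT) Arena(mtNMT);
    if (arena != NULL) {
      MallocAllocationSiteWalker walker(arena);
      if (MallocSiteTable::walk_malloc_site(&walker)) {
        // walker.malloc_sites() now holds every site with
        // size() >= MemBaseline::SIZE_THRESHOLD, largest first,
        // per the compare_malloc_size ordering above.
      }
    }
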
   1.196 +
    1.197 +// Compare virtual memory regions by base address
   1.198 +int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
   1.199 +  return r1.compare(r2);
   1.200 +}
   1.201 +
   1.202 +// Walk all virtual memory regions for baselining
   1.203 +class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
   1.204 + private:
   1.205 +  SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base, ResourceObj::ARENA>
   1.206 +                _virtual_memory_regions;
   1.207 +  size_t        _count;
   1.208 +
   1.209 + public:
   1.210 +  VirtualMemoryAllocationWalker(Arena* a) : _count(0), _virtual_memory_regions(a) {
   1.211 +  }
   1.212 +
    1.213 +  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    1.214 +    if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
    1.215 +      if (_virtual_memory_regions.add(*rgn) != NULL) {
    1.216 +        _count++;
   1.217 +        return true;
   1.218 +      } else {
   1.219 +        return false;
   1.220        }
   1.221      }
   1.222 -    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   1.223 +    return true;
   1.224    }
   1.225  
   1.226 -  // substract used arena size to get size of arena chunk in free list
   1.227 -  index = flag2index(mtChunk);
   1.228 -  _malloc_data[index].reduce(used_arena_size);
   1.229 -  // we really don't know how many chunks in free list, so just set to
   1.230 -  // 0
   1.231 -  _malloc_data[index].overwrite_counter(0);
   1.232 +  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
   1.233 +    return &_virtual_memory_regions;
   1.234 +  }
   1.235 +};
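
Unlike the malloc walker, this one keeps regions in base address order (compare_virtual_memory_base) rather than size order; aggregate_virtual_memory_allocation_sites() below regroups them by call stack afterwards. Driving it looks the same as the malloc case (a sketch, reusing the arena from the previous example):

    VirtualMemoryAllocationWalker vm_walker(arena);
    if (VirtualMemoryTracker::walk_virtual_memory(&vm_walker)) {
      // vm_walker.virtual_memory_allocations() lists reserved regions
      // in increasing base address order.
    }
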
   1.236 +
   1.237 +
   1.238 +bool MemBaseline::baseline_summary() {
   1.239 +  assert(_malloc_memory_snapshot == NULL, "Malloc baseline not yet reset");
   1.240 +  assert(_virtual_memory_snapshot == NULL, "Virtual baseline not yet reset");
   1.241 +
    1.242 +  _malloc_memory_snapshot = new (arena()) MallocMemorySnapshot();
   1.243 +  _virtual_memory_snapshot = new (arena()) VirtualMemorySnapshot();
   1.244 +  if (_malloc_memory_snapshot == NULL || _virtual_memory_snapshot == NULL) {
   1.245 +    return false;
   1.246 +  }
   1.247 +  MallocMemorySummary::snapshot(_malloc_memory_snapshot);
   1.248 +  VirtualMemorySummary::snapshot(_virtual_memory_snapshot);
   1.249 +  return true;
   1.250 +}
   1.251 +
   1.252 +bool MemBaseline::baseline_allocation_sites() {
   1.253 +  assert(arena() != NULL, "Just check");
   1.254 +  // Malloc allocation sites
   1.255 +  MallocAllocationSiteWalker malloc_walker(arena());
   1.256 +  if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
   1.257 +    return false;
   1.258 +  }
   1.259 +
   1.260 +  _malloc_sites.set_head(malloc_walker.malloc_sites()->head());
   1.261 +  // The malloc sites are collected in size order
   1.262 +  _malloc_sites_order = by_size;
   1.263 +
   1.264 +  // Virtual memory allocation sites
   1.265 +  VirtualMemoryAllocationWalker virtual_memory_walker(arena());
   1.266 +  if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
   1.267 +    return false;
   1.268 +  }
   1.269 +
    1.270 +  // Virtual memory allocations are collected in base address order
   1.271 +  _virtual_memory_allocations.set_head(virtual_memory_walker.virtual_memory_allocations()->head());
   1.272 +
   1.273 +  if (!aggregate_virtual_memory_allocation_sites()) {
   1.274 +    return false;
   1.275 +  }
    1.276 +  // Virtual memory allocation sites are aggregated in call stack order
   1.277 +  _virtual_memory_sites_order = by_address;
   1.278  
   1.279    return true;
   1.280  }
   1.281  
   1.282 -// check if there is a safepoint in progress, if so, block the thread
   1.283 -// for the safepoint
   1.284 -void MemBaseline::check_safepoint(JavaThread* thr) {
   1.285 -  if (SafepointSynchronize::is_synchronizing()) {
   1.286 -    // grab and drop the SR_lock to honor the safepoint protocol
   1.287 -    MutexLocker ml(thr->SR_lock());
   1.288 -  }
   1.289 -}
   1.290 -
   1.291 -// baseline mmap'd memory records, generate overall summary and summaries by
   1.292 -// memory types
   1.293 -bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
   1.294 -  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
   1.295 -  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
   1.296 -  int index;
   1.297 -  while (vm_ptr != NULL) {
   1.298 -    if (vm_ptr->is_reserved_region()) {
   1.299 -      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
   1.300 -    // we use the number of thread stack to count threads
   1.301 -      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
   1.302 -      _number_of_threads ++;
   1.303 -    }
   1.304 -      _total_vm_reserved += vm_ptr->size();
   1.305 -      _vm_data[index].inc(vm_ptr->size(), 0);
   1.306 -    } else {
   1.307 -      _total_vm_committed += vm_ptr->size();
   1.308 -      _vm_data[index].inc(0, vm_ptr->size());
   1.309 -    }
   1.310 -    vm_ptr = (VMMemRegion*)vm_itr.next();
   1.311 -  }
   1.312 -  return true;
   1.313 -}
   1.314 -
   1.315 -// baseline malloc'd memory by callsites, but only the callsites with memory allocation
   1.316 -// over 1KB are stored.
   1.317 -bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
   1.318 -  assert(MemTracker::track_callsite(), "detail tracking is off");
   1.319 -
   1.320 -  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
   1.321 -  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
   1.322 -  MallocCallsitePointer malloc_callsite;
   1.323 -
   1.324 -  // initailize malloc callsite array
   1.325 -  if (_malloc_cs == NULL) {
   1.326 -    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
   1.327 -    // out of native memory
   1.328 -    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
   1.329 -      return false;
   1.330 -    }
   1.331 -  } else {
   1.332 -    _malloc_cs->clear();
   1.333 +bool MemBaseline::baseline(bool summaryOnly) {
   1.334 +  if (arena() == NULL) {
   1.335 +    _arena = new (std::nothrow, mtNMT) Arena(mtNMT);
   1.336 +    if (arena() == NULL) return false;
   1.337    }
   1.338  
   1.339 -  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
   1.340 +  reset();
   1.341  
   1.342 -  // sort into callsite pc order. Details are aggregated by callsites
   1.343 -  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
   1.344 -  bool ret = true;
   1.345 +  _class_count = InstanceKlass::number_of_instance_classes();
   1.346  
   1.347 -  // baseline memory that is totaled over 1 KB
   1.348 -  while (malloc_ptr != NULL) {
   1.349 -    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
   1.350 -      // skip thread stacks
   1.351 -      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
   1.352 -        if (malloc_callsite.addr() != malloc_ptr->pc()) {
   1.353 -          if ((malloc_callsite.amount()/K) > 0) {
   1.354 -            if (!_malloc_cs->append(&malloc_callsite)) {
   1.355 -              ret = false;
   1.356 -              break;
   1.357 -            }
   1.358 -          }
   1.359 -          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
   1.360 -        }
   1.361 -        malloc_callsite.inc(malloc_ptr->size());
   1.362 -      }
   1.363 -    }
   1.364 -    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   1.365 -  }
   1.366 -
   1.367 -  // restore to address order. Snapshot malloc data is maintained in memory
   1.368 -  // address order.
   1.369 -  malloc_data->sort((FN_SORT)malloc_sort_by_addr);
   1.370 -
   1.371 -  if (!ret) {
   1.372 -              return false;
   1.373 -            }
   1.374 -  // deal with last record
   1.375 -  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
   1.376 -    if (!_malloc_cs->append(&malloc_callsite)) {
   1.377 -      return false;
   1.378 -    }
   1.379 -  }
   1.380 -  return true;
   1.381 -}
   1.382 -
   1.383 -// baseline mmap'd memory by callsites
   1.384 -bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
   1.385 -  assert(MemTracker::track_callsite(), "detail tracking is off");
   1.386 -
   1.387 -  VMCallsitePointer  vm_callsite;
   1.388 -  VMCallsitePointer* cur_callsite = NULL;
   1.389 -  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
   1.390 -  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
   1.391 -
   1.392 -  // initialize virtual memory map array
   1.393 -  if (_vm_map == NULL) {
   1.394 -    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
   1.395 -   if (_vm_map == NULL || _vm_map->out_of_memory()) {
   1.396 -     return false;
   1.397 -   }
   1.398 -  } else {
   1.399 -    _vm_map->clear();
   1.400 -  }
   1.401 -
   1.402 -  // initialize virtual memory callsite array
   1.403 -  if (_vm_cs == NULL) {
   1.404 -    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
   1.405 -    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
   1.406 -      return false;
   1.407 -    }
   1.408 -  } else {
   1.409 -    _vm_cs->clear();
   1.410 -  }
   1.411 -
   1.412 -  // consolidate virtual memory data
   1.413 -  VMMemRegionEx*     reserved_rec = NULL;
   1.414 -  VMMemRegionEx*     committed_rec = NULL;
   1.415 -
   1.416 -  // vm_ptr is coming in increasing base address order
   1.417 -  while (vm_ptr != NULL) {
   1.418 -    if (vm_ptr->is_reserved_region()) {
   1.419 -      // consolidate reserved memory regions for virtual memory map.
   1.420 -      // The criteria for consolidation is:
   1.421 -      // 1. two adjacent reserved memory regions
   1.422 -      // 2. belong to the same memory type
   1.423 -      // 3. reserved from the same callsite
   1.424 -      if (reserved_rec == NULL ||
   1.425 -        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
   1.426 -        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
   1.427 -        reserved_rec->pc() != vm_ptr->pc()) {
   1.428 -        if (!_vm_map->append(vm_ptr)) {
   1.429 -        return false;
   1.430 -      }
   1.431 -        // inserted reserved region, we need the pointer to the element in virtual
   1.432 -        // memory map array.
   1.433 -        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
   1.434 -      } else {
   1.435 -        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
   1.436 -    }
   1.437 -
   1.438 -      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
   1.439 -      return false;
   1.440 -    }
   1.441 -      vm_callsite = VMCallsitePointer(vm_ptr->pc());
   1.442 -      cur_callsite = &vm_callsite;
   1.443 -      vm_callsite.inc(vm_ptr->size(), 0);
   1.444 -    } else {
   1.445 -      // consolidate committed memory regions for virtual memory map
   1.446 -      // The criterial is:
   1.447 -      // 1. two adjacent committed memory regions
   1.448 -      // 2. committed from the same callsite
   1.449 -      if (committed_rec == NULL ||
   1.450 -        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
   1.451 -        committed_rec->pc() != vm_ptr->pc()) {
   1.452 -        if (!_vm_map->append(vm_ptr)) {
   1.453 -          return false;
   1.454 -        }
   1.455 -        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
   1.456 -    } else {
   1.457 -        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
   1.458 -      }
   1.459 -      vm_callsite.inc(0, vm_ptr->size());
   1.460 -    }
   1.461 -    vm_ptr = (VMMemRegionEx*)vm_itr.next();
   1.462 -  }
   1.463 -  // deal with last record
   1.464 -  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
   1.465 +  if (!baseline_summary()) {
   1.466      return false;
   1.467    }
   1.468  
   1.469 -  // sort it into callsite pc order. Details are aggregated by callsites
   1.470 -  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
   1.471 +  _baseline_type = Summary_baselined;
   1.472  
   1.473 -  // walk the array to consolidate record by pc
   1.474 -  MemPointerArrayIteratorImpl itr(_vm_cs);
   1.475 -  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
   1.476 -  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
   1.477 -  while (next_rec != NULL) {
   1.478 -    assert(callsite_rec != NULL, "Sanity check");
   1.479 -    if (next_rec->addr() == callsite_rec->addr()) {
   1.480 -      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
   1.481 -      itr.remove();
   1.482 -      next_rec = (VMCallsitePointer*)itr.current();
   1.483 -    } else {
   1.484 -      callsite_rec = next_rec;
   1.485 -      next_rec = (VMCallsitePointer*)itr.next();
   1.486 -    }
   1.487 +  // baseline details
   1.488 +  if (!summaryOnly &&
   1.489 +      MemTracker::tracking_level() == NMT_detail) {
   1.490 +    baseline_allocation_sites();
   1.491 +    _baseline_type = Detail_baselined;
   1.492    }
   1.493  
   1.494    return true;
   1.495  }
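
Putting the pieces together, a caller-side sketch (assumes detail-level tracking is enabled, that SortingOrder is scoped inside MemBaseline, and that MallocSiteIterator is a linked-list iterator whose next() returns NULL at the end; none of that is shown in this change):

    MemBaseline baseline;
    if (baseline.baseline(false /* summaryOnly */)) {
      MallocSiteIterator itr = baseline.malloc_sites(MemBaseline::by_size);
      const MallocSite* site;
      while ((site = itr.next()) != NULL) {
        // Report site->size() and site->call_stack(), biggest first.
      }
    }
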
   1.496  
   1.497 -// baseline a snapshot. If summary_only = false, memory usages aggregated by
   1.498 -// callsites are also baselined.
   1.499 -// The method call can be lengthy, especially when detail tracking info is
   1.500 -// requested. So the method checks for safepoint explicitly.
   1.501 -bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
   1.502 -  Thread* THREAD = Thread::current();
   1.503 -  assert(THREAD->is_Java_thread(), "must be a JavaThread");
   1.504 -  MutexLocker snapshot_locker(snapshot._lock);
   1.505 -  reset();
   1.506 -  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
   1.507 -  if (_baselined) {
   1.508 -    check_safepoint((JavaThread*)THREAD);
   1.509 -    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
   1.510 -  }
   1.511 -  _number_of_classes = snapshot.number_of_classes();
   1.512 -
   1.513 -  if (!summary_only && MemTracker::track_callsite() && _baselined) {
   1.514 -    check_safepoint((JavaThread*)THREAD);
   1.515 -    _baselined =  baseline_malloc_details(snapshot._alloc_ptrs);
   1.516 -    if (_baselined) {
   1.517 -      check_safepoint((JavaThread*)THREAD);
   1.518 -      _baselined =  baseline_vm_details(snapshot._vm_ptrs);
   1.519 -    }
   1.520 -  }
   1.521 -  return _baselined;
   1.522 +int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
   1.523 +  const VirtualMemoryAllocationSite& s2) {
   1.524 +  return s1.call_stack()->compare(*s2.call_stack());
   1.525  }
   1.526  
   1.527 +bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
   1.528 +  SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site, ResourceObj::ARENA>
   1.529 +    allocation_sites(arena());
   1.530  
   1.531 -int MemBaseline::flag2index(MEMFLAGS flag) const {
   1.532 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   1.533 -    if (MemType2NameMap[index]._flag == flag) {
   1.534 -      return index;
   1.535 +  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
   1.536 +  const ReservedMemoryRegion* rgn;
   1.537 +  VirtualMemoryAllocationSite* site;
   1.538 +  while ((rgn = itr.next()) != NULL) {
   1.539 +    VirtualMemoryAllocationSite tmp(*rgn->call_stack());
   1.540 +    site = allocation_sites.find(tmp);
   1.541 +    if (site == NULL) {
   1.542 +      LinkedListNode<VirtualMemoryAllocationSite>* node =
   1.543 +        allocation_sites.add(tmp);
   1.544 +      if (node == NULL) return false;
   1.545 +      site = node->data();
   1.546      }
   1.547 +    site->reserve_memory(rgn->size());
   1.548 +    site->commit_memory(rgn->committed_size());
   1.549    }
   1.550 -  assert(false, "no type");
   1.551 -  return -1;
   1.552 +
   1.553 +  _virtual_memory_sites.set_head(allocation_sites.head());
   1.554 +  return true;
   1.555  }
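
A concrete illustration of the aggregation semantics:

    // Two reservations made from the same call stack:
    //   region A: size() = 2 MB, committed_size() = 1 MB
    //   region B: size() = 3 MB, committed_size() = 0.5 MB
    // The loop above folds both into one VirtualMemoryAllocationSite:
    //   reserved()  = 5 MB
    //   committed() = 1.5 MB
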
   1.556  
   1.557 -const char* MemBaseline::type2name(MEMFLAGS type) {
   1.558 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   1.559 -    if (MemType2NameMap[index]._flag == type) {
   1.560 -      return MemType2NameMap[index]._name;
   1.561 -    }
   1.562 +MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
   1.563 +  assert(!_malloc_sites.is_empty(), "Detail baseline?");
   1.564 +  switch(order) {
   1.565 +    case by_size:
   1.566 +      malloc_sites_to_size_order();
   1.567 +      break;
   1.568 +    case by_site:
   1.569 +      malloc_sites_to_allocation_site_order();
   1.570 +      break;
   1.571 +    case by_address:
   1.572 +    default:
   1.573 +      ShouldNotReachHere();
   1.574    }
   1.575 -  assert(false, err_msg("bad type %x", type));
   1.576 -  return NULL;
   1.577 +  return MallocSiteIterator(_malloc_sites.head());
   1.578  }
   1.579  
   1.580 -
   1.581 -MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
   1.582 -  _total_malloced = other._total_malloced;
   1.583 -  _total_vm_reserved = other._total_vm_reserved;
   1.584 -  _total_vm_committed = other._total_vm_committed;
   1.585 -
   1.586 -  _baselined = other._baselined;
   1.587 -  _number_of_classes = other._number_of_classes;
   1.588 -
   1.589 -  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
   1.590 -    _malloc_data[index] = other._malloc_data[index];
   1.591 -    _vm_data[index] = other._vm_data[index];
   1.592 -    _arena_data[index] = other._arena_data[index];
   1.593 +VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
   1.594 +  assert(!_virtual_memory_sites.is_empty(), "Detail baseline?");
   1.595 +  switch(order) {
   1.596 +    case by_size:
   1.597 +      virtual_memory_sites_to_size_order();
   1.598 +      break;
   1.599 +    case by_site:
   1.600 +      virtual_memory_sites_to_reservation_site_order();
   1.601 +      break;
   1.602 +    case by_address:
   1.603 +    default:
   1.604 +      ShouldNotReachHere();
   1.605    }
   1.606 -
   1.607 -  if (MemTracker::track_callsite()) {
   1.608 -    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
   1.609 -    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
   1.610 -           "not properly baselined");
   1.611 -    _malloc_cs->clear();
   1.612 -    _vm_cs->clear();
   1.613 -    int index;
   1.614 -    for (index = 0; index < other._malloc_cs->length(); index ++) {
   1.615 -      _malloc_cs->append(other._malloc_cs->at(index));
   1.616 -    }
   1.617 -
   1.618 -    for (index = 0; index < other._vm_cs->length(); index ++) {
   1.619 -      _vm_cs->append(other._vm_cs->at(index));
   1.620 -    }
   1.621 -  }
   1.622 -  return *this;
   1.623 +  return VirtualMemorySiteIterator(_virtual_memory_sites.head());
   1.624  }
   1.625  
   1.626 -/* compare functions for sorting */
   1.627  
   1.628 -// sort snapshot malloc'd records in callsite pc order
   1.629 -int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
   1.630 -  assert(MemTracker::track_callsite(),"Just check");
   1.631 -  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
   1.632 -  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
   1.633 -  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
    1.634 +// Sorting allocation sites in different orders
   1.635 +void MemBaseline::malloc_sites_to_size_order() {
   1.636 +  if (_malloc_sites_order != by_size) {
   1.637 +    SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
   1.638 +      tmp(arena());
   1.639 +
   1.640 +    // Add malloc sites to sorted linked list to sort into size order
   1.641 +    tmp.move(&_malloc_sites);
   1.642 +    _malloc_sites.set_head(tmp.head());
   1.643 +    tmp.set_head(NULL);
   1.644 +    _malloc_sites_order = by_size;
   1.645 +  }
   1.646  }
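
The re-sort idiom used here and in the three functions below never copies element payloads: moving the list into a freshly built SortedLinkedList re-links the existing arena-allocated nodes in comparator order, and clearing the temporary's head afterwards keeps it from owning the stolen nodes. The effect of move(), assumed for illustration (the real implementation lives in the linked-list utility, not in this file):

    // tmp.move(&_malloc_sites) is assumed to behave like:
    //   while (_malloc_sites has a head node) {
    //     detach that node;
    //     re-link it into tmp at the position the comparator picks;
    //   }
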
   1.647  
   1.648 -// sort baselined malloc'd records in size order
   1.649 -int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
   1.650 -  assert(MemTracker::is_on(), "Just check");
   1.651 -  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
   1.652 -  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
   1.653 -  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
   1.654 +void MemBaseline::malloc_sites_to_allocation_site_order() {
   1.655 +  if (_malloc_sites_order != by_site) {
   1.656 +    SortedLinkedList<MallocSite, compare_malloc_site, ResourceObj::ARENA>
   1.657 +      tmp(arena());
   1.658 +    // Add malloc sites to sorted linked list to sort into site (address) order
   1.659 +    tmp.move(&_malloc_sites);
   1.660 +    _malloc_sites.set_head(tmp.head());
   1.661 +    tmp.set_head(NULL);
   1.662 +    _malloc_sites_order = by_site;
   1.663 +  }
   1.664  }
   1.665  
   1.666 -// sort baselined malloc'd records in callsite pc order
   1.667 -int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
   1.668 -  assert(MemTracker::is_on(), "Just check");
   1.669 -  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
   1.670 -  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
   1.671 -  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
   1.672 +void MemBaseline::virtual_memory_sites_to_size_order() {
   1.673 +  if (_virtual_memory_sites_order != by_size) {
   1.674 +    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size, ResourceObj::ARENA>
   1.675 +      tmp(arena());
   1.676 +
   1.677 +    tmp.move(&_virtual_memory_sites);
   1.678 +
   1.679 +    _virtual_memory_sites.set_head(tmp.head());
   1.680 +    tmp.set_head(NULL);
   1.681 +    _virtual_memory_sites_order = by_size;
   1.682 +  }
   1.683  }
   1.684  
   1.685 +void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
    1.686 +  if (_virtual_memory_sites_order != by_site) {
   1.687 +    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site, ResourceObj::ARENA>
   1.688 +      tmp(arena());
   1.689  
   1.690 -// sort baselined mmap'd records in size (reserved size) order
   1.691 -int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
   1.692 -  assert(MemTracker::is_on(), "Just check");
   1.693 -  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
   1.694 -  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
   1.695 -  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
    1.696 +    tmp.move(&_virtual_memory_sites);
   1.697 +
   1.698 +    _virtual_memory_sites.set_head(tmp.head());
   1.699 +    tmp.set_head(NULL);
   1.700 +
    1.701 +    _virtual_memory_sites_order = by_site;
   1.702 +  }
   1.703  }
   1.704  
   1.705 -// sort baselined mmap'd records in callsite pc order
   1.706 -int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
   1.707 -  assert(MemTracker::is_on(), "Just check");
   1.708 -  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
   1.709 -  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
   1.710 -  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
   1.711 -}
   1.712 -
   1.713 -
   1.714 -// sort snapshot malloc'd records in memory block address order
   1.715 -int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
   1.716 -  assert(MemTracker::is_on(), "Just check");
   1.717 -  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
   1.718 -  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
   1.719 -  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
   1.720 -  assert(p1 == p2 || delta != 0, "dup pointer");
   1.721 -  return delta;
   1.722 -}
   1.723 -
