src/share/vm/services/memBaseline.cpp

changeset 4193: 716c64bda5ba
parent 3900: d2a62e0f25eb
child 4274: fb3190e77d3c
--- a/src/share/vm/services/memBaseline.cpp	Thu Oct 18 13:09:47 2012 -0400
+++ b/src/share/vm/services/memBaseline.cpp	Fri Oct 19 21:40:07 2012 -0400
@@ -40,6 +40,7 @@
   {mtSymbol,     "Symbol"},
   {mtNMT,        "Memory Tracking"},
   {mtChunk,      "Pooled Free Chunks"},
+  {mtClassShared,"Shared spaces for classes"},
   {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                              // behind
 };
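
The new mtClassShared row extends the flag-to-name table that type2name() scans linearly; the same function gets a stronger assert later in this change. A minimal standalone sketch of that table walk, using hypothetical flag values rather than HotSpot's mt* constants:

    struct MemType2Name { int _flag; const char* _name; };

    // Hypothetical flag values stand in for HotSpot's mt* enum constants.
    static MemType2Name name_map[] = {
      { 1, "Symbol"                    },
      { 2, "Shared spaces for classes" },  // the kind of entry this change adds
      { 0, "Unknown"                   },  // mtNone catch-all stays last
    };

    static const char* type2name(int flag) {
      const int n = sizeof(name_map) / sizeof(name_map[0]);
      for (int i = 0; i < n; i++) {
        if (name_map[i]._flag == flag) return name_map[i]._name;
      }
      return "Unknown";  // lagging type tags fall through to the mtNone entry
    }
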
@@ -55,6 +56,7 @@
 
   _malloc_cs = NULL;
   _vm_cs = NULL;
+  _vm_map = NULL;
 
   _number_of_classes = 0;
   _number_of_threads = 0;
@@ -72,6 +74,11 @@
     _vm_cs = NULL;
   }
 
+  if (_vm_map != NULL) {
+    delete _vm_map;
+    _vm_map = NULL;
+  }
+
   reset();
 }
 
@@ -85,6 +92,7 @@
 
   if (_malloc_cs != NULL) _malloc_cs->clear();
   if (_vm_cs != NULL) _vm_cs->clear();
+  if (_vm_map != NULL) _vm_map->clear();
 
   for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
     _malloc_data[index].clear();
@@ -94,39 +102,33 @@
 }
 
 MemBaseline::~MemBaseline() {
-  if (_malloc_cs != NULL) {
-    delete _malloc_cs;
-  }
-
-  if (_vm_cs != NULL) {
-    delete _vm_cs;
-  }
+  clear();
 }
 
 // baseline malloc'd memory records, generate overall summary and summaries by
 // memory types
 bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
-  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
-  MemPointerRecord* mptr = (MemPointerRecord*)mItr.current();
+  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
+  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
   size_t used_arena_size = 0;
   int index;
-  while (mptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(mptr->flags()));
-    size_t size = mptr->size();
+  while (malloc_ptr != NULL) {
+    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
+    size_t size = malloc_ptr->size();
     _total_malloced += size;
     _malloc_data[index].inc(size);
-    if (MemPointerRecord::is_arena_record(mptr->flags())) {
+    if (MemPointerRecord::is_arena_record(malloc_ptr->flags())) {
       // see if arena size record present
-      MemPointerRecord* next_p = (MemPointerRecordEx*)mItr.peek_next();
-      if (MemPointerRecord::is_arena_size_record(next_p->flags())) {
-        assert(next_p->is_size_record_of_arena(mptr), "arena records do not match");
-        size = next_p->size();
+      MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
+      if (MemPointerRecord::is_arena_size_record(next_malloc_ptr->flags())) {
+        assert(next_malloc_ptr->is_size_record_of_arena(malloc_ptr), "arena records do not match");
+        size = next_malloc_ptr->size();
         _arena_data[index].inc(size);
         used_arena_size += size;
-        mItr.next();
+        malloc_itr.next();
       }
     }
-    mptr = (MemPointerRecordEx*)mItr.next();
+    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
   }
 
   // subtract used arena size to get size of arena chunk in free list
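
The renamed loop keeps the old pairing invariant: malloc records arrive in address order, and an arena's size record, when present, immediately follows the arena record itself, so the iterator peeks ahead and consumes both at once. A standalone sketch of that walk, with a hypothetical Rec type in place of MemPointerRecord and the per-type totals omitted:

    #include <cstddef>
    #include <vector>

    struct Rec {
      bool   is_arena;       // arena header record
      bool   is_arena_size;  // companion record carrying the arena's size
      size_t size;
    };

    // Sum arena sizes from an address-ordered record stream; a size record,
    // when present, directly follows its arena record.
    static size_t arena_bytes(const std::vector<Rec>& recs) {
      size_t total = 0;
      for (size_t i = 0; i < recs.size(); i++) {
        if (recs[i].is_arena &&
            i + 1 < recs.size() && recs[i + 1].is_arena_size) {
          total += recs[i + 1].size;  // use the companion record's size
          i++;                        // and consume it, as malloc_itr.next() does
        }
      }
      return total;
    }
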
@@ -142,20 +144,23 @@
 // baseline mmap'd memory records, generate overall summary and summaries by
 // memory types
 bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
-  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
-  VMMemRegion* vptr = (VMMemRegion*)vItr.current();
+  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
+  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
   int index;
-  while (vptr != NULL) {
-    index = flag2index(FLAGS_TO_MEMORY_TYPE(vptr->flags()));
-
+  while (vm_ptr != NULL) {
+    if (vm_ptr->is_reserved_region()) {
+      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
     // we use the number of thread stacks to count threads
-    if (IS_MEMORY_TYPE(vptr->flags(), mtThreadStack)) {
+      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
       _number_of_threads ++;
     }
-    _total_vm_reserved += vptr->reserved_size();
-    _total_vm_committed += vptr->committed_size();
-    _vm_data[index].inc(vptr->reserved_size(), vptr->committed_size());
-    vptr = (VMMemRegion*)vItr.next();
+      _total_vm_reserved += vm_ptr->size();
+      _vm_data[index].inc(vm_ptr->size(), 0);
+    } else {
+      _total_vm_committed += vm_ptr->size();
+      _vm_data[index].inc(0, vm_ptr->size());
+    }
+    vm_ptr = (VMMemRegion*)vm_itr.next();
   }
   return true;
 }
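
This hunk tracks a snapshot format change: a region record is now either a reserved region or a committed sub-region, never both, so the paired reserved_size()/committed_size() calls give way to a single size() attributed by is_reserved_region(). A minimal sketch of the split accounting, assuming a hypothetical Region type:

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for VMMemRegion: a record is either a reserved
    // region or a committed sub-region, never both.
    struct Region {
      bool   reserved;
      size_t size;
    };

    // Mirrors the is_reserved_region() branch above: each record adds its
    // size to exactly one running total.
    static void summarize(const std::vector<Region>& regions,
                          size_t* total_reserved, size_t* total_committed) {
      *total_reserved  = 0;
      *total_committed = 0;
      for (size_t i = 0; i < regions.size(); i++) {
        if (regions[i].reserved) {
          *total_reserved  += regions[i].size;
        } else {
          *total_committed += regions[i].size;
        }
      }
    }
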
@@ -165,41 +170,57 @@
 bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
   assert(MemTracker::track_callsite(), "detail tracking is off");
 
-  MemPointerArrayIteratorImpl mItr((MemPointerArray*)malloc_records);
-  MemPointerRecordEx* mptr = (MemPointerRecordEx*)mItr.current();
-  MallocCallsitePointer mp;
+  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
+  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
+  MallocCallsitePointer malloc_callsite;
 
+  // initialize malloc callsite array
   if (_malloc_cs == NULL) {
     _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
     // out of native memory
-    if (_malloc_cs == NULL) {
+    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
       return false;
     }
   } else {
     _malloc_cs->clear();
   }
 
+  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
+
+  // sort into callsite pc order. Details are aggregated by callsites
+  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
+  bool ret = true;
+
   // baseline memory that is totaled over 1 KB
-  while (mptr != NULL) {
-    if (!MemPointerRecord::is_arena_size_record(mptr->flags())) {
+  while (malloc_ptr != NULL) {
+    if (!MemPointerRecord::is_arena_size_record(malloc_ptr->flags())) {
       // skip thread stacks
-      if (!IS_MEMORY_TYPE(mptr->flags(), mtThreadStack)) {
-        if (mp.addr() != mptr->pc()) {
-          if ((mp.amount()/K) > 0) {
-            if (!_malloc_cs->append(&mp)) {
+      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
+        if (malloc_callsite.addr() != malloc_ptr->pc()) {
+          if ((malloc_callsite.amount()/K) > 0) {
+            if (!_malloc_cs->append(&malloc_callsite)) {
+              ret = false;
+              break;
+            }
+          }
+          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
+        }
+        malloc_callsite.inc(malloc_ptr->size());
+      }
+    }
+    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
+  }
+
+  // restore to address order. Snapshot malloc data is maintained in memory
+  // address order.
+  malloc_data->sort((FN_SORT)malloc_sort_by_addr);
+
+  if (!ret) {
               return false;
             }
-          }
-          mp = MallocCallsitePointer(mptr->pc());
-        }
-        mp.inc(mptr->size());
-      }
-    }
-    mptr = (MemPointerRecordEx*)mItr.next();
-  }
-
-  if (mp.addr() != 0 && (mp.amount()/K) > 0) {
-    if (!_malloc_cs->append(&mp)) {
+  // deal with last record
+  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
+    if (!_malloc_cs->append(&malloc_callsite)) {
       return false;
     }
   }
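
The sorting that baseline() used to perform around this call now lives inside the function: sort the snapshot into callsite pc order, aggregate adjacent runs, then restore address order before returning, which is why an append failure sets ret and breaks instead of returning immediately. A standalone sketch of that sort/aggregate/restore shape, omitting the 1 KB threshold and the record-type filtering, with hypothetical Alloc and Callsite types:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Alloc    { uintptr_t addr; uintptr_t pc; size_t size; };
    struct Callsite { uintptr_t pc; size_t amount; };

    static std::vector<Callsite> aggregate_by_callsite(std::vector<Alloc>& allocs) {
      // 1. sort by callsite pc so records from the same pc become adjacent
      std::sort(allocs.begin(), allocs.end(),
                [](const Alloc& a, const Alloc& b) { return a.pc < b.pc; });

      // 2. fold each adjacent run that shares a pc into one callsite total
      std::vector<Callsite> out;
      for (const Alloc& a : allocs) {
        if (out.empty() || out.back().pc != a.pc) {
          Callsite c = { a.pc, 0 };
          out.push_back(c);
        }
        out.back().amount += a.size;
      }

      // 3. restore address order: the snapshot must stay sorted by address
      std::sort(allocs.begin(), allocs.end(),
                [](const Alloc& a, const Alloc& b) { return a.addr < b.addr; });
      return out;
    }
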
@@ -210,34 +231,106 @@
 bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
   assert(MemTracker::track_callsite(), "detail tracking is off");
 
-  VMCallsitePointer vp;
-  MemPointerArrayIteratorImpl vItr((MemPointerArray*)vm_records);
-  VMMemRegionEx* vptr = (VMMemRegionEx*)vItr.current();
+  VMCallsitePointer  vm_callsite;
+  VMCallsitePointer* cur_callsite = NULL;
+  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
+  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
 
+  // initialize virtual memory map array
+  if (_vm_map == NULL) {
+    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
+   if (_vm_map == NULL || _vm_map->out_of_memory()) {
+     return false;
+   }
+  } else {
+    _vm_map->clear();
+  }
+
+  // initialize virtual memory callsite array
   if (_vm_cs == NULL) {
     _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
-    if (_vm_cs == NULL) {
+    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
       return false;
     }
   } else {
     _vm_cs->clear();
   }
 
-  while (vptr != NULL) {
-    if (vp.addr() != vptr->pc()) {
-      if (!_vm_cs->append(&vp)) {
+  // consolidate virtual memory data
+  VMMemRegionEx*     reserved_rec = NULL;
+  VMMemRegionEx*     committed_rec = NULL;
+
+  // vm_ptr is coming in increasing base address order
+  while (vm_ptr != NULL) {
+    if (vm_ptr->is_reserved_region()) {
+      // consolidate reserved memory regions for virtual memory map.
+      // The criteria for consolidation are:
+      // 1. two adjacent reserved memory regions
+      // 2. belong to the same memory type
+      // 3. reserved from the same callsite
+      if (reserved_rec == NULL ||
+        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
+        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
+        reserved_rec->pc() != vm_ptr->pc()) {
+        if (!_vm_map->append(vm_ptr)) {
         return false;
       }
-      vp = VMCallsitePointer(vptr->pc());
+        // inserted reserved region, we need the pointer to the element in virtual
+        // memory map array.
+        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
+      } else {
+        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
     }
-    vp.inc(vptr->size(), vptr->committed_size());
-    vptr = (VMMemRegionEx*)vItr.next();
-  }
-  if (vp.addr() != 0) {
-    if (!_vm_cs->append(&vp)) {
+
+      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
       return false;
     }
+      vm_callsite = VMCallsitePointer(vm_ptr->pc());
+      cur_callsite = &vm_callsite;
+      vm_callsite.inc(vm_ptr->size(), 0);
+    } else {
+      // consolidate committed memory regions for virtual memory map
+      // The criteria are:
+      // 1. two adjacent committed memory regions
+      // 2. committed from the same callsite
+      if (committed_rec == NULL ||
+        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
+        committed_rec->pc() != vm_ptr->pc()) {
+        if (!_vm_map->append(vm_ptr)) {
+          return false;
   }
+        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
+    } else {
+        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
+      }
+      vm_callsite.inc(0, vm_ptr->size());
+    }
+    vm_ptr = (VMMemRegionEx*)vm_itr.next();
+  }
+  // deal with last record
+  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
+    return false;
+  }
+
+  // sort it into callsite pc order. Details are aggregated by callsites
+  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
+
+  // walk the array to consolidate records by pc
+  MemPointerArrayIteratorImpl itr(_vm_cs);
+  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
+  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
+  while (next_rec != NULL) {
+    assert(callsite_rec != NULL, "Sanity check");
+    if (next_rec->addr() == callsite_rec->addr()) {
+      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
+      itr.remove();
+      next_rec = (VMCallsitePointer*)itr.current();
+    } else {
+      callsite_rec = next_rec;
+      next_rec = (VMCallsitePointer*)itr.next();
+    }
+  }
+
   return true;
 }
 
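The new _vm_map is built by consolidating address-ordered region records: a reserved region merges into the previous one only if it is adjacent, of the same memory type, and reserved from the same callsite; committed regions merge on adjacency and callsite alone. A standalone sketch of the reserved-region rule, with a hypothetical Region type in place of VMMemRegionEx:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Region {
      uintptr_t base;  // start address
      size_t    size;  // length in bytes
      int       type;  // memory type tag
      uintptr_t pc;    // reserving callsite
    };

    // Merge r into the map when it directly extends the previous reserved
    // record, has the same memory type, and comes from the same callsite;
    // committed records would drop the type comparison.
    static void add_reserved(std::vector<Region>& map, const Region& r) {
      if (!map.empty()) {
        Region& last = map.back();
        if (last.base + last.size == r.base &&
            last.type == r.type &&
            last.pc == r.pc) {
          last.size += r.size;  // the expand_region() case
          return;
        }
      }
      map.push_back(r);  // otherwise start a new map entry
    }
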
@@ -251,12 +344,8 @@
   _number_of_classes = SystemDictionary::number_of_classes();
 
   if (!summary_only && MemTracker::track_callsite() && _baselined) {
-    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_pc);
-    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_pc);
     _baselined =  baseline_malloc_details(snapshot._alloc_ptrs) &&
       baseline_vm_details(snapshot._vm_ptrs);
-    ((MemPointerArray*)snapshot._alloc_ptrs)->sort((FN_SORT)malloc_sort_by_addr);
-    ((MemPointerArray*)snapshot._vm_ptrs)->sort((FN_SORT)vm_sort_by_addr);
   }
   return _baselined;
 }
@@ -278,7 +367,7 @@
       return MemType2NameMap[index]._name;
     }
   }
-  assert(false, "no type");
+  assert(false, err_msg("bad type %x", type));
   return NULL;
 }
 
@@ -341,13 +430,6 @@
   return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
 }
 
-// sort snapshot mmap'd records in callsite pc order
-int MemBaseline::vm_sort_by_pc(const void* p1, const void* p2) {
-  assert(MemTracker::track_callsite(),"Just check");
-  const VMMemRegionEx* mp1 = (const VMMemRegionEx*)p1;
-  const VMMemRegionEx* mp2 = (const VMMemRegionEx*)p2;
-  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
-}
 
 // sort baselined mmap'd records in size (reserved size) order
 int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
@@ -376,12 +458,3 @@
   return delta;
 }
 
-// sort snapshot mmap'd records in memory block address order
-int MemBaseline::vm_sort_by_addr(const void* p1, const void* p2) {
-  assert(MemTracker::is_on(), "Just check");
-  const VMMemRegion* mp1 = (const VMMemRegion*)p1;
-  const VMMemRegion* mp2 = (const VMMemRegion*)p2;
-  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
-  assert(delta != 0, "dup pointer");
-  return delta;
-}
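
Both snapshot-order comparators can go because the details pass now sorts and restores the snapshot itself; the comparators that remain all follow the same qsort-style contract. A minimal sketch of that contract, with UNSIGNED_COMPARE spelled out inline to avoid unsigned wrap-around and a hypothetical Site type:

    #include <cstdint>
    #include <cstdlib>

    struct Site { uintptr_t pc; };

    // Return <0, 0, >0 without subtracting unsigned values, which could wrap.
    static int sort_by_pc(const void* p1, const void* p2) {
      const Site* s1 = (const Site*)p1;
      const Site* s2 = (const Site*)p2;
      if (s1->pc == s2->pc) return 0;
      return (s1->pc > s2->pc) ? 1 : -1;
    }

    // usage: qsort(sites, count, sizeof(Site), sort_by_pc);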
