src/share/vm/services/memBaseline.cpp

author:      zgu
date:        Tue, 09 Jul 2013 13:18:16 -0400
changeset:   5375 72fce0b2d341
parent:      5261 ab313d4e9a8b
child:       6876 710a3c8b516e
child:       7074 833b0f92429a
permissions: -rw-r--r--

8011760: assert(delta != 0) failed: dup pointer in MemBaseline::malloc_sort_by_addr
Summary: Some qsort implementations on Linux x86 compare an element to itself, which is mistakenly treated as a duplicate pointer
Reviewed-by: dcubed, acorn
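
The fix is the guarded assertion at the bottom of malloc_sort_by_addr in this changeset: qsort is allowed to hand the comparator the same element for both arguments, so a zero comparison result only indicates a duplicate pointer when the two arguments are distinct. Below is a minimal standalone sketch of the idea; it is a hypothetical illustration with made-up names (sort_by_addr, addrs) that uses the C library assert rather than HotSpot's assert macro.

// Hypothetical illustration only; not part of the HotSpot sources.
#include <assert.h>
#include <stdlib.h>

// Comparator over an array of addresses, mirroring malloc_sort_by_addr below.
static int sort_by_addr(const void* p1, const void* p2) {
  void* a1 = *(void* const*)p1;
  void* a2 = *(void* const*)p2;
  int delta = (a1 > a2) ? 1 : ((a1 < a2) ? -1 : 0);
  // Some qsort implementations compare an element against itself (p1 == p2),
  // so only a zero result from two distinct elements indicates a duplicate.
  assert(p1 == p2 || delta != 0);
  return delta;
}

int main() {
  void* addrs[] = { (void*)0x2000, (void*)0x1000, (void*)0x1800 };
  qsort(addrs, sizeof(addrs) / sizeof(addrs[0]), sizeof(addrs[0]), sort_by_addr);
  return 0;
}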

zgu@3900 1 /*
zgu@4980 2 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
zgu@3900 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
zgu@3900 4 *
zgu@3900 5 * This code is free software; you can redistribute it and/or modify it
zgu@3900 6 * under the terms of the GNU General Public License version 2 only, as
zgu@3900 7 * published by the Free Software Foundation.
zgu@3900 8 *
zgu@3900 9 * This code is distributed in the hope that it will be useful, but WITHOUT
zgu@3900 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
zgu@3900 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
zgu@3900 12 * version 2 for more details (a copy is included in the LICENSE file that
zgu@3900 13 * accompanied this code).
zgu@3900 14 *
zgu@3900 15 * You should have received a copy of the GNU General Public License version
zgu@3900 16 * 2 along with this work; if not, write to the Free Software Foundation,
zgu@3900 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
zgu@3900 18 *
zgu@3900 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
zgu@3900 20 * or visit www.oracle.com if you need additional information or have any
zgu@3900 21 * questions.
zgu@3900 22 *
zgu@3900 23 */
zgu@3900 24 #include "precompiled.hpp"
zgu@3900 25 #include "memory/allocation.hpp"
zgu@4980 26 #include "runtime/safepoint.hpp"
zgu@4980 27 #include "runtime/thread.inline.hpp"
zgu@3900 28 #include "services/memBaseline.hpp"
zgu@3900 29 #include "services/memTracker.hpp"
zgu@3900 30
zgu@4980 31
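// Human-readable names for the tracked memory types; the table is indexed in
// parallel with the per-type summary arrays (_malloc_data, _vm_data, _arena_data).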
zgu@3900 32 MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
zgu@3900 33 {mtJavaHeap, "Java Heap"},
zgu@3900 34 {mtClass, "Class"},
zgu@3900 35 {mtThreadStack,"Thread Stack"},
zgu@3900 36 {mtThread, "Thread"},
zgu@3900 37 {mtCode, "Code"},
zgu@3900 38 {mtGC, "GC"},
zgu@3900 39 {mtCompiler, "Compiler"},
zgu@3900 40 {mtInternal, "Internal"},
zgu@3900 41 {mtOther, "Other"},
zgu@3900 42 {mtSymbol, "Symbol"},
zgu@3900 43 {mtNMT, "Memory Tracking"},
sla@5237 44 {mtTracing, "Tracing"},
zgu@3900 45 {mtChunk, "Pooled Free Chunks"},
zgu@4193 46 {mtClassShared,"Shared spaces for classes"},
ctornqvi@4512 47 {mtTest, "Test"},
zgu@3900 48 {mtNone, "Unknown"} // It can happen when type tagging records are lagging
zgu@3900 49 // behind
zgu@3900 50 };
zgu@3900 51
zgu@3900 52 MemBaseline::MemBaseline() {
zgu@3900 53 _baselined = false;
zgu@3900 54
zgu@3900 55 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
zgu@3900 56 _malloc_data[index].set_type(MemType2NameMap[index]._flag);
zgu@3900 57 _vm_data[index].set_type(MemType2NameMap[index]._flag);
zgu@3900 58 _arena_data[index].set_type(MemType2NameMap[index]._flag);
zgu@3900 59 }
zgu@3900 60
zgu@3900 61 _malloc_cs = NULL;
zgu@3900 62 _vm_cs = NULL;
zgu@4193 63 _vm_map = NULL;
zgu@3900 64
zgu@3900 65 _number_of_classes = 0;
zgu@3900 66 _number_of_threads = 0;
zgu@3900 67 }
zgu@3900 68
zgu@3900 69
zgu@3900 70 void MemBaseline::clear() {
zgu@3900 71 if (_malloc_cs != NULL) {
zgu@3900 72 delete _malloc_cs;
zgu@3900 73 _malloc_cs = NULL;
zgu@3900 74 }
zgu@3900 75
zgu@3900 76 if (_vm_cs != NULL) {
zgu@3900 77 delete _vm_cs;
zgu@3900 78 _vm_cs = NULL;
zgu@3900 79 }
zgu@3900 80
zgu@4193 81 if (_vm_map != NULL) {
zgu@4193 82 delete _vm_map;
zgu@4193 83 _vm_map = NULL;
zgu@4193 84 }
zgu@4193 85
zgu@3900 86 reset();
zgu@3900 87 }
zgu@3900 88
zgu@3900 89
zgu@3900 90 void MemBaseline::reset() {
zgu@3900 91 _baselined = false;
zgu@3900 92 _total_vm_reserved = 0;
zgu@3900 93 _total_vm_committed = 0;
zgu@3900 94 _total_malloced = 0;
zgu@3900 95 _number_of_classes = 0;
zgu@3900 96
zgu@3900 97 if (_malloc_cs != NULL) _malloc_cs->clear();
zgu@3900 98 if (_vm_cs != NULL) _vm_cs->clear();
zgu@4193 99 if (_vm_map != NULL) _vm_map->clear();
zgu@3900 100
zgu@3900 101 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
zgu@3900 102 _malloc_data[index].clear();
zgu@3900 103 _vm_data[index].clear();
zgu@3900 104 _arena_data[index].clear();
zgu@3900 105 }
zgu@3900 106 }
zgu@3900 107
zgu@3900 108 MemBaseline::~MemBaseline() {
zgu@4193 109 clear();
zgu@3900 110 }
zgu@3900 111
zgu@3900 112 // baseline malloc'd memory records, generate overall summary and summaries by
zgu@3900 113 // memory types
zgu@3900 114 bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
zgu@4193 115 MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
zgu@4193 116 MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
zgu@3900 117 size_t used_arena_size = 0;
zgu@3900 118 int index;
zgu@4193 119 while (malloc_ptr != NULL) {
zgu@4193 120 index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
zgu@4193 121 size_t size = malloc_ptr->size();
zgu@4274 122 if (malloc_ptr->is_arena_memory_record()) {
zgu@4274 123 // We do have anonymous arenas; they are either used as value objects,
zgu@4274 124 // which are embedded inside other objects, or used as stack objects.
zgu@4274 125 _arena_data[index].inc(size);
zgu@4274 126 used_arena_size += size;
zgu@4274 127 } else {
zgu@4274 128 _total_malloced += size;
zgu@4274 129 _malloc_data[index].inc(size);
zgu@4274 130 if (malloc_ptr->is_arena_record()) {
zgu@4274 131 // see if an arena memory record is present
zgu@4274 132 MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
zgu@5261 133 if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
zgu@4274 134 assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
zgu@4274 135 "Arena records do not match");
zgu@4274 136 size = next_malloc_ptr->size();
zgu@4274 137 _arena_data[index].inc(size);
zgu@4274 138 used_arena_size += size;
zgu@4274 139 malloc_itr.next();
zgu@4274 140 }
zgu@3900 141 }
zgu@3900 142 }
zgu@4193 143 malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
zgu@3900 144 }
zgu@3900 145
zgu@3900 146 // subtract used arena size to get size of arena chunks in the free list
zgu@3900 147 index = flag2index(mtChunk);
zgu@3900 148 _malloc_data[index].reduce(used_arena_size);
zgu@3900 149 // we really don't know how many chunks are in the free list, so just set
zgu@3900 150 // the counter to 0
zgu@3900 151 _malloc_data[index].overwrite_counter(0);
zgu@3900 152
zgu@3900 153 return true;
zgu@3900 154 }
zgu@3900 155
zgu@4980 156 // check if there is a safepoint in progress; if so, block the thread
zgu@4980 157 // for the safepoint
zgu@4980 158 void MemBaseline::check_safepoint(JavaThread* thr) {
zgu@4980 159 if (SafepointSynchronize::is_synchronizing()) {
zgu@4992 160 // grab and drop the SR_lock to honor the safepoint protocol
zgu@4992 161 MutexLocker ml(thr->SR_lock());
zgu@4980 162 }
zgu@4980 163 }
zgu@4980 164
zgu@3900 165 // baseline mmap'd memory records, generate overall summary and summaries by
zgu@3900 166 // memory types
zgu@3900 167 bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
zgu@4193 168 MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
zgu@4193 169 VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
zgu@3900 170 int index;
zgu@4193 171 while (vm_ptr != NULL) {
zgu@4193 172 if (vm_ptr->is_reserved_region()) {
zgu@4193 173 index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
zgu@3900 174 // we use the number of thread stacks to count threads
zgu@4193 175 if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
zgu@3900 176 _number_of_threads ++;
zgu@3900 177 }
zgu@4193 178 _total_vm_reserved += vm_ptr->size();
zgu@4193 179 _vm_data[index].inc(vm_ptr->size(), 0);
zgu@4193 180 } else {
zgu@4193 181 _total_vm_committed += vm_ptr->size();
zgu@4193 182 _vm_data[index].inc(0, vm_ptr->size());
zgu@4193 183 }
zgu@4193 184 vm_ptr = (VMMemRegion*)vm_itr.next();
zgu@3900 185 }
zgu@3900 186 return true;
zgu@3900 187 }
zgu@3900 188
zgu@3900 189 // baseline malloc'd memory by callsites, but only the callsites with memory allocation
zgu@3900 190 // over 1KB are stored.
zgu@3900 191 bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
zgu@3900 192 assert(MemTracker::track_callsite(), "detail tracking is off");
zgu@3900 193
zgu@4193 194 MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
zgu@4193 195 MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
zgu@4193 196 MallocCallsitePointer malloc_callsite;
zgu@3900 197
zgu@4193 198 // initialize malloc callsite array
zgu@3900 199 if (_malloc_cs == NULL) {
zgu@3900 200 _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
zgu@3900 201 // out of native memory
zgu@4193 202 if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
zgu@3900 203 return false;
zgu@3900 204 }
zgu@3900 205 } else {
zgu@3900 206 _malloc_cs->clear();
zgu@3900 207 }
zgu@3900 208
zgu@4193 209 MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);
zgu@4193 210
zgu@4193 211 // sort into callsite pc order. Details are aggregated by callsites
zgu@4193 212 malloc_data->sort((FN_SORT)malloc_sort_by_pc);
zgu@4193 213 bool ret = true;
zgu@4193 214
zgu@3900 215 // baseline only callsites whose total malloc'd memory exceeds 1 KB
zgu@4193 216 while (malloc_ptr != NULL) {
zgu@4274 217 if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
zgu@3900 218 // skip thread stacks
zgu@4193 219 if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
zgu@4193 220 if (malloc_callsite.addr() != malloc_ptr->pc()) {
zgu@4193 221 if ((malloc_callsite.amount()/K) > 0) {
zgu@4193 222 if (!_malloc_cs->append(&malloc_callsite)) {
zgu@4193 223 ret = false;
zgu@4193 224 break;
zgu@4193 225 }
zgu@4193 226 }
zgu@4193 227 malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
zgu@4193 228 }
zgu@4193 229 malloc_callsite.inc(malloc_ptr->size());
zgu@4193 230 }
zgu@4193 231 }
zgu@4193 232 malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
zgu@4193 233 }
zgu@4193 234
zgu@4193 235 // restore to address order. Snapshot malloc data is maintained in memory
zgu@4193 236 // address order.
zgu@4193 237 malloc_data->sort((FN_SORT)malloc_sort_by_addr);
zgu@4193 238
zgu@4193 239 if (!ret) {
zgu@3900 240 return false;
zgu@3900 241 }
zgu@4193 242 // deal with last record
zgu@4193 243 if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
zgu@4193 244 if (!_malloc_cs->append(&malloc_callsite)) {
zgu@3900 245 return false;
zgu@3900 246 }
zgu@3900 247 }
zgu@3900 248 return true;
zgu@3900 249 }
zgu@3900 250
zgu@3900 251 // baseline mmap'd memory by callsites
zgu@3900 252 bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
zgu@3900 253 assert(MemTracker::track_callsite(), "detail tracking is off");
zgu@3900 254
zgu@4193 255 VMCallsitePointer vm_callsite;
zgu@4193 256 VMCallsitePointer* cur_callsite = NULL;
zgu@4193 257 MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
zgu@4193 258 VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();
zgu@3900 259
zgu@4193 260 // initialize virtual memory map array
zgu@4193 261 if (_vm_map == NULL) {
zgu@4193 262 _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
zgu@4193 263 if (_vm_map == NULL || _vm_map->out_of_memory()) {
zgu@4193 264 return false;
zgu@4193 265 }
zgu@4193 266 } else {
zgu@4193 267 _vm_map->clear();
zgu@4193 268 }
zgu@4193 269
zgu@4193 270 // initialize virtual memory callsite array
zgu@3900 271 if (_vm_cs == NULL) {
zgu@3900 272 _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
zgu@4193 273 if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
zgu@3900 274 return false;
zgu@3900 275 }
zgu@3900 276 } else {
zgu@3900 277 _vm_cs->clear();
zgu@3900 278 }
zgu@3900 279
zgu@4193 280 // consolidate virtual memory data
zgu@4193 281 VMMemRegionEx* reserved_rec = NULL;
zgu@4193 282 VMMemRegionEx* committed_rec = NULL;
zgu@4193 283
zgu@4193 284 // vm_ptr is coming in increasing base address order
zgu@4193 285 while (vm_ptr != NULL) {
zgu@4193 286 if (vm_ptr->is_reserved_region()) {
zgu@4193 287 // consolidate reserved memory regions for virtual memory map.
zgu@4193 288 // The criteria for consolidation are:
zgu@4193 289 // 1. two adjacent reserved memory regions
zgu@4193 290 // 2. belong to the same memory type
zgu@4193 291 // 3. reserved from the same callsite
zgu@4193 292 if (reserved_rec == NULL ||
zgu@4193 293 reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
zgu@4193 294 FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
zgu@4193 295 reserved_rec->pc() != vm_ptr->pc()) {
zgu@4193 296 if (!_vm_map->append(vm_ptr)) {
zgu@3900 297 return false;
zgu@3900 298 }
zgu@4193 299 // inserted reserved region, we need the pointer to the element in virtual
zgu@4193 300 // memory map array.
zgu@4193 301 reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
zgu@4193 302 } else {
zgu@4193 303 reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
zgu@3900 304 }
zgu@4193 305
zgu@4193 306 if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
zgu@3900 307 return false;
zgu@3900 308 }
zgu@4193 309 vm_callsite = VMCallsitePointer(vm_ptr->pc());
zgu@4193 310 cur_callsite = &vm_callsite;
zgu@4193 311 vm_callsite.inc(vm_ptr->size(), 0);
zgu@4193 312 } else {
zgu@4193 313 // consolidate committed memory regions for virtual memory map
zgu@4193 314 // The criteria are:
zgu@4193 315 // 1. two adjacent committed memory regions
zgu@4193 316 // 2. committed from the same callsite
zgu@4193 317 if (committed_rec == NULL ||
zgu@4193 318 committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
zgu@4193 319 committed_rec->pc() != vm_ptr->pc()) {
zgu@4193 320 if (!_vm_map->append(vm_ptr)) {
zgu@4193 321 return false;
zgu@4980 322 }
zgu@4193 323 committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
zgu@4193 324 } else {
zgu@4193 325 committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
zgu@4193 326 }
zgu@4193 327 vm_callsite.inc(0, vm_ptr->size());
zgu@4193 328 }
zgu@4193 329 vm_ptr = (VMMemRegionEx*)vm_itr.next();
zgu@4193 330 }
zgu@4193 331 // deal with last record
zgu@4193 332 if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
zgu@4193 333 return false;
zgu@4193 334 }
zgu@4193 335
zgu@4193 336 // sort it into callsite pc order. Details are aggregated by callsites
zgu@4193 337 _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);
zgu@4193 338
zgu@4193 339 // walk the array to consolidate records by pc
zgu@4193 340 MemPointerArrayIteratorImpl itr(_vm_cs);
zgu@4193 341 VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
zgu@4193 342 VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
zgu@4193 343 while (next_rec != NULL) {
zgu@4193 344 assert(callsite_rec != NULL, "Sanity check");
zgu@4193 345 if (next_rec->addr() == callsite_rec->addr()) {
zgu@4193 346 callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
zgu@4193 347 itr.remove();
zgu@4193 348 next_rec = (VMCallsitePointer*)itr.current();
zgu@4193 349 } else {
zgu@4193 350 callsite_rec = next_rec;
zgu@4193 351 next_rec = (VMCallsitePointer*)itr.next();
zgu@4193 352 }
zgu@4193 353 }
zgu@4193 354
zgu@3900 355 return true;
zgu@3900 356 }
zgu@3900 357
zgu@3900 358 // baseline a snapshot. If summary_only = false, memory usages aggregated by
zgu@3900 359 // callsites are also baselined.
zgu@4980 360 // The method call can be lengthy, especially when detail tracking info is
zgu@4980 361 // requested, so the method checks for safepoints explicitly.
zgu@3900 362 bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
zgu@4980 363 Thread* THREAD = Thread::current();
zgu@4980 364 assert(THREAD->is_Java_thread(), "must be a JavaThread");
zgu@4980 365 MutexLocker snapshot_locker(snapshot._lock);
zgu@3900 366 reset();
zgu@4980 367 _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
zgu@4980 368 if (_baselined) {
zgu@4980 369 check_safepoint((JavaThread*)THREAD);
zgu@4980 370 _baselined = baseline_vm_summary(snapshot._vm_ptrs);
zgu@4980 371 }
zgu@4400 372 _number_of_classes = snapshot.number_of_classes();
zgu@3900 373
zgu@3900 374 if (!summary_only && MemTracker::track_callsite() && _baselined) {
zgu@4980 375 check_safepoint((JavaThread*)THREAD);
zgu@4980 376 _baselined = baseline_malloc_details(snapshot._alloc_ptrs);
zgu@4980 377 if (_baselined) {
zgu@4980 378 check_safepoint((JavaThread*)THREAD);
zgu@4980 379 _baselined = baseline_vm_details(snapshot._vm_ptrs);
zgu@4980 380 }
zgu@3900 381 }
zgu@3900 382 return _baselined;
zgu@3900 383 }
zgu@3900 384
zgu@3900 385
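// map a memory type flag to its index in MemType2NameMap (and the per-type
// summary arrays); asserts and returns -1 if the flag is unknown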
zgu@3900 386 int MemBaseline::flag2index(MEMFLAGS flag) const {
zgu@3900 387 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
zgu@3900 388 if (MemType2NameMap[index]._flag == flag) {
zgu@3900 389 return index;
zgu@3900 390 }
zgu@3900 391 }
zgu@3900 392 assert(false, "no type");
zgu@3900 393 return -1;
zgu@3900 394 }
zgu@3900 395
zgu@3900 396 const char* MemBaseline::type2name(MEMFLAGS type) {
zgu@3900 397 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
zgu@3900 398 if (MemType2NameMap[index]._flag == type) {
zgu@3900 399 return MemType2NameMap[index]._name;
zgu@3900 400 }
zgu@3900 401 }
zgu@4193 402 assert(false, err_msg("bad type %x", type));
zgu@3900 403 return NULL;
zgu@3900 404 }
zgu@3900 405
zgu@3900 406
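// copy another baseline, including per-callsite details when callsite
// tracking is enabled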
zgu@3900 407 MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
zgu@3900 408 _total_malloced = other._total_malloced;
zgu@3900 409 _total_vm_reserved = other._total_vm_reserved;
zgu@3900 410 _total_vm_committed = other._total_vm_committed;
zgu@3900 411
zgu@3900 412 _baselined = other._baselined;
zgu@3900 413 _number_of_classes = other._number_of_classes;
zgu@3900 414
zgu@3900 415 for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
zgu@3900 416 _malloc_data[index] = other._malloc_data[index];
zgu@3900 417 _vm_data[index] = other._vm_data[index];
zgu@3900 418 _arena_data[index] = other._arena_data[index];
zgu@3900 419 }
zgu@3900 420
zgu@3900 421 if (MemTracker::track_callsite()) {
zgu@3900 422 assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
zgu@3900 423 assert(other._malloc_cs != NULL && other._vm_cs != NULL,
zgu@3900 424 "not properly baselined");
zgu@3900 425 _malloc_cs->clear();
zgu@3900 426 _vm_cs->clear();
zgu@3900 427 int index;
zgu@3900 428 for (index = 0; index < other._malloc_cs->length(); index ++) {
zgu@3900 429 _malloc_cs->append(other._malloc_cs->at(index));
zgu@3900 430 }
zgu@3900 431
zgu@3900 432 for (index = 0; index < other._vm_cs->length(); index ++) {
zgu@3900 433 _vm_cs->append(other._vm_cs->at(index));
zgu@3900 434 }
zgu@3900 435 }
zgu@3900 436 return *this;
zgu@3900 437 }
zgu@3900 438
zgu@3900 439 /* compare functions for sorting */
zgu@3900 440
zgu@3900 441 // sort snapshot malloc'd records in callsite pc order
zgu@3900 442 int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
zgu@3900 443 assert(MemTracker::track_callsite(),"Just check");
zgu@3900 444 const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
zgu@3900 445 const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
zgu@3900 446 return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
zgu@3900 447 }
zgu@3900 448
zgu@3900 449 // sort baselined malloc'd records in size order
zgu@3900 450 int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
zgu@3900 451 assert(MemTracker::is_on(), "Just check");
zgu@3900 452 const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
zgu@3900 453 const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
zgu@3900 454 return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
zgu@3900 455 }
zgu@3900 456
zgu@3900 457 // sort baselined malloc'd records in callsite pc order
zgu@3900 458 int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
zgu@3900 459 assert(MemTracker::is_on(), "Just check");
zgu@3900 460 const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
zgu@3900 461 const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
zgu@3900 462 return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
zgu@3900 463 }
zgu@3900 464
zgu@3900 465
zgu@3900 466 // sort baselined mmap'd records in size (reserved size) order
zgu@3900 467 int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
zgu@3900 468 assert(MemTracker::is_on(), "Just check");
zgu@3900 469 const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
zgu@3900 470 const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
zgu@3900 471 return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
zgu@3900 472 }
zgu@3900 473
zgu@3900 474 // sort baselined mmap'd records in callsite pc order
zgu@3900 475 int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
zgu@3900 476 assert(MemTracker::is_on(), "Just check");
zgu@3900 477 const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
zgu@3900 478 const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
zgu@3900 479 return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
zgu@3900 480 }
zgu@3900 481
zgu@3900 482
zgu@3900 483 // sort snapshot malloc'd records in memory block address order
zgu@3900 484 int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
zgu@3900 485 assert(MemTracker::is_on(), "Just check");
zgu@3900 486 const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
zgu@3900 487 const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
zgu@3900 488 int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
zgu@5375 489 assert(p1 == p2 || delta != 0, "dup pointer");
zgu@3900 490 return delta;
zgu@3900 491 }
zgu@3900 492
