src/share/vm/services/memBaseline.cpp

author: aoqi
date: Wed, 27 Apr 2016 01:25:04 +0800
changeset 0: f90c822e73f8
child 6876: 710a3c8b516e
permissions: -rw-r--r--

Initial load
http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
changeset: 6782:28b50d07f6f8
tag: jdk8u25-b17

/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

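// Display names for each memory type, used when reporting baselined summaries.
// flag2index() and type2name() below do a linear scan of this table, so the
// entries need not be in enum order, but every MEMFLAGS value that can appear
// on a record must have an entry here.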
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtTracing,    "Tracing"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtTest,       "Test"},
  {mtNone,       "Unknown"}  // This can happen when type-tagging records lag
                             // behind the allocation records.
};

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}


void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}

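// reset() empties the baselined data so the object can be reused; unlike
// clear() above, it does not free the callsite and virtual memory map arrays,
// it only clears their contents.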
void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
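// Note: the records arrive sorted by address, so an arena's memory-size record,
// when present, immediately follows its arena record; the peek_next() call
// below relies on this adjacency (see the is_memory_record_of_arena assert).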
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // We do have anonymous arenas, which are either used as value objects
      // embedded inside other objects, or used as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // see if an arena memory record is present
        MemPointerRecord* next_malloc_ptr = (MemPointerRecord*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
             "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecord*)malloc_itr.next();
  }

  // subtract the used arena size to get the size of arena chunks in the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we really don't know how many chunks are in the free list, so just set the
  // counter to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// check if there is a safepoint in progress; if so, block the thread
// for the safepoint
void MemBaseline::check_safepoint(JavaThread* thr) {
  if (SafepointSynchronize::is_synchronizing()) {
    // grab and drop the SR_lock to honor the safepoint protocol
    MutexLocker ml(thr->SR_lock());
  }
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr(const_cast<MemPointerArray*>(vm_records));
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
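      // Note: this relies on a committed region always following its reserved
      // region in the address-sorted record array, so that 'index' computed
      // for that reserved region is still valid here.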
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsites; only callsites with more than 1KB of
// outstanding memory are stored.
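// The records are sorted into callsite pc order first, so all allocations from
// the same callsite are adjacent; the loop below accumulates into
// malloc_callsite and flushes it to _malloc_cs whenever the pc changes.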
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order; details are aggregated by callsite
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // only baseline callsites whose total exceeds 1 KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore address order; snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with the last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr(const_cast<MemPointerArray*>(vm_records));
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx* reserved_rec = NULL;
  VMMemRegionEx* committed_rec = NULL;

  // vm_ptr records arrive in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for the virtual memory map.
      // The criteria for consolidation are:
      //   1. the two reserved memory regions are adjacent
      //   2. they belong to the same memory type
      //   3. they were reserved from the same callsite
      if (reserved_rec == NULL ||
        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
        reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // after inserting the reserved region, we need a pointer to the element
        // stored in the virtual memory map array, so later records can expand it.
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for the virtual memory map.
      // The criteria are:
      //   1. the two committed memory regions are adjacent
      //   2. they were committed from the same callsite
      if (committed_rec == NULL ||
        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
        committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with the last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort into callsite pc order; details are aggregated by callsite
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records with the same pc
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only = false, memory usages aggregated by
// callsites are also baselined.
// The method call can be lengthy, especially when detail tracking info is
// requested, so the method checks for safepoints explicitly.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  Thread* THREAD = Thread::current();
  assert(THREAD->is_Java_thread(), "must be a JavaThread");
  MutexLocker snapshot_locker(snapshot._lock);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
  if (_baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
  }
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs);
    if (_baselined) {
      check_safepoint((JavaThread*)THREAD);
      _baselined = baseline_vm_details(snapshot._vm_ptrs);
    }
  }
  return _baselined;
}


int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}

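// Assignment copies the summary data directly; when callsite (detail) tracking
// is on, both sides must already own callsite arrays, and the entries are
// copied element by element (see the asserts below).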
MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */

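// Note: the *_sort_by_size comparators pass mp2 before mp1 to UNSIGNED_COMPARE,
// so larger amounts sort first (descending); the pc and address comparators
// sort ascending.
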
// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort baselined mmap'd records in size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(p1 == p2 || delta != 0, "dup pointer");
  return delta;
}
