/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"


MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtTest,       "Test"},
  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                             // behind
};

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}


void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}


void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
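  // _vm_map is only allocated when detail (callsite) tracking is on, so it
  // may legitimately still be NULL at this point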
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // We do have anonymous arenas; they are either used as value objects,
      // which are embedded inside other objects, or used as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // see if an arena memory record is present
        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
                 "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // subtract used arena size to get the size of arena chunks in the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we really don't know how many chunks are in the free list, so just set the
  // counter to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// check if there is a safepoint in progress, if so, block the thread
// for the safepoint
void MemBaseline::check_safepoint(JavaThread* thr) {
  if (SafepointSynchronize::is_synchronizing()) {
    // grab and drop the SR_lock to honor the safepoint protocol
    MutexLocker ml(thr->SR_lock());
  }
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsites, but only the callsites with memory allocation
// over 1KB are stored.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // baseline memory that totals over 1 KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
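  // (the array belongs to the snapshot, so it has to be handed back in the
  //  ordering the snapshot relies on)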
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with the last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx* reserved_rec = NULL;
  VMMemRegionEx* committed_rec = NULL;

  // vm_ptr comes in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for the virtual memory map.
      // The criteria for consolidation are:
      // 1. two adjacent reserved memory regions
      // 2. belong to the same memory type
      // 3. reserved from the same callsite
      if (reserved_rec == NULL ||
          reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
          FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
          reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // inserted a reserved region; we need the pointer to the element in the
        // virtual memory map array.
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for the virtual memory map.
      // The criteria are:
      // 1. two adjacent committed memory regions
      // 2. committed from the same callsite
      if (committed_rec == NULL ||
          committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
          committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with the last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort it into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only = false, memory usages aggregated by
// callsites are also baselined.
// The method call can be lengthy, especially when detail tracking info is
// requested. So the method checks for safepoint explicitly.
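// Each baselining step below is separated by a check_safepoint() call, which
// briefly takes and releases the thread's SR_lock so that a pending safepoint
// is not held up while this thread works through the snapshot under its lock.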
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  Thread* THREAD = Thread::current();
  assert(THREAD->is_Java_thread(), "must be a JavaThread");
  MutexLocker snapshot_locker(snapshot._lock);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
  if (_baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
  }
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs);
    if (_baselined) {
      check_safepoint((JavaThread*)THREAD);
      _baselined = baseline_vm_details(snapshot._vm_ptrs);
    }
  }
  return _baselined;
}


int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}


MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort baselined mmap'd records in size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}
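// Note: only malloc_sort_by_pc, malloc_sort_by_addr and bl_vm_sort_by_pc are
// invoked in this file. The remaining bl_*_sort_by_size / bl_*_sort_by_pc
// comparators are exported as FN_SORT callbacks; they are presumably used to
// order the baselined callsite arrays (_malloc_cs, _vm_cs) when a report is
// generated, e.g. a size-ordered detail report.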