zgu@3900: /* drchase@6680: * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. zgu@3900: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. zgu@3900: * zgu@3900: * This code is free software; you can redistribute it and/or modify it zgu@3900: * under the terms of the GNU General Public License version 2 only, as zgu@3900: * published by the Free Software Foundation. zgu@3900: * zgu@3900: * This code is distributed in the hope that it will be useful, but WITHOUT zgu@3900: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or zgu@3900: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License zgu@3900: * version 2 for more details (a copy is included in the LICENSE file that zgu@3900: * accompanied this code). zgu@3900: * zgu@3900: * You should have received a copy of the GNU General Public License version zgu@3900: * 2 along with this work; if not, write to the Free Software Foundation, zgu@3900: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. zgu@3900: * zgu@3900: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA zgu@3900: * or visit www.oracle.com if you need additional information or have any zgu@3900: * questions. zgu@3900: * zgu@3900: */ zgu@3900: zgu@3900: #include "precompiled.hpp" zgu@3900: #include "runtime/mutexLocker.hpp" zgu@3900: #include "utilities/decoder.hpp" zgu@3900: #include "services/memBaseline.hpp" zgu@3900: #include "services/memPtr.hpp" zgu@3900: #include "services/memPtrArray.hpp" zgu@3900: #include "services/memSnapshot.hpp" zgu@3900: #include "services/memTracker.hpp" zgu@3900: drchase@6680: PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC drchase@6680: zgu@4248: #ifdef ASSERT zgu@4248: zgu@4248: void decode_pointer_record(MemPointerRecord* rec) { zgu@4248: tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(), zgu@4248: rec->addr() + rec->size(), (int)rec->size()); zgu@4248: tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags()))); zgu@4248: if (rec->is_vm_pointer()) { zgu@4248: if (rec->is_allocation_record()) { zgu@4248: tty->print_cr(" (reserve)"); zgu@4248: } else if (rec->is_commit_record()) { zgu@4248: tty->print_cr(" (commit)"); zgu@4248: } else if (rec->is_uncommit_record()) { zgu@4248: tty->print_cr(" (uncommit)"); zgu@4248: } else if (rec->is_deallocation_record()) { zgu@4248: tty->print_cr(" (release)"); zgu@4248: } else { zgu@4248: tty->print_cr(" (tag)"); zgu@4248: } zgu@4248: } else { zgu@4274: if (rec->is_arena_memory_record()) { zgu@4248: tty->print_cr(" (arena size)"); zgu@4248: } else if (rec->is_allocation_record()) { zgu@4248: tty->print_cr(" (malloc)"); zgu@4248: } else { zgu@4248: tty->print_cr(" (free)"); zgu@4248: } zgu@4248: } zgu@4248: if (MemTracker::track_callsite()) { zgu@4248: char buf[1024]; zgu@4248: address pc = ((MemPointerRecordEx*)rec)->pc(); zgu@4248: if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) { zgu@4248: tty->print_cr("\tfrom %s", buf); zgu@4248: } else { zgu@4248: tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc); zgu@4248: } zgu@4248: } zgu@4248: } zgu@4248: zgu@4248: void decode_vm_region_record(VMMemRegion* rec) { zgu@4248: tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(), zgu@4248: rec->addr() + rec->size()); zgu@4248: tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags()))); zgu@4248: if (rec->is_allocation_record()) { zgu@4248: tty->print_cr(" (reserved)"); zgu@4248: } else if (rec->is_commit_record()) { zgu@4248: tty->print_cr(" (committed)"); zgu@4248: } else { zgu@4248: ShouldNotReachHere(); zgu@4248: } zgu@4248: if (MemTracker::track_callsite()) { zgu@4248: char buf[1024]; zgu@4248: address pc = ((VMMemRegionEx*)rec)->pc(); zgu@4248: if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) { zgu@4248: tty->print_cr("\tfrom %s", buf); zgu@4248: } else { zgu@4248: tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc); zgu@4248: } zgu@4248: zgu@4248: } zgu@4248: } zgu@4248: zgu@4248: #endif zgu@4248: zgu@4193: zgu@4193: bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) { zgu@4193: VMMemRegionEx new_rec; zgu@4193: assert(rec->is_allocation_record() || rec->is_commit_record(), zgu@4193: "Sanity check"); zgu@4193: if (MemTracker::track_callsite()) { zgu@4193: new_rec.init((MemPointerRecordEx*)rec); zgu@4193: } else { zgu@4193: new_rec.init(rec); zgu@4193: } zgu@4193: return insert(&new_rec); zgu@4193: } zgu@4193: zgu@4193: bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) { zgu@4193: VMMemRegionEx new_rec; zgu@4193: assert(rec->is_allocation_record() || rec->is_commit_record(), zgu@4193: "Sanity check"); zgu@4193: if (MemTracker::track_callsite()) { zgu@4193: new_rec.init((MemPointerRecordEx*)rec); zgu@4193: } else { zgu@4193: new_rec.init(rec); zgu@4193: } zgu@4193: return insert_after(&new_rec); zgu@4193: } zgu@4193: zgu@4193: // we don't consolidate reserved regions, since they may be categorized zgu@4193: // in different types. zgu@4193: bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) { zgu@4193: assert(rec->is_allocation_record(), "Sanity check"); zgu@4272: VMMemRegion* reserved_region = (VMMemRegion*)current(); zgu@4193: zgu@4193: // we don't have anything yet zgu@4272: if (reserved_region == NULL) { zgu@4193: return insert_record(rec); zgu@4193: } zgu@4193: zgu@4272: assert(reserved_region->is_reserved_region(), "Sanity check"); zgu@4193: // duplicated records zgu@4272: if (reserved_region->is_same_region(rec)) { zgu@4193: return true; zgu@4193: } zgu@4272: // Overlapping stack regions indicate that a JNI thread failed to zgu@4272: // detach from the VM before exiting. This leaks the JavaThread object. zgu@4272: if (CheckJNICalls) { zgu@4272: guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack || zgu@4272: !reserved_region->overlaps_region(rec), zgu@4272: "Attached JNI thread exited without being detached"); zgu@4272: } zgu@4272: // otherwise, we should not have overlapping reserved regions zgu@4272: assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack || zgu@4272: reserved_region->base() > rec->addr(), "Just check: locate()"); zgu@4272: assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack || zgu@4272: !reserved_region->overlaps_region(rec), "overlapping reserved regions"); zgu@4272: zgu@4193: return insert_record(rec); zgu@4193: } zgu@4193: zgu@4193: // we do consolidate committed regions zgu@4193: bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) { zgu@4193: assert(rec->is_commit_record(), "Sanity check"); zgu@4248: VMMemRegion* reserved_rgn = (VMMemRegion*)current(); zgu@4248: assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec), zgu@4193: "Sanity check"); zgu@4193: zgu@4193: // thread's native stack is always marked as "committed", ignore zgu@4193: // the "commit" operation for creating stack guard pages zgu@4248: if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack && zgu@4193: FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) { zgu@4193: return true; zgu@4193: } zgu@4193: zgu@4248: // if the reserved region has any committed regions zgu@4248: VMMemRegion* committed_rgn = (VMMemRegion*)next(); zgu@4248: while (committed_rgn != NULL && committed_rgn->is_committed_region()) { zgu@4193: // duplicated commit records zgu@4248: if(committed_rgn->contains_region(rec)) { zgu@4193: return true; zgu@4248: } else if (committed_rgn->overlaps_region(rec)) { zgu@4248: // overlaps front part zgu@4248: if (rec->addr() < committed_rgn->addr()) { zgu@4248: committed_rgn->expand_region(rec->addr(), zgu@4248: committed_rgn->addr() - rec->addr()); zgu@4193: } else { zgu@4248: // overlaps tail part zgu@4248: address committed_rgn_end = committed_rgn->addr() + zgu@4248: committed_rgn->size(); zgu@4248: assert(committed_rgn_end < rec->addr() + rec->size(), zgu@4248: "overlap tail part"); zgu@4248: committed_rgn->expand_region(committed_rgn_end, zgu@4248: (rec->addr() + rec->size()) - committed_rgn_end); zgu@4193: } zgu@4248: } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) { zgu@4248: // adjunct each other zgu@4248: committed_rgn->expand_region(rec->addr(), rec->size()); zgu@4193: VMMemRegion* next_reg = (VMMemRegion*)next(); zgu@4193: // see if we can consolidate next committed region zgu@4193: if (next_reg != NULL && next_reg->is_committed_region() && zgu@4248: next_reg->base() == committed_rgn->base() + committed_rgn->size()) { zgu@4248: committed_rgn->expand_region(next_reg->base(), next_reg->size()); zgu@4248: // delete merged region zgu@4193: remove(); zgu@4193: } zgu@4193: return true; zgu@4248: } else if (committed_rgn->base() > rec->addr()) { zgu@4248: // found the location, insert this committed region zgu@4248: return insert_record(rec); zgu@4193: } zgu@4248: committed_rgn = (VMMemRegion*)next(); zgu@4193: } zgu@4193: return insert_record(rec); zgu@4193: } zgu@4193: zgu@4193: bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) { zgu@4193: assert(rec->is_uncommit_record(), "sanity check"); zgu@4193: VMMemRegion* cur; zgu@4193: cur = (VMMemRegion*)current(); zgu@4193: assert(cur->is_reserved_region() && cur->contains_region(rec), zgu@4193: "Sanity check"); zgu@4193: // thread's native stack is always marked as "committed", ignore zgu@4193: // the "commit" operation for creating stack guard pages zgu@4193: if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack && zgu@4193: FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) { zgu@4193: return true; zgu@4193: } zgu@4193: zgu@4193: cur = (VMMemRegion*)next(); zgu@4193: while (cur != NULL && cur->is_committed_region()) { zgu@4193: // region already uncommitted, must be due to duplicated record zgu@4193: if (cur->addr() >= rec->addr() + rec->size()) { zgu@4193: break; zgu@4193: } else if (cur->contains_region(rec)) { zgu@4193: // uncommit whole region zgu@4193: if (cur->is_same_region(rec)) { zgu@4193: remove(); zgu@4193: break; zgu@4193: } else if (rec->addr() == cur->addr() || zgu@4193: rec->addr() + rec->size() == cur->addr() + cur->size()) { zgu@4193: // uncommitted from either end of current memory region. zgu@4193: cur->exclude_region(rec->addr(), rec->size()); zgu@4193: break; zgu@4193: } else { // split the committed region and release the middle zgu@4193: address high_addr = cur->addr() + cur->size(); zgu@4193: size_t sz = high_addr - rec->addr(); zgu@4193: cur->exclude_region(rec->addr(), sz); zgu@4193: sz = high_addr - (rec->addr() + rec->size()); zgu@4193: if (MemTracker::track_callsite()) { zgu@4193: MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz, zgu@4193: ((VMMemRegionEx*)cur)->pc()); zgu@4193: return insert_record_after(&tmp); zgu@4193: } else { zgu@4193: MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz); zgu@4193: return insert_record_after(&tmp); zgu@4193: } zgu@4193: } zgu@4193: } zgu@4193: cur = (VMMemRegion*)next(); zgu@4193: } zgu@4193: zgu@4193: // we may not find committed record due to duplicated records zgu@4193: return true; zgu@4193: } zgu@4193: zgu@4193: bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) { zgu@4193: assert(rec->is_deallocation_record(), "Sanity check"); zgu@4193: VMMemRegion* cur = (VMMemRegion*)current(); zgu@4193: assert(cur->is_reserved_region() && cur->contains_region(rec), zgu@4193: "Sanity check"); zgu@4285: if (rec->is_same_region(cur)) { zgu@5053: zgu@5053: // In snapshot, the virtual memory records are sorted in following orders: zgu@5053: // 1. virtual memory's base address zgu@5053: // 2. virtual memory reservation record, followed by commit records within this reservation. zgu@5053: // The commit records are also in base address order. zgu@5053: // When a reserved region is released, we want to remove the reservation record and all zgu@5053: // commit records following it. zgu@4193: #ifdef ASSERT zgu@5053: address low_addr = cur->addr(); zgu@5053: address high_addr = low_addr + cur->size(); zgu@4193: #endif zgu@5053: // remove virtual memory reservation record zgu@4193: remove(); zgu@5053: // remove committed regions within above reservation zgu@5053: VMMemRegion* next_region = (VMMemRegion*)current(); zgu@5053: while (next_region != NULL && next_region->is_committed_region()) { zgu@5053: assert(next_region->addr() >= low_addr && zgu@5053: next_region->addr() + next_region->size() <= high_addr, zgu@5053: "Range check"); zgu@5053: remove(); zgu@5053: next_region = (VMMemRegion*)current(); zgu@5053: } zgu@4193: } else if (rec->addr() == cur->addr() || zgu@4193: rec->addr() + rec->size() == cur->addr() + cur->size()) { zgu@4193: // released region is at either end of this region zgu@4193: cur->exclude_region(rec->addr(), rec->size()); zgu@4285: assert(check_reserved_region(), "Integrity check"); zgu@4193: } else { // split the reserved region and release the middle zgu@4193: address high_addr = cur->addr() + cur->size(); zgu@4193: size_t sz = high_addr - rec->addr(); zgu@4193: cur->exclude_region(rec->addr(), sz); zgu@4193: sz = high_addr - rec->addr() - rec->size(); zgu@4193: if (MemTracker::track_callsite()) { zgu@4193: MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz, zgu@4193: ((VMMemRegionEx*)cur)->pc()); zgu@4285: bool ret = insert_reserved_region(&tmp); zgu@4285: assert(!ret || check_reserved_region(), "Integrity check"); zgu@4285: return ret; zgu@4193: } else { zgu@4193: MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz); zgu@4285: bool ret = insert_reserved_region(&tmp); zgu@4285: assert(!ret || check_reserved_region(), "Integrity check"); zgu@4285: return ret; zgu@4193: } zgu@4193: } zgu@4193: return true; zgu@4193: } zgu@4193: zgu@4193: bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) { zgu@4193: // skip all 'commit' records associated with previous reserved region zgu@4193: VMMemRegion* p = (VMMemRegion*)next(); zgu@4193: while (p != NULL && p->is_committed_region() && zgu@4193: p->base() + p->size() < rec->addr()) { zgu@4193: p = (VMMemRegion*)next(); zgu@4193: } zgu@4193: return insert_record(rec); zgu@4193: } zgu@4193: zgu@4193: bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) { zgu@4193: assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained"); zgu@4193: address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL); zgu@4193: if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region zgu@4193: size_t sz = rgn->size() - new_rgn_size; zgu@4193: // the original region becomes 'new' region zgu@4193: rgn->exclude_region(new_rgn_addr + new_rgn_size, sz); zgu@4193: // remaining becomes next region zgu@4193: MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc); zgu@4193: return insert_reserved_region(&next_rgn); zgu@4193: } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) { zgu@4193: rgn->exclude_region(new_rgn_addr, new_rgn_size); zgu@4193: MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc); zgu@4193: return insert_reserved_region(&next_rgn); zgu@4193: } else { zgu@4193: // the orginal region will be split into three zgu@4193: address rgn_high_addr = rgn->base() + rgn->size(); zgu@4193: // first region zgu@4193: rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr)); zgu@4193: // the second region is the new region zgu@4193: MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc); zgu@4193: if (!insert_reserved_region(&new_rgn)) return false; zgu@4193: // the remaining region zgu@4193: MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), zgu@4193: rgn_high_addr - (new_rgn_addr + new_rgn_size), pc); zgu@4193: return insert_reserved_region(&rem_rgn); zgu@4193: } zgu@4193: } zgu@4193: zgu@4053: static int sort_in_seq_order(const void* p1, const void* p2) { zgu@4053: assert(p1 != NULL && p2 != NULL, "Sanity check"); zgu@4053: const MemPointerRecord* mp1 = (MemPointerRecord*)p1; zgu@4053: const MemPointerRecord* mp2 = (MemPointerRecord*)p2; zgu@4053: return (mp1->seq() - mp2->seq()); zgu@4053: } zgu@3900: zgu@4053: bool StagingArea::init() { zgu@4053: if (MemTracker::track_callsite()) { zgu@4053: _malloc_data = new (std::nothrow)MemPointerArrayImpl(); zgu@4053: _vm_data = new (std::nothrow)MemPointerArrayImpl(); zgu@3900: } else { zgu@4053: _malloc_data = new (std::nothrow)MemPointerArrayImpl(); zgu@4053: _vm_data = new (std::nothrow)MemPointerArrayImpl(); zgu@3900: } zgu@3900: zgu@4053: if (_malloc_data != NULL && _vm_data != NULL && zgu@4053: !_malloc_data->out_of_memory() && zgu@4053: !_vm_data->out_of_memory()) { zgu@3900: return true; zgu@4053: } else { zgu@4053: if (_malloc_data != NULL) delete _malloc_data; zgu@4053: if (_vm_data != NULL) delete _vm_data; zgu@4053: _malloc_data = NULL; zgu@4053: _vm_data = NULL; zgu@4053: return false; zgu@3900: } zgu@3900: } zgu@3900: zgu@3900: zgu@4193: VMRecordIterator StagingArea::virtual_memory_record_walker() { zgu@4053: MemPointerArray* arr = vm_data(); zgu@4053: // sort into seq number order zgu@4053: arr->sort((FN_SORT)sort_in_seq_order); zgu@4193: return VMRecordIterator(arr); zgu@4053: } zgu@3900: zgu@3900: zgu@3900: MemSnapshot::MemSnapshot() { zgu@3900: if (MemTracker::track_callsite()) { zgu@3900: _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl(); zgu@3900: _vm_ptrs = new (std::nothrow)MemPointerArrayImpl(64, true); zgu@3900: } else { zgu@3900: _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl(); zgu@3900: _vm_ptrs = new (std::nothrow)MemPointerArrayImpl(64, true); zgu@3900: } zgu@3900: zgu@4053: _staging_area.init(); zgu@3936: _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock"); zgu@3900: NOT_PRODUCT(_untracked_count = 0;) zgu@4400: _number_of_classes = 0; zgu@3900: } zgu@3900: zgu@3900: MemSnapshot::~MemSnapshot() { zgu@3900: assert(MemTracker::shutdown_in_progress(), "native memory tracking still on"); zgu@3900: { zgu@3900: MutexLockerEx locker(_lock); zgu@3900: if (_alloc_ptrs != NULL) { zgu@3900: delete _alloc_ptrs; zgu@3900: _alloc_ptrs = NULL; zgu@3900: } zgu@3900: zgu@3900: if (_vm_ptrs != NULL) { zgu@3900: delete _vm_ptrs; zgu@3900: _vm_ptrs = NULL; zgu@3900: } zgu@3900: } zgu@3900: zgu@3900: if (_lock != NULL) { zgu@3900: delete _lock; zgu@3900: _lock = NULL; zgu@3900: } zgu@3900: } zgu@3900: zgu@4274: zgu@4274: void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) { zgu@3900: assert(dest != NULL && src != NULL, "Just check"); zgu@3900: assert(dest->addr() == src->addr(), "Just check"); zgu@4274: assert(dest->seq() > 0 && src->seq() > 0, "not sequenced"); zgu@3900: zgu@4274: if (MemTracker::track_callsite()) { zgu@4274: *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src; zgu@4274: } else { zgu@4274: *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src; zgu@4274: } zgu@4274: } zgu@4274: zgu@4274: void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) { zgu@4274: assert(src != NULL && dest != NULL, "Just check"); zgu@4274: assert(dest->seq() == 0 && src->seq() >0, "cast away sequence"); zgu@3900: zgu@3900: if (MemTracker::track_callsite()) { zgu@3900: *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src; zgu@3900: } else { zgu@4274: *(MemPointerRecord*)dest = *(MemPointerRecord*)src; zgu@3900: } zgu@3900: } zgu@3900: zgu@4274: // merge a recorder to the staging area zgu@3900: bool MemSnapshot::merge(MemRecorder* rec) { zgu@3900: assert(rec != NULL && !rec->out_of_memory(), "Just check"); zgu@3900: zgu@3900: SequencedRecordIterator itr(rec->pointer_itr()); zgu@3900: zgu@3900: MutexLockerEx lock(_lock, true); zgu@4053: MemPointerIterator malloc_staging_itr(_staging_area.malloc_data()); zgu@4274: MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current(); zgu@4274: MemPointerRecord* matched_rec; zgu@4274: zgu@4274: while (incoming_rec != NULL) { zgu@4274: if (incoming_rec->is_vm_pointer()) { zgu@4053: // we don't do anything with virtual memory records during merge zgu@4274: if (!_staging_area.vm_data()->append(incoming_rec)) { zgu@3900: return false; zgu@3900: } zgu@4053: } else { zgu@4193: // locate matched record and/or also position the iterator to proper zgu@4193: // location for this incoming record. zgu@4274: matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr()); zgu@4274: // we have not seen this memory block in this generation, zgu@4274: // so just add to staging area zgu@4274: if (matched_rec == NULL) { zgu@4274: if (!malloc_staging_itr.insert(incoming_rec)) { zgu@4053: return false; zgu@4053: } zgu@4274: } else if (incoming_rec->addr() == matched_rec->addr()) { zgu@4274: // whoever has higher sequence number wins zgu@4274: if (incoming_rec->seq() > matched_rec->seq()) { zgu@4274: copy_seq_pointer(matched_rec, incoming_rec); zgu@4053: } zgu@4274: } else if (incoming_rec->addr() < matched_rec->addr()) { zgu@4274: if (!malloc_staging_itr.insert(incoming_rec)) { zgu@3900: return false; zgu@3900: } zgu@3900: } else { zgu@4274: ShouldNotReachHere(); zgu@3900: } zgu@3900: } zgu@4274: incoming_rec = (MemPointerRecord*)itr.next(); zgu@3900: } zgu@3900: NOT_PRODUCT(void check_staging_data();) zgu@3900: return true; zgu@3900: } zgu@3900: zgu@3900: zgu@3900: // promote data to next generation zgu@4400: bool MemSnapshot::promote(int number_of_classes) { zgu@4053: assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check"); zgu@4053: assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL, zgu@4053: "Just check"); zgu@3900: MutexLockerEx lock(_lock, true); zgu@4053: zgu@4053: MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker(); zgu@4053: bool promoted = false; zgu@4053: if (promote_malloc_records(&malloc_itr)) { zgu@4193: VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker(); zgu@4053: if (promote_virtual_memory_records(&vm_itr)) { zgu@4053: promoted = true; zgu@4053: } zgu@4053: } zgu@4053: zgu@4053: NOT_PRODUCT(check_malloc_pointers();) zgu@4053: _staging_area.clear(); zgu@4400: _number_of_classes = number_of_classes; zgu@4053: return promoted; zgu@4053: } zgu@4053: zgu@4053: bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) { zgu@4053: MemPointerIterator malloc_snapshot_itr(_alloc_ptrs); zgu@4053: MemPointerRecord* new_rec = (MemPointerRecord*)itr->current(); zgu@4053: MemPointerRecord* matched_rec; zgu@4053: while (new_rec != NULL) { zgu@4053: matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr()); zgu@4053: // found matched memory block zgu@4053: if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) { zgu@4193: // snapshot already contains 'live' records zgu@4274: assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(), zgu@4053: "Sanity check"); zgu@4053: // update block states zgu@4274: if (new_rec->is_allocation_record()) { zgu@4274: assign_pointer(matched_rec, new_rec); zgu@4274: } else if (new_rec->is_arena_memory_record()) { zgu@4274: if (new_rec->size() == 0) { zgu@4274: // remove size record once size drops to 0 zgu@4274: malloc_snapshot_itr.remove(); zgu@4274: } else { zgu@4274: assign_pointer(matched_rec, new_rec); zgu@4274: } zgu@4053: } else { zgu@4053: // a deallocation record zgu@4053: assert(new_rec->is_deallocation_record(), "Sanity check"); zgu@4053: // an arena record can be followed by a size record, we need to remove both zgu@4053: if (matched_rec->is_arena_record()) { zgu@4053: MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next(); zgu@4641: if (next != NULL && next->is_arena_memory_record() && zgu@4641: next->is_memory_record_of_arena(matched_rec)) { zgu@4053: malloc_snapshot_itr.remove(); zgu@3900: } zgu@4053: } zgu@4053: // the memory is deallocated, remove related record(s) zgu@4053: malloc_snapshot_itr.remove(); zgu@4053: } zgu@4053: } else { zgu@4274: // don't insert size 0 record zgu@4274: if (new_rec->is_arena_memory_record() && new_rec->size() == 0) { zgu@4274: new_rec = NULL; zgu@4053: } zgu@4274: zgu@4053: if (new_rec != NULL) { zgu@4274: if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) { zgu@4053: if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) { zgu@4053: if (!malloc_snapshot_itr.insert_after(new_rec)) { zgu@4053: return false; zgu@4053: } zgu@3900: } else { zgu@4053: if (!malloc_snapshot_itr.insert(new_rec)) { zgu@4053: return false; zgu@4053: } zgu@4053: } zgu@4053: } zgu@4053: #ifndef PRODUCT zgu@4053: else if (!has_allocation_record(new_rec->addr())) { zgu@4053: // NMT can not track some startup memory, which is allocated before NMT is on zgu@4053: _untracked_count ++; zgu@4053: } zgu@4053: #endif zgu@4053: } zgu@4053: } zgu@4053: new_rec = (MemPointerRecord*)itr->next(); zgu@4053: } zgu@4053: return true; zgu@4053: } zgu@4053: zgu@4053: bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) { zgu@4053: VMMemPointerIterator vm_snapshot_itr(_vm_ptrs); zgu@4053: MemPointerRecord* new_rec = (MemPointerRecord*)itr->current(); zgu@4193: VMMemRegion* reserved_rec; zgu@4053: while (new_rec != NULL) { zgu@4053: assert(new_rec->is_vm_pointer(), "Sanity check"); zgu@4193: zgu@4193: // locate a reserved region that contains the specified address, or zgu@4193: // the nearest reserved region has base address just above the specified zgu@4193: // address zgu@4193: reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr()); zgu@4193: if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) { zgu@4053: // snapshot can only have 'live' records zgu@4193: assert(reserved_rec->is_reserved_region(), "Sanity check"); zgu@4193: if (new_rec->is_allocation_record()) { zgu@4193: if (!reserved_rec->is_same_region(new_rec)) { zgu@4193: // only deal with split a bigger reserved region into smaller regions. zgu@4193: // So far, CDS is the only use case. zgu@4193: if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) { zgu@4053: return false; zgu@3900: } zgu@3900: } zgu@4193: } else if (new_rec->is_uncommit_record()) { zgu@4193: if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) { zgu@4193: return false; zgu@4193: } zgu@4193: } else if (new_rec->is_commit_record()) { zgu@4193: // insert or expand existing committed region to cover this zgu@4193: // newly committed region zgu@4193: if (!vm_snapshot_itr.add_committed_region(new_rec)) { zgu@4193: return false; zgu@4193: } zgu@4193: } else if (new_rec->is_deallocation_record()) { zgu@4193: // release part or all memory region zgu@4193: if (!vm_snapshot_itr.remove_released_region(new_rec)) { zgu@4193: return false; zgu@4193: } zgu@4193: } else if (new_rec->is_type_tagging_record()) { zgu@4193: // tag this reserved virtual memory range to a memory type. Can not re-tag a memory range zgu@4193: // to different type. zgu@4193: assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone || zgu@4193: FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()), zgu@4193: "Sanity check"); zgu@4193: reserved_rec->tag(new_rec->flags()); zgu@4193: } else { zgu@4193: ShouldNotReachHere(); zgu@4193: } zgu@4193: } else { zgu@4193: /* zgu@4193: * The assertion failure indicates mis-matched virtual memory records. The likely zgu@4193: * scenario is, that some virtual memory operations are not going through os::xxxx_memory() zgu@4193: * api, which have to be tracked manually. (perfMemory is an example). zgu@4193: */ zgu@4193: assert(new_rec->is_allocation_record(), "Sanity check"); zgu@4193: if (!vm_snapshot_itr.add_reserved_region(new_rec)) { zgu@4193: return false; zgu@4193: } zgu@3900: } zgu@4053: new_rec = (MemPointerRecord*)itr->next(); zgu@4053: } zgu@4053: return true; zgu@3900: } zgu@3900: zgu@3994: #ifndef PRODUCT zgu@3900: void MemSnapshot::print_snapshot_stats(outputStream* st) { zgu@3900: st->print_cr("Snapshot:"); zgu@3900: st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(), zgu@3900: (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K); zgu@3900: zgu@3900: st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(), zgu@3900: (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K); zgu@3900: zgu@4053: st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(), zgu@4053: _staging_area.malloc_data()->capacity(), zgu@4053: (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(), zgu@4053: _staging_area.malloc_data()->instance_size()/K); zgu@4053: zgu@4053: st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(), zgu@4053: _staging_area.vm_data()->capacity(), zgu@4053: (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(), zgu@4053: _staging_area.vm_data()->instance_size()/K); zgu@3900: zgu@3900: st->print_cr("\tUntracked allocation: %d", _untracked_count); zgu@3900: } zgu@3900: zgu@3900: void MemSnapshot::check_malloc_pointers() { zgu@3900: MemPointerArrayIteratorImpl mItr(_alloc_ptrs); zgu@3900: MemPointerRecord* p = (MemPointerRecord*)mItr.current(); zgu@3900: MemPointerRecord* prev = NULL; zgu@3900: while (p != NULL) { zgu@3900: if (prev != NULL) { zgu@3900: assert(p->addr() >= prev->addr(), "sorting order"); zgu@3900: } zgu@3900: prev = p; zgu@3900: p = (MemPointerRecord*)mItr.next(); zgu@3900: } zgu@3900: } zgu@3900: zgu@3994: bool MemSnapshot::has_allocation_record(address addr) { zgu@4053: MemPointerArrayIteratorImpl itr(_staging_area.malloc_data()); zgu@3994: MemPointerRecord* cur = (MemPointerRecord*)itr.current(); zgu@3994: while (cur != NULL) { zgu@3994: if (cur->addr() == addr && cur->is_allocation_record()) { zgu@3994: return true; zgu@3994: } zgu@3994: cur = (MemPointerRecord*)itr.next(); zgu@3994: } zgu@3994: return false; zgu@3994: } zgu@3994: #endif // PRODUCT zgu@3994: zgu@3994: #ifdef ASSERT zgu@3900: void MemSnapshot::check_staging_data() { zgu@4053: MemPointerArrayIteratorImpl itr(_staging_area.malloc_data()); zgu@3900: MemPointerRecord* cur = (MemPointerRecord*)itr.current(); zgu@3900: MemPointerRecord* next = (MemPointerRecord*)itr.next(); zgu@3900: while (next != NULL) { zgu@3900: assert((next->addr() > cur->addr()) || zgu@3900: ((next->flags() & MemPointerRecord::tag_masks) > zgu@3900: (cur->flags() & MemPointerRecord::tag_masks)), zgu@3900: "sorting order"); zgu@3900: cur = next; zgu@3900: next = (MemPointerRecord*)itr.next(); zgu@3900: } zgu@4053: zgu@4053: MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data()); zgu@4053: cur = (MemPointerRecord*)vm_itr.current(); zgu@4053: while (cur != NULL) { zgu@4053: assert(cur->is_vm_pointer(), "virtual memory pointer only"); zgu@4053: cur = (MemPointerRecord*)vm_itr.next(); zgu@4053: } zgu@3900: } zgu@4193: zgu@4193: void MemSnapshot::dump_all_vm_pointers() { zgu@4193: MemPointerArrayIteratorImpl itr(_vm_ptrs); zgu@4193: VMMemRegion* ptr = (VMMemRegion*)itr.current(); zgu@4193: tty->print_cr("dump virtual memory pointers:"); zgu@4193: while (ptr != NULL) { zgu@4193: if (ptr->is_committed_region()) { zgu@4193: tty->print("\t"); zgu@4193: } zgu@4193: tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(), zgu@4193: (ptr->addr() + ptr->size()), ptr->flags()); zgu@4193: zgu@4193: if (MemTracker::track_callsite()) { zgu@4193: VMMemRegionEx* ex = (VMMemRegionEx*)ptr; zgu@4193: if (ex->pc() != NULL) { zgu@4193: char buf[1024]; zgu@4193: if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) { zgu@4193: tty->print_cr("\t%s", buf); zgu@4193: } else { drchase@6680: tty->cr(); zgu@4193: } zgu@4193: } zgu@4193: } zgu@4193: zgu@4193: ptr = (VMMemRegion*)itr.next(); zgu@4193: } zgu@4193: tty->flush(); zgu@4193: } zgu@3994: #endif // ASSERT zgu@3900: