/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"

// Snapshot pointer array iterator

// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
 public:
  MemPointerIterator(MemPointerArray* arr):
    MemPointerArrayIteratorImpl(arr) {
    assert(arr != NULL, "null array");
  }

#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // we do see multiple commit/uncommit on the same memory; it is ok
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }

  virtual bool insert(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    return _array->insert_at(ptr, _pos);
  }

  virtual bool insert_after(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);

      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_array->insert_at(ptr, _pos + 1)) {
      _pos++;
      return true;
    }
    return false;
  }
#endif

  // advance to the first pointer at or above the given address; assumes
  // the underlying array is sorted by address
  virtual MemPointer* locate(address addr) {
    MemPointer* cur = current();
    while (cur != NULL && cur->addr() < addr) {
      cur = next();
    }
    return cur;
  }
};
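
// Iterator over virtual memory records in a snapshot. Records are sorted
// by base address, and each reserved region is followed by the committed
// regions that fall inside it (see check_reserved_region() below).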
class VMMemPointerIterator : public MemPointerIterator {
 public:
  VMMemPointerIterator(MemPointerArray* arr):
    MemPointerIterator(arr) {
  }

  // locate an existing reserved memory region that contains specified address,
  // or the reserved region just above this address, where the incoming
  // reserved region should be inserted
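  // For illustration (hypothetical addresses): given reserved regions
  // [0x1000, 0x2000) and [0x5000, 0x6000), locate(0x1800) returns the
  // first region, while locate(0x3000) returns the second, which is where
  // a new reserved region based at 0x3000 would be inserted.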
  virtual MemPointer* locate(address addr) {
    reset();
    VMMemRegion* reg = (VMMemRegion*)current();
    while (reg != NULL) {
      if (reg->is_reserved_region()) {
        if (reg->contains_address(addr) || addr < reg->base()) {
          return reg;
        }
      }
      reg = (VMMemRegion*)next();
    }
    return NULL;
  }

  // The following methods update virtual memory in the context of the
  // 'current' position, which is properly positioned by callers via the
  // locate() method above.
  bool add_reserved_region(MemPointerRecord* rec);
  bool add_committed_region(MemPointerRecord* rec);
  bool remove_uncommitted_region(MemPointerRecord* rec);
  bool remove_released_region(MemPointerRecord* rec);

  // split a reserved region to create a new memory region with specified base and size
  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
 private:
  bool insert_record(MemPointerRecord* rec);
  bool insert_record_after(MemPointerRecord* rec);

  bool insert_reserved_region(MemPointerRecord* rec);

  // reset current position
  inline void reset() { _pos = 0; }
#ifdef ASSERT
  // check integrity of records on the current reserved memory region
  bool check_reserved_region() {
    VMMemRegion* reserved_region = (VMMemRegion*)current();
    assert(reserved_region != NULL && reserved_region->is_reserved_region(),
           "Sanity check");
    // all committed regions that follow the current reserved region should
    // belong to the reserved region
    VMMemRegion* next_region = (VMMemRegion*)next();
    for (; next_region != NULL && next_region->is_committed_region();
         next_region = (VMMemRegion*)next()) {
      if (!reserved_region->contains_region(next_region)) {
        return false;
      }
    }
    return true;
  }

  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    VMMemRegion* p1 = (VMMemRegion*)ptr1;
    VMMemRegion* p2 = (VMMemRegion*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // we do see multiple commit/uncommit on the same memory; it is ok
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
#endif
};

// Iterator over malloc records in the staging area; walks records in
// address order and skips stale arena memory (size) records.
class MallocRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;

 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur_rec = _itr.current();
    if (cur_rec != NULL) {
      MemPointer* prev_rec = _itr.peek_prev();
      MemPointer* next_rec = _itr.peek_next();
      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
#endif
    return _itr.current();
  }

  virtual MemPointer* next() {
    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
    // arena memory record is a special case: it has to be compared by
    // sequence number against its associated arena record
    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // if there is an associated arena record, it has to be the previous
      // record because of the sorting order (by address) - NMT generates a
      // pseudo address for an arena's size record by offsetting the arena's
      // address, which guarantees the order of the arena record and its
      // size record
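      // For illustration (hypothetical values): an arena record at address
      // 0x1000 with seq 10 is immediately followed by its size record at a
      // pseudo address such as 0x1001. If the size record carries seq 8
      // (it predates the arena record), it is a leftover or cleanup record
      // with size 0 and is skipped; if it carries seq 12, it is the arena's
      // live size record and is returned.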
      if (prev_rec != NULL && prev_rec->is_arena_record() &&
          next_rec->is_memory_record_of_arena(prev_rec)) {
        if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena memory record.
          // Two scenarios:
          //   - if the arena record is an allocation record, this early
          //     size record must be a leftover from a previous arena,
          //     and the last size record should have size = 0.
          //   - if the arena record is a deallocation record, this
          //     size record should be its cleanup record, which should
          //     also have size = 0. In other words, an arena always resets
          //     its size before it is destroyed (see Arena's destructor).
          assert(next_rec->size() == 0, "size not reset");
          return _itr.next();
        } else {
          assert(prev_rec->is_allocation_record(),
                 "Arena size record ahead of allocation record");
        }
      }
    }
    return next_rec;
  }

  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};

// Collapse duplicated records. Eliminating duplicated records here is much
// cheaper than during the promotion phase. However, it has a limitation: it
// can only eliminate duplicated records within a generation, so there is
// still a chance of seeing duplicated records during promotion.
// We want to keep the record with the higher sequence number, because it
// has a more accurate callsite pc.
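// For illustration (hypothetical values): two records for the same region
// (same address, size, and flags) with seq 3 and seq 7 collapse into one;
// the iterator returns the later record (seq 7), whose callsite pc is the
// more accurate of the two.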
class VMRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;

 public:
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
             "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  virtual MemPointer* current() const {
    return _itr.current();
  }

  // get next record, skipping duplicated records
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
             "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }

 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
    return ret;
  }
};

// Holds records merged from per-thread recorders until they are promoted
// into the snapshot; malloc records and virtual memory records are staged
// in separate arrays.
class StagingArea : public _ValueObj {
 private:
  MemPointerArray* _malloc_data;
  MemPointerArray* _vm_data;

 public:
  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
    init();
  }

  ~StagingArea() {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
  }

  MallocRecordIterator malloc_record_walker() {
    return MallocRecordIterator(malloc_data());
  }

  VMRecordIterator virtual_memory_record_walker();

  bool init();
  void clear() {
    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
    _malloc_data->shrink();
    _malloc_data->clear();
    _vm_data->clear();
  }

  inline MemPointerArray* malloc_data() { return _malloc_data; }
  inline MemPointerArray* vm_data() { return _vm_data; }
};

class MemBaseline;
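
// Central NMT snapshot: per-thread memory recorders are merged into the
// staging area via merge(), and staged data is then promoted via promote()
// into the live malloc and virtual memory pointer arrays below.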
class MemSnapshot : public CHeapObj {
 private:
  // the following two arrays contain records of all known live memory blocks
  // live malloc-ed memory pointers
  MemPointerArray* _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray* _vm_ptrs;

  StagingArea _staging_area;

  // the lock to protect this snapshot
  Monitor* _lock;

  // the number of instance classes
  int _number_of_classes;

  NOT_PRODUCT(size_t _untracked_count;)
  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // whether we are running out of native memory
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
            _staging_area.malloc_data() == NULL ||
            _staging_area.vm_data() == NULL ||
            _vm_ptrs == NULL || _lock == NULL ||
            _alloc_ptrs->out_of_memory() ||
            _vm_ptrs->out_of_memory());
  }

  // merge a per-thread memory recorder into the staging area
  bool merge(MemRecorder* rec);
  // promote staged data to snapshot
  bool promote(int number_of_classes);

  int number_of_classes() const { return _number_of_classes; }

  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)
  // dump all virtual memory pointers in snapshot
  DEBUG_ONLY(void dump_all_vm_pointers();)

 private:
  // copy a sequenced pointer from src to dest
  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  // assign a sequenced pointer to a non-sequenced pointer
  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);

  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};

#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP