/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
#define SHARE_VM_SERVICES_MEM_PTR_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

/*
 * global sequence generator that generates sequence numbers to serialize
 * memory records.
zgu@3900: */ zgu@3900: class SequenceGenerator : AllStatic { zgu@3900: public: zgu@3900: static jint next(); zgu@3900: zgu@3900: // peek last sequence number zgu@3900: static jint peek() { zgu@3900: return _seq_number; zgu@3900: } zgu@3900: zgu@3900: // reset sequence number zgu@3900: static void reset() { zgu@3900: assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); zgu@3900: _seq_number = 1; ctornqvi@4512: _generation ++; zgu@3900: }; zgu@3900: ctornqvi@4512: static unsigned long current_generation() { return _generation; } zgu@3994: NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; }) zgu@3900: zgu@3900: private: ctornqvi@4512: static volatile jint _seq_number; ctornqvi@4512: static volatile unsigned long _generation; ctornqvi@4512: NOT_PRODUCT(static jint _max_seq_number; ) zgu@3900: }; zgu@3900: zgu@3900: /* zgu@3900: * followings are the classes that are used to hold memory activity records in different stages. zgu@3900: * MemPointer zgu@3900: * |--------MemPointerRecord zgu@3900: * | zgu@3900: * |----MemPointerRecordEx zgu@3900: * | | zgu@3900: * | |-------SeqMemPointerRecordEx zgu@3900: * | zgu@3900: * |----SeqMemPointerRecord zgu@3900: * | zgu@3900: * |----VMMemRegion zgu@3900: * | zgu@3900: * |-----VMMemRegionEx zgu@3900: * zgu@3900: * zgu@3900: * prefix 'Seq' - sequenced, the record contains a sequence number zgu@3900: * surfix 'Ex' - extension, the record contains a caller's pc zgu@3900: * zgu@3900: * per-thread recorder : SeqMemPointerRecord(Ex) zgu@3900: * snapshot staging : SeqMemPointerRecord(Ex) zgu@3900: * snapshot : MemPointerRecord(Ex) and VMMemRegion(Ex) zgu@3900: * zgu@3900: */ zgu@3900: zgu@3900: /* zgu@3900: * class that wraps an address to a memory block, zgu@3900: * the memory pointer either points to a malloc'd zgu@3900: * memory block, or a mmap'd memory block zgu@3900: */ zgu@3900: class MemPointer : public _ValueObj { zgu@3900: public: zgu@3900: MemPointer(): _addr(0) { } zgu@3900: MemPointer(address 
addr): _addr(addr) { } zgu@3900: zgu@3900: MemPointer(const MemPointer& copy_from) { zgu@3900: _addr = copy_from.addr(); zgu@3900: } zgu@3900: zgu@3900: inline address addr() const { zgu@3900: return _addr; zgu@3900: } zgu@3900: zgu@3900: inline operator address() const { zgu@3900: return addr(); zgu@3900: } zgu@3900: zgu@3900: inline bool operator == (const MemPointer& other) const { zgu@3900: return addr() == other.addr(); zgu@3900: } zgu@3900: zgu@3900: inline MemPointer& operator = (const MemPointer& other) { zgu@3900: _addr = other.addr(); zgu@3900: return *this; zgu@3900: } zgu@3900: zgu@3900: protected: zgu@3900: inline void set_addr(address addr) { _addr = addr; } zgu@3900: zgu@3900: protected: zgu@3900: // memory address zgu@3900: address _addr; zgu@3900: }; zgu@3900: zgu@3900: /* MemPointerRecord records an activityand associated zgu@3900: * attributes on a memory block. zgu@3900: */ zgu@3900: class MemPointerRecord : public MemPointer { zgu@3900: private: zgu@3900: MEMFLAGS _flags; zgu@3900: size_t _size; zgu@3900: zgu@3900: public: zgu@3900: /* extension of MemoryType enum zgu@3900: * see share/vm/memory/allocation.hpp for details. zgu@3900: * zgu@3900: * The tag values are associated to sorting orders, so be zgu@3900: * careful if changes are needed. 
zgu@3900: * The allocation records should be sorted ahead of tagging zgu@3900: * records, which in turn ahead of deallocation records zgu@3900: */ zgu@3900: enum MemPointerTags { zgu@3900: tag_alloc = 0x0001, // malloc or reserve record zgu@3900: tag_commit = 0x0002, // commit record zgu@3900: tag_type = 0x0003, // tag virtual memory to a memory type zgu@3900: tag_uncommit = 0x0004, // uncommit record zgu@3900: tag_release = 0x0005, // free or release record zgu@3900: tag_size = 0x0006, // arena size zgu@3900: tag_masks = 0x0007, // all tag bits zgu@3900: vmBit = 0x0008 zgu@3900: }; zgu@3900: zgu@3900: /* helper functions to interpret the tagging flags */ zgu@3900: zgu@3900: inline static bool is_allocation_record(MEMFLAGS flags) { zgu@3900: return (flags & tag_masks) == tag_alloc; zgu@3900: } zgu@3900: zgu@3900: inline static bool is_deallocation_record(MEMFLAGS flags) { zgu@3900: return (flags & tag_masks) == tag_release; zgu@3900: } zgu@3900: zgu@3900: inline static bool is_arena_record(MEMFLAGS flags) { zgu@3900: return (flags & (otArena | tag_size)) == otArena; zgu@3900: } zgu@3900: zgu@4274: inline static bool is_arena_memory_record(MEMFLAGS flags) { zgu@3900: return (flags & (otArena | tag_size)) == (otArena | tag_size); zgu@3900: } zgu@3900: zgu@3900: inline static bool is_virtual_memory_record(MEMFLAGS flags) { zgu@3900: return (flags & vmBit) != 0; zgu@3900: } zgu@3900: zgu@3900: inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) { zgu@3900: return (flags & 0x0F) == (tag_alloc | vmBit); zgu@3900: } zgu@3900: zgu@3900: inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) { zgu@3900: return (flags & 0x0F) == (tag_commit | vmBit); zgu@3900: } zgu@3900: zgu@3900: inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) { zgu@3900: return (flags & 0x0F) == (tag_uncommit | vmBit); zgu@3900: } zgu@3900: zgu@3900: inline static bool is_virtual_memory_release_record(MEMFLAGS flags) { zgu@3900: return (flags & 0x0F) == 
(tag_release | vmBit); zgu@3900: } zgu@3900: zgu@3900: inline static bool is_virtual_memory_type_record(MEMFLAGS flags) { zgu@3900: return (flags & 0x0F) == (tag_type | vmBit); zgu@3900: } zgu@3900: zgu@3900: /* tagging flags */ zgu@3900: inline static MEMFLAGS malloc_tag() { return tag_alloc; } zgu@3900: inline static MEMFLAGS free_tag() { return tag_release; } zgu@3900: inline static MEMFLAGS arena_size_tag() { return tag_size | otArena; } zgu@3900: inline static MEMFLAGS virtual_memory_tag() { return vmBit; } zgu@3900: inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); } zgu@3900: inline static MEMFLAGS virtual_memory_commit_tag() { return (tag_commit | vmBit); } zgu@3900: inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); } zgu@3900: inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); } zgu@3900: inline static MEMFLAGS virtual_memory_type_tag() { return (tag_type | vmBit); } zgu@3900: zgu@3900: public: zgu@3900: MemPointerRecord(): _size(0), _flags(mtNone) { } zgu@3900: zgu@3900: MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0): zgu@3900: MemPointer(addr), _flags(memflags), _size(size) { } zgu@3900: zgu@3900: MemPointerRecord(const MemPointerRecord& copy_from): zgu@3900: MemPointer(copy_from), _flags(copy_from.flags()), zgu@3900: _size(copy_from.size()) { zgu@3900: } zgu@3900: zgu@3900: /* MemPointerRecord is not sequenced, it always return zgu@3900: * 0 to indicate non-sequenced zgu@3900: */ zgu@3900: virtual jint seq() const { return 0; } zgu@3900: zgu@3900: inline size_t size() const { return _size; } zgu@3900: inline void set_size(size_t size) { _size = size; } zgu@3900: zgu@3900: inline MEMFLAGS flags() const { return _flags; } zgu@3900: inline void set_flags(MEMFLAGS flags) { _flags = flags; } zgu@3900: zgu@3900: MemPointerRecord& operator= (const MemPointerRecord& ptr) { zgu@3900: MemPointer::operator=(ptr); zgu@3900: _flags = ptr.flags(); 
zgu@3900: #ifdef ASSERT zgu@3900: if (IS_ARENA_OBJ(_flags)) { zgu@3900: assert(!is_vm_pointer(), "wrong flags"); zgu@3900: assert((_flags & ot_masks) == otArena, "wrong flags"); zgu@3900: } zgu@3900: #endif zgu@3900: _size = ptr.size(); zgu@3900: return *this; zgu@3900: } zgu@3900: zgu@3900: // if the pointer represents a malloc-ed memory address zgu@3900: inline bool is_malloced_pointer() const { zgu@3900: return !is_vm_pointer(); zgu@3900: } zgu@3900: zgu@3900: // if the pointer represents a virtual memory address zgu@3900: inline bool is_vm_pointer() const { zgu@3900: return is_virtual_memory_record(_flags); zgu@3900: } zgu@3900: zgu@3900: // if this record records a 'malloc' or virtual memory zgu@3900: // 'reserve' call zgu@3900: inline bool is_allocation_record() const { zgu@3900: return is_allocation_record(_flags); zgu@3900: } zgu@3900: zgu@3900: // if this record records a size information of an arena zgu@4274: inline bool is_arena_memory_record() const { zgu@4274: return is_arena_memory_record(_flags); zgu@3900: } zgu@3900: zgu@3900: // if this pointer represents an address to an arena object zgu@3900: inline bool is_arena_record() const { zgu@3900: return is_arena_record(_flags); zgu@3900: } zgu@3900: zgu@3900: // if this record represents a size information of specific arena zgu@4274: inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) { zgu@4274: assert(is_arena_memory_record(), "not size record"); zgu@3900: assert(arena_rc->is_arena_record(), "not arena record"); zgu@3900: return (arena_rc->addr() + sizeof(void*)) == addr(); zgu@3900: } zgu@3900: zgu@3900: // if this record records a 'free' or virtual memory 'free' call zgu@3900: inline bool is_deallocation_record() const { zgu@3900: return is_deallocation_record(_flags); zgu@3900: } zgu@3900: zgu@3900: // if this record records a virtual memory 'commit' call zgu@3900: inline bool is_commit_record() const { zgu@3900: return is_virtual_memory_commit_record(_flags); zgu@3900: } 
zgu@3900: zgu@3900: // if this record records a virtual memory 'uncommit' call zgu@3900: inline bool is_uncommit_record() const { zgu@3900: return is_virtual_memory_uncommit_record(_flags); zgu@3900: } zgu@3900: zgu@3900: // if this record is a tagging record of a virtual memory block zgu@3900: inline bool is_type_tagging_record() const { zgu@3900: return is_virtual_memory_type_record(_flags); zgu@3900: } zgu@4193: zgu@4193: // if the two memory pointer records actually represent the same zgu@4193: // memory block zgu@4193: inline bool is_same_region(const MemPointerRecord* other) const { zgu@4193: return (addr() == other->addr() && size() == other->size()); zgu@4193: } zgu@4193: zgu@4193: // if this memory region fully contains another one zgu@4193: inline bool contains_region(const MemPointerRecord* other) const { zgu@4193: return contains_region(other->addr(), other->size()); zgu@4193: } zgu@4193: zgu@4193: // if this memory region fully contains specified memory range zgu@4193: inline bool contains_region(address add, size_t sz) const { zgu@4193: return (addr() <= add && addr() + size() >= add + sz); zgu@4193: } zgu@4193: zgu@4193: inline bool contains_address(address add) const { zgu@4193: return (addr() <= add && addr() + size() > add); zgu@4193: } zgu@4248: zgu@4248: // if this memory region overlaps another region zgu@4248: inline bool overlaps_region(const MemPointerRecord* other) const { zgu@4248: assert(other != NULL, "Just check"); zgu@4248: assert(size() > 0 && other->size() > 0, "empty range"); zgu@4248: return contains_address(other->addr()) || zgu@4248: contains_address(other->addr() + other->size() - 1) || // exclude end address zgu@4248: other->contains_address(addr()) || zgu@4248: other->contains_address(addr() + size() - 1); // exclude end address zgu@4248: } zgu@4248: zgu@3900: }; zgu@3900: zgu@3900: // MemPointerRecordEx also records callsite pc, from where zgu@3900: // the memory block is allocated zgu@3900: class MemPointerRecordEx : public 
MemPointerRecord { zgu@3900: private: zgu@3900: address _pc; // callsite pc zgu@3900: zgu@3900: public: zgu@3900: MemPointerRecordEx(): _pc(0) { } zgu@3900: zgu@3900: MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0): zgu@3900: MemPointerRecord(addr, memflags, size), _pc(pc) {} zgu@3900: zgu@3900: MemPointerRecordEx(const MemPointerRecordEx& copy_from): zgu@3900: MemPointerRecord(copy_from), _pc(copy_from.pc()) {} zgu@3900: zgu@3900: inline address pc() const { return _pc; } zgu@3900: zgu@3900: void init(const MemPointerRecordEx* mpe) { zgu@3900: MemPointerRecord::operator=(*mpe); zgu@3900: _pc = mpe->pc(); zgu@3900: } zgu@3900: zgu@3900: void init(const MemPointerRecord* mp) { zgu@3900: MemPointerRecord::operator=(*mp); zgu@3900: _pc = 0; zgu@3900: } zgu@3900: }; zgu@3900: zgu@4193: // a virtual memory region. The region can represent a reserved zgu@4193: // virtual memory region or a committed memory region zgu@3900: class VMMemRegion : public MemPointerRecord { zgu@3900: public: zgu@4193: VMMemRegion() { } zgu@3900: zgu@3900: void init(const MemPointerRecord* mp) { zgu@4193: assert(mp->is_vm_pointer(), "Sanity check"); zgu@3900: _addr = mp->addr(); zgu@3900: set_size(mp->size()); zgu@3900: set_flags(mp->flags()); zgu@3900: } zgu@3900: zgu@3900: VMMemRegion& operator=(const VMMemRegion& other) { zgu@3900: MemPointerRecord::operator=(other); zgu@3900: return *this; zgu@3900: } zgu@3900: zgu@4193: inline bool is_reserved_region() const { zgu@4193: return is_allocation_record(); zgu@3900: } zgu@3900: zgu@4193: inline bool is_committed_region() const { zgu@4193: return is_commit_record(); zgu@3900: } zgu@3900: zgu@3900: /* base address of this virtual memory range */ zgu@3900: inline address base() const { zgu@3900: return addr(); zgu@3900: } zgu@3900: zgu@3900: /* tag this virtual memory range to the specified memory type */ zgu@3900: inline void tag(MEMFLAGS f) { zgu@3900: set_flags(flags() | (f & mt_masks)); zgu@3900: } zgu@3900: 
zgu@4193: // expand this region to also cover specified range. zgu@4193: // The range has to be on either end of the memory region. zgu@4193: void expand_region(address addr, size_t sz) { zgu@4193: if (addr < base()) { zgu@4193: assert(addr + sz == base(), "Sanity check"); zgu@4193: _addr = addr; zgu@4193: set_size(size() + sz); zgu@4193: } else { zgu@4193: assert(base() + size() == addr, "Sanity check"); zgu@4193: set_size(size() + sz); zgu@4193: } zgu@4193: } zgu@4193: zgu@4193: // exclude the specified address range from this region. zgu@4193: // The excluded memory range has to be on either end of this memory zgu@4193: // region. zgu@4193: inline void exclude_region(address add, size_t sz) { zgu@4193: assert(is_reserved_region() || is_committed_region(), "Sanity check"); zgu@4193: assert(addr() != NULL && size() != 0, "Sanity check"); zgu@4193: assert(add >= addr() && add < addr() + size(), "Sanity check"); zgu@3900: assert(add == addr() || (add + sz) == (addr() + size()), zgu@4193: "exclude in the middle"); zgu@3900: if (add == addr()) { zgu@3900: set_addr(add + sz); zgu@3900: set_size(size() - sz); zgu@3900: } else { zgu@3900: set_size(size() - sz); zgu@3900: } zgu@3900: } zgu@3900: }; zgu@3900: zgu@3900: class VMMemRegionEx : public VMMemRegion { zgu@3900: private: zgu@3900: jint _seq; // sequence number zgu@3900: zgu@3900: public: zgu@3900: VMMemRegionEx(): _pc(0) { } zgu@3900: zgu@3900: void init(const MemPointerRecordEx* mpe) { zgu@3900: VMMemRegion::init(mpe); zgu@3900: _pc = mpe->pc(); zgu@3900: } zgu@3900: zgu@3900: void init(const MemPointerRecord* mpe) { zgu@3900: VMMemRegion::init(mpe); zgu@3900: _pc = 0; zgu@3900: } zgu@3900: zgu@3900: VMMemRegionEx& operator=(const VMMemRegionEx& other) { zgu@3900: VMMemRegion::operator=(other); zgu@3900: _pc = other.pc(); zgu@3900: return *this; zgu@3900: } zgu@3900: zgu@3900: inline address pc() const { return _pc; } zgu@3900: private: zgu@3900: address _pc; zgu@3900: }; zgu@3900: zgu@3900: /* zgu@3900: * 
Sequenced memory record zgu@3900: */ zgu@3900: class SeqMemPointerRecord : public MemPointerRecord { zgu@3900: private: zgu@3900: jint _seq; // sequence number zgu@3900: zgu@3900: public: zgu@3900: SeqMemPointerRecord(): _seq(0){ } zgu@3900: zgu@3900: SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size) zgu@3900: : MemPointerRecord(addr, flags, size) { zgu@3900: _seq = SequenceGenerator::next(); zgu@3900: } zgu@3900: zgu@3900: SeqMemPointerRecord(const SeqMemPointerRecord& copy_from) zgu@3900: : MemPointerRecord(copy_from) { zgu@3900: _seq = copy_from.seq(); zgu@3900: } zgu@3900: zgu@3900: SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) { zgu@3900: MemPointerRecord::operator=(ptr); zgu@3900: _seq = ptr.seq(); zgu@3900: return *this; zgu@3900: } zgu@3900: zgu@3900: inline jint seq() const { zgu@3900: return _seq; zgu@3900: } zgu@3900: }; zgu@3900: zgu@3900: zgu@3900: zgu@3900: class SeqMemPointerRecordEx : public MemPointerRecordEx { zgu@3900: private: zgu@3900: jint _seq; // sequence number zgu@3900: zgu@3900: public: zgu@3900: SeqMemPointerRecordEx(): _seq(0) { } zgu@3900: zgu@3900: SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size, zgu@3900: address pc): MemPointerRecordEx(addr, flags, size, pc) { zgu@3900: _seq = SequenceGenerator::next(); zgu@3900: } zgu@3900: zgu@3900: SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from) zgu@3900: : MemPointerRecordEx(copy_from) { zgu@3900: _seq = copy_from.seq(); zgu@3900: } zgu@3900: zgu@3900: SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) { zgu@3900: MemPointerRecordEx::operator=(ptr); zgu@3900: _seq = ptr.seq(); zgu@3900: return *this; zgu@3900: } zgu@3900: zgu@3900: inline jint seq() const { zgu@3900: return _seq; zgu@3900: } zgu@3900: }; zgu@3900: zgu@3900: #endif // SHARE_VM_SERVICES_MEM_PTR_HPP