/*
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"


/*
 * Virtual memory counter
 */
class VirtualMemory VALUE_OBJ_CLASS_SPEC {
 private:
  size_t _reserved;
  size_t _committed;

 public:
  VirtualMemory() : _reserved(0), _committed(0) { }

  inline void reserve_memory(size_t sz) { _reserved += sz; }
  inline void commit_memory (size_t sz) {
    _committed += sz;
    assert(_committed <= _reserved, "Sanity check");
  }

  inline void release_memory (size_t sz) {
    assert(_reserved >= sz, "Negative amount");
    _reserved -= sz;
  }

  inline void uncommit_memory(size_t sz) {
    assert(_committed >= sz, "Negative amount");
    _committed -= sz;
  }

  void reset() {
    _reserved  = 0;
    _committed = 0;
  }

  inline size_t reserved()  const { return _reserved; }
  inline size_t committed() const { return _committed; }
};

// Virtual memory allocation site, keeps track of where the virtual memory is reserved.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
 public:
  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
    AllocationSite<VirtualMemory>(stack) { }

  inline void reserve_memory (size_t sz) { data()->reserve_memory(sz); }
  inline void commit_memory  (size_t sz) { data()->commit_memory(sz); }
  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
  inline void release_memory (size_t sz) { data()->release_memory(sz); }
  inline size_t reserved()  const { return peek()->reserved(); }
  inline size_t committed() const { return peek()->committed(); }
};

class VirtualMemorySummary;

// This class represents a snapshot of virtual memory at a given time.
// The latest snapshot is saved in a static area.
class VirtualMemorySnapshot : public ResourceObj {
  friend class VirtualMemorySummary;

 private:
  VirtualMemory _virtual_memory[mt_number_of_types];

 public:
  inline VirtualMemory* by_type(MEMFLAGS flag) {
    int index = NMTUtil::flag_to_index(flag);
    return &_virtual_memory[index];
  }

  inline VirtualMemory* by_index(int index) {
    assert(index >= 0, "Index out of bound");
    assert(index < mt_number_of_types, "Index out of bound");
    return &_virtual_memory[index];
  }

  inline size_t total_reserved() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].reserved();
    }
    return amount;
  }

  inline size_t total_committed() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].committed();
    }
    return amount;
  }

  inline void reset() {
    for (int index = 0; index < mt_number_of_types; index ++) {
      _virtual_memory[index].reset();
    }
  }

  void copy_to(VirtualMemorySnapshot* s) {
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_virtual_memory[index] = _virtual_memory[index];
    }
  }
};

class VirtualMemorySummary : AllStatic {
 public:
  static void initialize();

  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->reserve_memory(size);
  }

  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->commit_memory(size);
  }

  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->uncommit_memory(size);
  }

  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->release_memory(size);
  }

  // Move virtual memory from one memory type to another.
  // Virtual memory can be reserved before it is associated with a memory type, and tagged
  // as 'unknown'. Once the memory is tagged, the virtual memory is moved from the 'unknown'
  // type to the specified memory type.
  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->release_memory(size);
    as_snapshot()->by_type(to)->reserve_memory(size);
  }

  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->uncommit_memory(size);
    as_snapshot()->by_type(to)->commit_memory(size);
  }

  static inline void snapshot(VirtualMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
  }

  static inline void reset() {
    as_snapshot()->reset();
  }

  static VirtualMemorySnapshot* as_snapshot() {
    return (VirtualMemorySnapshot*)_snapshot;
  }

 private:
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};


/*
 * A virtual memory region
 */
class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
 private:
  address _base_address;
  size_t  _size;

 public:
  VirtualMemoryRegion(address addr, size_t size) :
    _base_address(addr), _size(size) {
    assert(addr != NULL, "Invalid address");
    assert(size > 0, "Invalid size");
  }

  inline address base() const { return _base_address; }
  inline address end()  const { return base() + size(); }
  inline size_t  size() const { return _size; }

  inline bool is_empty() const { return size() == 0; }

  inline bool contain_address(address addr) const {
    return (addr >= base() && addr < end());
  }

  inline bool contain_region(address addr, size_t size) const {
    return contain_address(addr) && contain_address(addr + size - 1);
  }

  inline bool same_region(address addr, size_t sz) const {
    return (addr == base() && sz == size());
  }

  inline bool overlap_region(address addr, size_t sz) const {
    VirtualMemoryRegion rgn(addr, sz);
    return contain_address(addr) ||
           contain_address(addr + sz - 1) ||
           rgn.contain_address(base()) ||
           rgn.contain_address(end() - 1);
  }

  inline bool adjacent_to(address addr, size_t sz) const {
    return (addr == end() || (addr + sz) == base());
  }

  void exclude_region(address addr, size_t sz) {
    assert(contain_region(addr, sz), "Not containment");
    assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
    size_t new_size = size() - sz;

    if (addr == base()) {
      set_base(addr + sz);
    }
    set_size(new_size);
  }

  void expand_region(address addr, size_t sz) {
    assert(adjacent_to(addr, sz), "Not adjacent regions");
    if (base() == addr + sz) {
      set_base(addr);
    }
    set_size(size() + sz);
  }

 protected:
  void set_base(address base) {
    assert(base != NULL, "Sanity check");
    _base_address = base;
  }

  void set_size(size_t size) {
    assert(size > 0, "Sanity check");
    _size = size;
  }
};

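// Usage sketch for the region arithmetic above (illustrative only; the base
// address and sizes below are hypothetical, not taken from the original code).
// expand_region() grows a region by an adjacent range, while exclude_region()
// trims a range off either end of the region, never out of the middle:
//
//   address base = (address)0x100000;
//   VirtualMemoryRegion rgn(base, 4 * K);   // covers [0x100000, 0x101000)
//   rgn.expand_region(rgn.end(), 4 * K);    // grows to [0x100000, 0x102000)
//   rgn.exclude_region(base, K);            // trims to [0x100400, 0x102000)
//   assert(rgn.contain_address(base + K), "Sanity check");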

class CommittedMemoryRegion : public VirtualMemoryRegion {
 private:
  NativeCallStack _stack;

 public:
  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
    VirtualMemoryRegion(addr, size), _stack(stack) { }

  inline int compare(const CommittedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size()) ||
        adjacent_to   (rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const CommittedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }
};


typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;

int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);

class ReservedMemoryRegion : public VirtualMemoryRegion {
 private:
  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
    _committed_regions;

  NativeCallStack _stack;
  MEMFLAGS        _flag;

  bool            _all_committed;

 public:
  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) :
    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
    _all_committed(false) { }

  ReservedMemoryRegion(address base, size_t size) :
    VirtualMemoryRegion(base, size), _stack(emptyStack), _flag(mtNone),
    _all_committed(false) { }

  // Copy constructor
  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
    VirtualMemoryRegion(rr.base(), rr.size()) {
    *this = rr;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }

  void set_flag(MEMFLAGS flag);
  inline MEMFLAGS flag() const { return _flag; }

  inline int compare(const ReservedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const ReservedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  bool remove_uncommitted_region(address addr, size_t size);

  size_t committed_size() const;

  // Move committed regions that are higher than the specified address to
  // the new region.
  void move_committed_regions(address addr, ReservedMemoryRegion& rgn);

  inline bool all_committed() const { return _all_committed; }
  void set_all_committed(bool b);

  CommittedRegionIterator iterate_committed_regions() const {
    return CommittedRegionIterator(_committed_regions.head());
  }

  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
    set_base(other.base());
    set_size(other.size());

    _stack         = *other.call_stack();
    _flag          = other.flag();
    _all_committed = other.all_committed();
    if (other.all_committed()) {
      set_all_committed(true);
    } else {
      CommittedRegionIterator itr = other.iterate_committed_regions();
      const CommittedMemoryRegion* rgn = itr.next();
      while (rgn != NULL) {
        _committed_regions.add(*rgn);
        rgn = itr.next();
      }
    }
    return *this;
  }

 private:
  // The committed region contains the uncommitted region; subtract the uncommitted
  // region from this committed region.
  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
    address addr, size_t sz);

  bool add_committed_region(const CommittedMemoryRegion& rgn) {
    assert(rgn.base() != NULL, "Invalid base address");
    assert(size() > 0, "Invalid size");
    return _committed_regions.add(rgn) != NULL;
  }
};

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);

class VirtualMemoryWalker : public StackObj {
 public:
  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};

// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
 public:
  static bool initialize(NMT_TrackingLevel level);

  static bool add_reserved_region(address base_addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone, bool all_committed = false);

  static bool add_committed_region     (address base_addr, size_t size, const NativeCallStack& stack);
  static bool remove_uncommitted_region(address base_addr, size_t size);
  static bool remove_released_region   (address base_addr, size_t size);
  static void set_reserved_region_type (address addr, MEMFLAGS flag);

  // Walk the virtual memory data structure, e.g. for creating a baseline.
  static bool walk_virtual_memory(VirtualMemoryWalker* walker);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

 private:
  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> _reserved_regions;
};


#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
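
// Usage sketch of the tracker API above (illustrative only; the base address,
// sizes and memory type below are hypothetical, not taken from the original
// code). A typical sequence reserves a region together with the reserving call
// stack and memory type, commits part of it, and eventually releases it, which
// keeps both the reserved-region list and VirtualMemorySummary in sync:
//
//   NativeCallStack stack(0, true);    // capture the current native call stack
//   address base = (address)0x200000;  // hypothetical reservation base
//   VirtualMemoryTracker::add_reserved_region(base, 64 * K, stack, mtGC);
//   VirtualMemoryTracker::add_committed_region(base, 16 * K, stack);
//   VirtualMemoryTracker::remove_uncommitted_region(base, 16 * K);
//   VirtualMemoryTracker::remove_released_region(base, 64 * K);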