/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory.
class MallocSite : public AllocationSite<MemoryCounter> {
 public:
  MallocSite() :
    AllocationSite<MemoryCounter>(emptyStack) { }

  MallocSite(const NativeCallStack& stack) :
    AllocationSite<MemoryCounter>(stack) { }

  void allocate(size_t size)   { data()->allocate(size); }
  void deallocate(size_t size) { data()->deallocate(size); }

  // Memory allocated from this code path
  size_t size() const { return peek()->size(); }
  // The number of malloc calls made from this code path
  size_t count() const { return peek()->count(); }
};

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                _malloc_site;
  MallocSiteHashtableEntry* _next;

 public:
  MallocSiteHashtableEntry() : _next(NULL) { }

  MallocSiteHashtableEntry(NativeCallStack stack):
    _malloc_site(stack), _next(NULL) { }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Returns true if the entry is inserted successfully.
  // The operation may fail due to contention from other threads.
  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
      NULL) == NULL);
  }

  void set_callsite(const MallocSite& site) {
    _malloc_site = site;
  }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data() { return &_malloc_site; }

  inline long hash() const { return _malloc_site.hash(); }
  inline bool equals(const NativeCallStack& stack) const {
    return _malloc_site.equals(stack);
  }
  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size); }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size() const { return _malloc_site.size(); }
  inline size_t count() const { return _malloc_site.count(); }
};
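// A minimal usage sketch (not part of this header): appending a new entry to
// a bucket's chain with atomic_insert(). When the CAS fails, another thread
// has already linked a node at this position, so advance to that node and
// retry at the new tail. The real logic, which also checks equals() to reuse
// an existing call site, is MallocSiteTable::lookup_or_add() in
// mallocSiteTable.cpp; the helper name below is hypothetical.
//
//   static void link_entry(MallocSiteHashtableEntry* head,
//                          MallocSiteHashtableEntry* entry) {
//     MallocSiteHashtableEntry* cur = head;
//     while (!cur->atomic_insert(entry)) {
//       // Lost the race: _next is already taken. Follow it and try again.
//       cur = const_cast<MallocSiteHashtableEntry*>(cur->next());
//     }
//   }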
// The walker walks every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 / 6
  enum {
    table_base_size = 128,  // The base size is calculated from statistics to give
                            // a bucket-to-entry ratio of about 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };

  // This is a very special lock. It allows multiple shared accesses (sharedLock),
  // but once exclusive access (exclusiveLock) is requested, all shared accesses
  // are rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only way to "overflow" this
    // number is to have more than -min_jint threads in this process,
    // which is not going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState     _lock_state;
    volatile int* _lock;

   public:
    AccessLock(volatile int* lock) :
      _lock_state(NoLock), _lock(lock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec((volatile jint*)_lock);
      }
    }

    // Acquire shared lock.
    // Returns true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(1, _lock);
      if (res < 0) {
        // An exclusive lock is (or was) held; back out.
        Atomic::add(-1, _lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }

    // Acquire exclusive lock.
    void exclusiveLock();
  };
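  // A sketch of the exclusive-lock protocol (an assumption about the
  // exclusiveLock() definition in mallocSiteTable.cpp, shown here only to
  // document the intent): swing the counter to a huge negative value so that
  // every later sharedLock() sees a negative result and backs off, then spin
  // until all in-flight readers have drained.
  //
  //   void MallocSiteTable::AccessLock::exclusiveLock() {
  //     jint val;
  //     do {
  //       val = *_lock;                     // current reader count (>= 0)
  //     } while (Atomic::cmpxchg(_MAGIC_ + val, _lock, val) != val);
  //     while (*_lock != _MAGIC_) {
  //       // wait (e.g. brief sleep) for remaining readers to exit
  //     }
  //     _lock_state = ExclusiveLock;
  //   }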
 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock must be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
    size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Returns true if the allocation is recorded successfully; bucket_idx
  // and pos_idx are updated to indicate the entry where the allocation
  // information was recorded.
  // Returns false only under rare scenarios:
  //   1. out of memory
  //   2. hash bucket overflow
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }

  // Record a memory deallocation. bucket_idx and pos_idx indicate where the
  // matching allocation information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);
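  // An illustrative walker (not part of this header): totals outstanding
  // malloc'd bytes across all recorded call sites. The class name is
  // hypothetical; real walkers are defined by the NMT reporting code.
  //
  //   class TotalSizeWalker : public MallocSiteWalker {
  //    private:
  //     size_t _total;
  //    public:
  //     TotalSizeWalker() : _total(0) { }
  //     virtual bool do_malloc_site(const MallocSite* e) {
  //       _total += e->size();
  //       return true;  // returning false stops the walk
  //     }
  //     size_t total() const { return _total; }
  //   };
  //
  //   // Usage:
  //   //   TotalSizeWalker w;
  //   //   MallocSiteTable::walk_malloc_site(&w);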
 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
  static void reset();

  // Delete a bucket's linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline int hash_to_index(int hash) {
    hash = (hash > 0) ? hash : (-hash);
    return (hash % table_size);
  }

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    return (NativeCallStack*)_hash_entry_allocation_stack;
  }

 private:
  // Counter for tracking concurrent accesses
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry* _table[table_size];

  // Reserve enough memory for placing the following objects via placement new

  // The memory for the hashtable entry allocation stack object
  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
  // The memory for the hashtable entry allocation callsite object
  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];

  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP