src/share/vm/services/mallocSiteTable.hpp

author       zgu
date         Wed, 27 Aug 2014 08:19:12 -0400
changeset    7074:833b0f92429a
child        7078:c6211b707068
permissions  -rw-r--r--

8046598: Scalable Native memory tracking development
Summary: Enhance scalability of native memory tracking
Reviewed-by: coleenp, ctornqvi, gtriantafill

/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory.
class MallocSite : public AllocationSite<MemoryCounter> {
 public:
  MallocSite() :
    AllocationSite<MemoryCounter>(emptyStack) { }

  MallocSite(const NativeCallStack& stack) :
    AllocationSite<MemoryCounter>(stack) { }

  void allocate(size_t size)   { data()->allocate(size);   }
  void deallocate(size_t size) { data()->deallocate(size); }

  // Memory allocated from this code path
  size_t size()  const { return peek()->size(); }
  // The number of calls that were made from this code path
  size_t count() const { return peek()->count(); }
};
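
// Illustrative sketch (not part of this header): recording activity for one
// call path through a MallocSite. Assumes NativeCallStack's
// (toSkip, fillStack) constructor; the sizes below are hypothetical.
//
//   NativeCallStack stack(0, true);   // capture the current native stack
//   MallocSite site(stack);
//   site.allocate(1024);              // a recorded os::malloc() of 1 KB
//   site.deallocate(1024);            // the matching os::free()
//   // site.size() and site.count() report the bytes and allocation count
//   // currently attributed to this code path.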

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                 _malloc_site;
  MallocSiteHashtableEntry*  _next;

 public:
  MallocSiteHashtableEntry() : _next(NULL) { }

  MallocSiteHashtableEntry(NativeCallStack stack):
    _malloc_site(stack), _next(NULL) { }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Return true if the entry is inserted successfully.
  // The operation can fail due to contention from other threads.
  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
      NULL) == NULL);
  }

  void set_callsite(const MallocSite& site) {
    _malloc_site = site;
  }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data()             { return &_malloc_site; }

  inline long hash() const { return _malloc_site.hash(); }
  inline bool equals(const NativeCallStack& stack) const {
    return _malloc_site.equals(stack);
  }
  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size);   }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size()  const { return _malloc_site.size();  }
  inline size_t count() const { return _malloc_site.count(); }
};
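
// Illustrative sketch (assumption, not part of this header): appending a new
// entry to a bucket's linked list with atomic_insert(). Names such as "head"
// and "new_entry" are hypothetical.
//
//   MallocSiteHashtableEntry* head = /* bucket head */;
//   MallocSiteHashtableEntry* e = head;
//   MallocSiteHashtableEntry* new_entry = new MallocSiteHashtableEntry(stack);
//   // Walk toward the tail, retrying whenever another thread wins the
//   // compare-and-swap on _next.
//   while (!e->atomic_insert(new_entry)) {
//     e = const_cast<MallocSiteHashtableEntry*>(e->next());
//   }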

// The walker walks every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
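
// Illustrative sketch (assumption, not part of this header): a walker that
// sums the malloc'd bytes recorded across all call sites. The class name and
// field are hypothetical.
//
//   class TotalMallocSizeWalker : public MallocSiteWalker {
//    private:
//     size_t _total;
//    public:
//     TotalMallocSizeWalker() : _total(0) { }
//     virtual bool do_malloc_site(const MallocSite* e) {
//       _total += e->size();
//       return true;          // keep walking; returning false stops the walk
//     }
//     size_t total() const { return _total; }
//   };
//
//   TotalMallocSizeWalker walker;
//   // MallocSiteTable::walk_malloc_site(&walker); then query walker.total()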

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics data can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 / 6
  enum {
    table_base_size = 128,   // The base size is calculated from statistics to give
                             // a table ratio around 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };
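  // Worked example (assumption): with NMT_TrackingStackDepth == 4, its value
  // in nmtCommon.hpp at this changeset, table_size evaluates to
  // 128 * 4 - 1 = 511 buckets.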

  // This is a special lock that allows multiple shared accesses (sharedLock), but
  // once exclusive access (exclusiveLock) is requested, all shared accesses are
  // rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only possibility to "overflow"
    // this number is when there are more than -min_jint threads in
    // this process, which is not going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState      _lock_state;
    volatile int*  _lock;
   public:
    AccessLock(volatile int* lock) :
      _lock(lock), _lock_state(NoLock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec((volatile jint*)_lock);
      }
    }
    // Acquire shared lock.
    // Return true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(1, _lock);
      if (res < 0) {
        Atomic::add(-1, _lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }
    // Acquire exclusive lock
    void exclusiveLock();
  };
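
  // Illustrative sketch (assumption; the real definition lives in the .cpp
  // file): exclusiveLock() can be built from _MAGIC_ by CAS'ing the counter
  // down into negative territory, so that every later sharedLock() attempt
  // observes a negative value and backs off, then waiting for existing
  // readers to drain.
  //
  //   void MallocSiteTable::AccessLock::exclusiveLock() {
  //     jint val, target;
  //     do {
  //       val = *_lock;
  //       target = _MAGIC_ + val;           // go negative, remember readers
  //     } while (Atomic::cmpxchg(target, _lock, val) != val);
  //     while (*_lock != _MAGIC_) { }       // spin until readers have left
  //     _lock_state = ExclusiveLock;
  //   }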

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock should be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
    size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Return true if the allocation is recorded successfully; bucket_idx
  // and pos_idx are also updated to indicate the entry where the allocation
  // information was recorded.
  // Return false only under rare scenarios:
  //   1. out of memory
  //   2. overflow of a hash bucket
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }

  // Record memory deallocation. bucket_idx and pos_idx indicate where the
  // allocation information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }
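
  // Illustrative sketch (assumption, not part of this header): how a caller
  // such as the malloc tracker could pair the two entry points. The variables
  // below are hypothetical; in the real tracker the indices are kept with
  // each allocated block.
  //
  //   size_t bucket_idx, pos_idx;
  //   NativeCallStack stack(0, true);
  //   if (MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx)) {
  //     // remember (bucket_idx, pos_idx) alongside the allocation ...
  //   }
  //   // ... later, when the block is freed:
  //   MallocSiteTable::deallocation_at(size, bucket_idx, pos_idx);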

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
  static void reset();

  // Delete a bucket linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline int hash_to_index(int hash) {
    hash = (hash > 0) ? hash : (-hash);
    return (hash % table_size);
  }
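  // Illustrative note (assumption): hash_to_index() folds a call stack's hash
  // into [0, table_size). For example, a hash of -1000 maps to bucket
  // 1000 % 511 == 489 when table_size is 511 (see the worked example above).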

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    return (NativeCallStack*)_hash_entry_allocation_stack;
  }

 private:
  // Counter for counting concurrent accesses
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry* _table[table_size];


  // Reserve enough memory for placing the following objects.

  // The memory for the hashtable entry allocation stack object
  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
  // The memory for the hashtable entry allocation callsite object
  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
