src/share/vm/services/mallocSiteTable.hpp

/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
#define SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory
class MallocSite : public AllocationSite<MemoryCounter> {
 public:
  MallocSite() :
    AllocationSite<MemoryCounter>(emptyStack) { }

  MallocSite(const NativeCallStack& stack) :
    AllocationSite<MemoryCounter>(stack) { }

  void allocate(size_t size)   { data()->allocate(size); }
  void deallocate(size_t size) { data()->deallocate(size); }

  // Memory allocated from this code path
  size_t size() const { return peek()->size(); }
  // The number of calls made from this code path
  size_t count() const { return peek()->count(); }
};
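
// Example (an illustrative sketch, not part of this header): recording
// activity for a site is a pair of counter updates on its MemoryCounter.
//
//   NativeCallStack stack;   // captured at the os::malloc() call site
//   MallocSite site(stack);
//   site.allocate(64);       // bump the live count and size for this path
//   site.deallocate(64);     // and undo both when the block is freed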

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                _malloc_site;
  MallocSiteHashtableEntry* _next;

 public:
  MallocSiteHashtableEntry() : _next(NULL) { }

  MallocSiteHashtableEntry(NativeCallStack stack):
    _malloc_site(stack), _next(NULL) { }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Returns true if the entry is inserted successfully.
  // The operation may fail due to contention from other threads.
  bool atomic_insert(const MallocSiteHashtableEntry* entry) {
    return (Atomic::cmpxchg_ptr((void*)entry, (volatile void*)&_next,
      NULL) == NULL);
  }

  void set_callsite(const MallocSite& site) {
    _malloc_site = site;
  }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data()             { return &_malloc_site; }

  inline long hash() const { return _malloc_site.hash(); }
  inline bool equals(const NativeCallStack& stack) const {
    return _malloc_site.equals(stack);
  }
  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size); }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size()  const { return _malloc_site.size(); }
  inline size_t count() const { return _malloc_site.count(); }
};
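
// Example (a hedged sketch of lock-free bucket chaining; the real logic
// lives in MallocSiteTable::lookup_or_add in mallocSiteTable.cpp, which
// allocates entries through new_entry()): a new entry is appended by
// CAS'ing it onto the tail of the bucket's list, retrying from the node
// that won if the CAS fails.
//
//   MallocSiteHashtableEntry* entry = new_entry(stack);
//   MallocSiteHashtableEntry* tail  = bucket_head;   // hypothetical local
//   while (!tail->atomic_insert(entry)) {
//     // Another thread linked a node first; advance to it and retry.
//     // If that node equals(stack), reuse it and discard `entry`.
//     tail = (MallocSiteHashtableEntry*)tail->next();
//   }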

// The walker visits every entry in the MallocSiteTable
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
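
// Example (illustrative only): a walker that totals outstanding malloc'd
// bytes across all recorded sites, assuming the walk continues while
// do_malloc_site returns true.
//
//   class TotalSizeWalker : public MallocSiteWalker {
//    private:
//     size_t _total;
//    public:
//     TotalSizeWalker() : _total(0) { }
//     virtual bool do_malloc_site(const MallocSite* e) {
//       _total += e->size();
//       return true;   // keep walking
//     }
//     size_t total() const { return _total; }
//   };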

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics
  //
  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 : 6
  enum {
    table_base_size = 128,   // The base size is calculated from statistics to give
                             // a table ratio around 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };
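
  // For example, assuming NMT_TrackingStackDepth is 4 (its value in
  // nmtCommon.hpp at the time of this change), table_size = 128 * 4 - 1 = 511.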

  // This is a very special lock that allows multiple shared accesses (sharedLock),
  // but once exclusive access (exclusiveLock) is requested, all shared accesses are
  // rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only way to "overflow" this
    // number is to have more than -min_jint threads in this process,
    // which is not going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState     _lock_state;
    volatile int* _lock;
   public:
    AccessLock(volatile int* lock) :
      _lock_state(NoLock), _lock(lock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec((volatile jint*)_lock);
      }
    }
    // Acquire shared lock.
    // Return true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(1, _lock);
      if (res < 0) {
        Atomic::add(-1, _lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }
    // Acquire exclusive lock
    void exclusiveLock();
  };
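
  // Example (a hedged sketch of the exclusive-lock semantics; the actual
  // implementation lives in mallocSiteTable.cpp): exclusiveLock() swings
  // the counter down by _MAGIC_ so every later sharedLock() sees a negative
  // value and fails, then spins until in-flight readers drain away.
  //
  //   void MallocSiteTable::AccessLock::exclusiveLock() {
  //     jint val, target;
  //     do {                        // block out new shared locks
  //       val = *_lock;
  //       target = _MAGIC_ + val;
  //     } while (Atomic::cmpxchg(target, _lock, val) != val);
  //     while (*_lock != _MAGIC_) { // wait for readers to exit
  //       /* yield */
  //     }
  //     _lock_state = ExclusiveLock;
  //   }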

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock must be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
    size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Returns true if the allocation was recorded successfully; bucket_idx
  // and pos_idx are also updated to indicate the entry where the allocation
  // information was recorded.
  // Returns false only under rare scenarios:
  //   1. out of memory
  //   2. overflow of a hash bucket
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }

  // Record a memory deallocation. bucket_idx and pos_idx indicate where the
  // allocation information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }
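
  // Example (an illustrative sketch of the intended call flow from the
  // malloc tracker; the locals and the header-storage step are hypothetical):
  //
  //   size_t bucket_idx, pos_idx;
  //   if (MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx)) {
  //     // stash (bucket_idx, pos_idx) in the block's malloc header, then
  //     // on the matching os::free():
  //     MallocSiteTable::deallocation_at(size, bucket_idx, pos_idx);
  //   }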

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key);
  static void reset();

  // Delete a bucket linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline int hash_to_index(int hash) {
    hash = (hash > 0) ? hash : (-hash);
    return (hash % table_size);
  }

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    return (NativeCallStack*)_hash_entry_allocation_stack;
  }

 private:
  // Counter for counting concurrent access
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry* _table[table_size];

  // Statically reserved memory for placing the following two objects

  // The memory for the hashtable entry allocation stack object
  static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)];
  // The memory for the hashtable entry allocation callsite object
  static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)];
  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_VM_SERVICES_MALLOC_SITE_TABLE_HPP
