src/share/vm/services/mallocTracker.hpp

changeset 7074:833b0f92429a
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

/*
 * This counter class counts memory allocations and deallocations,
 * and records the total allocation size and the number of allocations.
 * The counters are updated atomically.
 */
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
 private:
  size_t   _count;
  size_t   _size;

  DEBUG_ONLY(size_t   _peak_count;)
  DEBUG_ONLY(size_t   _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size  = 0;)
  }

  // Reset counters
  void reset() {
    _size  = 0;
    _count = 0;
    DEBUG_ONLY(_peak_size = 0;)
    DEBUG_ONLY(_peak_count = 0;)
  }

  inline void allocate(size_t sz) {
    Atomic::add(1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  inline void deallocate(size_t sz) {
    assert(_count > 0, "Negative counter");
    assert(_size >= sz, "Negative size");
    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
    }
  }

  inline void resize(long sz) {
    if (sz != 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size;  }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })

};
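
// Illustrative sketch (not part of the original header): how a MemoryCounter is
// driven by the tracking code below; the sizes are made up for the example.
//
//   MemoryCounter mc;
//   mc.allocate(1024);    // one allocation of 1K   -> count() == 1, size() == 1024
//   mc.resize(-256);      // the block shrank        -> size() == 768
//   mc.deallocate(768);   // the block was freed     -> count() == 0, size() == 0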

/*
 * Malloc memory used by a particular subsystem.
 * It includes the memory acquired through os::malloc()
 * calls and arenas' backing memory.
 */
class MallocMemory VALUE_OBJ_CLASS_SPEC {
 private:
  MemoryCounter _malloc;
  MemoryCounter _arena;

 public:
  MallocMemory() { }

  inline void record_malloc(size_t sz) {
    _malloc.allocate(sz);
  }

  inline void record_free(size_t sz) {
    _malloc.deallocate(sz);
  }

  inline void record_new_arena() {
    _arena.allocate(0);
  }

  inline void record_arena_free() {
    _arena.deallocate(0);
  }

  inline void record_arena_size_change(long sz) {
    _arena.resize(sz);
  }

  void reset() {
    _malloc.reset();
    _arena.reset();
  }

  inline size_t malloc_size()  const { return _malloc.size();  }
  inline size_t malloc_count() const { return _malloc.count(); }
  inline size_t arena_size()   const { return _arena.size();   }
  inline size_t arena_count()  const { return _arena.count();  }

  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
};
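
// Illustrative sketch (assumed usage, not part of the original header): malloc
// and arena activity for one memory type is recorded on a single MallocMemory
// instance; the sizes below are invented for the example.
//
//   MallocMemory m;
//   m.record_malloc(512);              // os::malloc'd block of 512 bytes
//   m.record_new_arena();              // arena created, no backing chunk yet
//   m.record_arena_size_change(4096);  // arena grew by a 4K chunk
//   m.record_arena_size_change(-4096); // chunk returned
//   m.record_arena_free();
//   m.record_free(512);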

class MallocMemorySummary;

// A snapshot of malloc'd memory, including malloc memory usage
// by type and the memory used by the tracking itself.
class MallocMemorySnapshot : public ResourceObj {
  friend class MallocMemorySummary;

 private:
  MallocMemory   _malloc[mt_number_of_types];
  MemoryCounter  _tracking_header;


 public:
  inline MallocMemory* by_type(MEMFLAGS flags) {
    int index = NMTUtil::flag_to_index(flags);
    return &_malloc[index];
  }

  inline MallocMemory* by_index(int index) {
    assert(index >= 0, "Index out of bound");
    assert(index < mt_number_of_types, "Index out of bound");
    return &_malloc[index];
  }

  inline MemoryCounter* malloc_overhead() {
    return &_tracking_header;
  }

  // Total malloc'd memory amount
  size_t total() const;
  // Total malloc'd memory used by arenas
  size_t total_arena() const;

  inline size_t thread_count() {
    return by_type(mtThreadStack)->malloc_count();
  }

  void reset();

  void copy_to(MallocMemorySnapshot* s) {
    s->_tracking_header = _tracking_header;
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_malloc[index] = _malloc[index];
    }
  }

  // Make adjustment by subtracting chunks used by arenas
  // from total chunks to get total free chunk size
  void make_adjustment();
};
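
// Illustrative sketch (assumed caller, not part of the original header):
// reporting code would typically walk the per-type entries of a snapshot.
//
//   MallocMemorySnapshot* s = ...;   // e.g. filled in via MallocMemorySummary::snapshot()
//   for (int i = 0; i < mt_number_of_types; i++) {
//     MallocMemory* mm = s->by_index(i);
//     // use mm->malloc_size(), mm->malloc_count(), mm->arena_size(), ...
//   }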

/*
 * This class is for collecting malloc statistics at summary level
 */
class MallocMemorySummary : AllStatic {
 private:
  // Reserve memory for placement of MallocMemorySnapshot object
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];

 public:
  static void initialize();

  static inline void record_malloc(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_malloc(size);
  }

  static inline void record_free(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_free(size);
  }

  static inline void record_new_arena(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_new_arena();
  }

  static inline void record_arena_free(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_free();
  }

  static inline void record_arena_size_change(long size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_size_change(size);
  }

  static void snapshot(MallocMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
    s->make_adjustment();
  }

  // Record memory used by malloc tracking header
  static inline void record_new_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->allocate(sz);
  }

  static inline void record_free_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->deallocate(sz);
  }

  // The memory used by malloc tracking headers
  static inline size_t tracking_overhead() {
    return as_snapshot()->malloc_overhead()->size();
  }

  // Reset all counters to zero
  static void reset() {
    as_snapshot()->reset();
  }

  static MallocMemorySnapshot* as_snapshot() {
    return (MallocMemorySnapshot*)_snapshot;
  }
};
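
// A minimal sketch of what initialize() is expected to do (the real definition
// lives in the corresponding .cpp file, so this is an assumption shown for
// illustration): construct the snapshot in the pre-reserved _snapshot buffer
// with placement new, so tracking itself needs no heap allocation.
//
//   void MallocMemorySummary::initialize() {
//     assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity check");
//     ::new ((void*)_snapshot) MallocMemorySnapshot();
//   }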


/*
 * Malloc tracking header.
 * To satisfy the malloc alignment requirement, NMT uses 2 machine words for the
 * tracking header, which ensures 8-byte alignment on 32-bit systems and 16-byte
 * alignment on 64-bit systems (product build).
 */

class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
  size_t _size      : 62;
  size_t _level     : 2;
  size_t _flags     : 8;
  size_t _pos_idx   : 16;
  size_t _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
#define MAX_BUCKET_LENGTH         ((size_t)(1 << 16))
#define MAX_MALLOC_SIZE           (((size_t)1 << 62) - 1)
#else
  size_t _size      : 30;
  size_t _level     : 2;
  size_t _flags     : 8;
  size_t _pos_idx   : 8;
  size_t _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
#define MAX_BUCKET_LENGTH         ((size_t)(1 << 8))
// Max malloc size = 1GB - 1 on a 32-bit system, which has 4GB of address space in total
#define MAX_MALLOC_SIZE           ((size_t)(1 << 30) - 1)
#endif // _LP64

 public:
  // Summary tracking header
  MallocHeader(size_t size, MEMFLAGS flags) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    _level = NMT_summary;
    _flags = flags;
    set_size(size);
    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }
  // Detail tracking header
  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    _level = NMT_detail;
    _flags = flags;
    set_size(size);
    size_t bucket_idx;
    size_t pos_idx;
    if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
      assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
      assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
      _bucket_idx = bucket_idx;
      _pos_idx = pos_idx;
    }
    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }
  // Minimal tracking header
  MallocHeader() {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    _level = (unsigned short)NMT_minimal;
  }

  inline NMT_TrackingLevel tracking_level() const {
    return (NMT_TrackingLevel)_level;
  }

  inline size_t   size()  const { return _size; }
  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
  bool get_stack(NativeCallStack& stack) const;

  // Clean up tracking information before the memory is released.
  void release() const;

 private:
  inline void set_size(size_t size) {
    assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
    _size = size;
  }
  bool record_malloc_site(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx) const;
};
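
// Memory layout implied by the header (illustrative diagram, not from the
// original file): the two-word MallocHeader immediately precedes the user block.
//
//   malloc_base                        memblock = malloc_base + sizeof(MallocHeader)
//   |                                  |
//   +----------------------------------+----------------------------+
//   | MallocHeader (2 machine words)   | user data (size() bytes)   |
//   +----------------------------------+----------------------------+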


// Main class called from MemTracker to track malloc activities
class MallocTracker : AllStatic {
 public:
  // Initialize malloc tracker for the specified tracking level
  static bool initialize(NMT_TrackingLevel level);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

  // Malloc tracking header size for the specified tracking level
  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
  }

  // Parameter name convention:
  // memblock :   the beginning address of the user data
  // malloc_base: the beginning address that includes the malloc tracking header
  //
  // The relationship:
  // memblock = (char*)malloc_base + sizeof(nmt header)
  //

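  // Illustrative sketch (assumed caller, e.g. an os::malloc()/os::free() style
  // wrapper; not part of this header): the raw allocation is sized to hold the
  // tracking header, record_malloc() returns the user-visible memblock, and
  // record_free() is assumed to return the original malloc_base to hand back
  // to the C library.
  //
  //   void* malloc_base = ::malloc(size + MallocTracker::malloc_header_size(level));
  //   void* memblock    = MallocTracker::record_malloc(malloc_base, size, flags, stack, level);
  //   ...
  //   ::free(MallocTracker::record_free(memblock));
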
  // Record malloc on specified memory block
  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
    const NativeCallStack& stack, NMT_TrackingLevel level);

  // Record free on specified memory block
  static void* record_free(void* memblock);

  // Get tracking level of specified memory block
  static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);


  // Offset memory address to header address
  static inline void* get_base(void* memblock);
  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
    if (memblock == NULL || level == NMT_off) return memblock;
    return (char*)memblock - malloc_header_size(level);
  }

  // Get memory size
  static inline size_t get_size(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    assert(header->tracking_level() >= NMT_summary,
      "Wrong tracking level");
    return header->size();
  }

  // Get memory type
  static inline MEMFLAGS get_flags(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    assert(header->tracking_level() >= NMT_summary,
      "Wrong tracking level");
    return header->flags();
  }

  // Get header size
  static inline size_t get_header_size(void* memblock) {
    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
  }

  static inline void record_new_arena(MEMFLAGS flags) {
    MallocMemorySummary::record_new_arena(flags);
  }

  static inline void record_arena_free(MEMFLAGS flags) {
    MallocMemorySummary::record_arena_free(flags);
  }

  static inline void record_arena_size_change(int size, MEMFLAGS flags) {
    MallocMemorySummary::record_arena_size_change(size, flags);
  }
 private:
  static inline MallocHeader* malloc_header(void *memblock) {
    assert(memblock != NULL, "NULL pointer");
    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
    assert(header->tracking_level() >= NMT_minimal, "Bad header");
    return header;
  }
};

#endif // INCLUDE_NMT


#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
