src/share/vm/services/mallocTracker.hpp

author:      zgu
date:        Wed, 27 Aug 2014 08:19:12 -0400
changeset:   7074 (833b0f92429a)
child:       7080 (dd3939fe8424)
permissions: -rw-r--r--

8046598: Scalable Native memory tracking development
Summary: Enhance scalability of native memory tracking
Reviewed-by: coleenp, ctornqvi, gtriantafill

/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

/*
 * This counter class counts memory allocation and deallocation,
 * records total memory allocation size and number of allocations.
 * The counters are updated atomically.
 */
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
 private:
  size_t   _count;
  size_t   _size;

  DEBUG_ONLY(size_t   _peak_count;)
  DEBUG_ONLY(size_t   _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size  = 0;)
  }

  // Reset counters
  void reset() {
    _size  = 0;
    _count = 0;
    DEBUG_ONLY(_peak_size = 0;)
    DEBUG_ONLY(_peak_count = 0;)
  }

  inline void allocate(size_t sz) {
    Atomic::add(1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  inline void deallocate(size_t sz) {
    assert(_count > 0, "Negative counter");
    assert(_size >= sz, "Negative size");
    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
    }
  }

  inline void resize(long sz) {
    if (sz != 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size;  }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size;  })

};
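
// Illustrative sketch (not part of the original changeset): a typical update
// sequence. Each field is adjusted with a single Atomic::add(), so _count and
// _size stay consistent under concurrent callers.
//
//   MemoryCounter c;
//   c.allocate(1024);     // count 0 -> 1, size    0 -> 1024
//   c.resize(-256);       // count unchanged, size 1024 -> 768
//   c.deallocate(768);    // count 1 -> 0, size  768 -> 0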

/*
 * Malloc memory used by a particular subsystem.
 * It includes the memory acquired through os::malloc()
 * calls and the arenas' backing memory.
 */
class MallocMemory VALUE_OBJ_CLASS_SPEC {
 private:
  MemoryCounter _malloc;
  MemoryCounter _arena;

 public:
  MallocMemory() { }

  inline void record_malloc(size_t sz) {
    _malloc.allocate(sz);
  }

  inline void record_free(size_t sz) {
    _malloc.deallocate(sz);
  }

  inline void record_new_arena() {
    _arena.allocate(0);
  }

  inline void record_arena_free() {
    _arena.deallocate(0);
  }

  inline void record_arena_size_change(long sz) {
    _arena.resize(sz);
  }

  void reset() {
    _malloc.reset();
    _arena.reset();
  }

  inline size_t malloc_size()  const { return _malloc.size();  }
  inline size_t malloc_count() const { return _malloc.count(); }
  inline size_t arena_size()   const { return _arena.size();   }
  inline size_t arena_count()  const { return _arena.count();  }

  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
};
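
// Illustrative sketch (not part of the original changeset): arena bookkeeping
// reuses the same counter type, but splits lifetime events from size events:
//
//   MallocMemory m;
//   m.record_new_arena();              // arena count +1, size unchanged
//   m.record_arena_size_change(4096);  // arena size  +4096 (chunk grows)
//   m.record_arena_size_change(-1024); // arena size  -1024 (chunk shrinks)
//   m.record_arena_free();             // arena count -1, size unchanged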

class MallocMemorySummary;

// A snapshot of malloc'd memory; it includes malloc memory
// usage by type and the memory used by the tracking itself.
class MallocMemorySnapshot : public ResourceObj {
  friend class MallocMemorySummary;

 private:
  MallocMemory  _malloc[mt_number_of_types];
  MemoryCounter _tracking_header;


 public:
  inline MallocMemory* by_type(MEMFLAGS flags) {
    int index = NMTUtil::flag_to_index(flags);
    return &_malloc[index];
  }

  inline MallocMemory* by_index(int index) {
    assert(index >= 0, "Index out of bound");
    assert(index < mt_number_of_types, "Index out of bound");
    return &_malloc[index];
  }

  inline MemoryCounter* malloc_overhead() {
    return &_tracking_header;
  }

  // Total malloc'd memory amount
  size_t total() const;
  // Total malloc'd memory used by arenas
  size_t total_arena() const;

  inline size_t thread_count() {
    return by_type(mtThreadStack)->malloc_count();
  }

  void reset();

  void copy_to(MallocMemorySnapshot* s) {
    s->_tracking_header = _tracking_header;
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_malloc[index] = _malloc[index];
    }
  }

  // Make adjustment by subtracting chunks used by arenas
  // from total chunks to get total free chunk size
  void make_adjustment();
};
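
// Illustrative sketch (not part of the original changeset; how the destination
// snapshot is allocated is up to the caller): a reporter works on a consistent
// copy rather than reading the live counters directly:
//
//   live_snapshot->copy_to(&copy);   // copy per-type counters and the tracking header
//   copy.make_adjustment();          // fold arena chunk usage into free chunk size
//   size_t all_malloc = copy.total();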

/*
 * This class is for collecting malloc statistics at summary level
 */
class MallocMemorySummary : AllStatic {
 private:
  // Reserve memory for placement of MallocMemorySnapshot object
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];

 public:
  static void initialize();

  static inline void record_malloc(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_malloc(size);
  }

  static inline void record_free(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_free(size);
  }

  static inline void record_new_arena(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_new_arena();
  }

  static inline void record_arena_free(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_free();
  }

  static inline void record_arena_size_change(long size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_size_change(size);
  }

  static void snapshot(MallocMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
    s->make_adjustment();
  }

  // Record memory used by malloc tracking header
  static inline void record_new_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->allocate(sz);
  }

  static inline void record_free_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->deallocate(sz);
  }

  // The memory used by malloc tracking headers
  static inline size_t tracking_overhead() {
    return as_snapshot()->malloc_overhead()->size();
  }

  // Reset all counters to zero
  static void reset() {
    as_snapshot()->reset();
  }

  static MallocMemorySnapshot* as_snapshot() {
    return (MallocMemorySnapshot*)_snapshot;
  }
};
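
// Illustrative sketch (not part of the original changeset): for every tracked
// allocation the summary is updated twice, once for the user block and once for
// the NMT header that precedes it (this is the pairing the MallocHeader
// constructors below perform):
//
//   MallocMemorySummary::record_malloc(size, mtThread);
//   MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
//   ...
//   MallocMemorySummary::record_free(size, mtThread);
//   MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));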


/*
 * Malloc tracking header.
 * To satisfy the malloc alignment requirement, NMT uses two machine words for tracking
 * purposes, which ensures 8-byte alignment on 32-bit systems and 16-byte alignment on
 * 64-bit systems (product build).
 */

class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
  size_t       _size      : 62;
  size_t       _level     : 2;
  size_t       _flags     : 8;
  size_t       _pos_idx   : 16;
  size_t       _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
#define MAX_BUCKET_LENGTH         ((size_t)(1 << 16))
#define MAX_MALLOC_SIZE           (((size_t)1 << 62) - 1)
#else
  size_t       _size      : 30;
  size_t       _level     : 2;
  size_t       _flags     : 8;
  size_t       _pos_idx   : 8;
  size_t       _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)(1 << 16))
#define MAX_BUCKET_LENGTH         ((size_t)(1 << 8))
// Max malloc size = 1GB - 1 on a 32-bit system, which has 4GB of address space in total
#define MAX_MALLOC_SIZE           ((size_t)(1 << 30) - 1)
#endif // _LP64
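
// Layout sketch (not part of the original changeset): the bit fields above are
// budgeted to fill exactly two machine words. On 64-bit:
//   word 1: _size(62) + _level(2)                      = 64 bits
//   word 2: _flags(8) + _pos_idx(16) + _bucket_idx(40) = 64 bits
// On 32-bit:
//   word 1: _size(30) + _level(2)                      = 32 bits
//   word 2: _flags(8) + _pos_idx(8)  + _bucket_idx(16) = 32 bits
// This is what the "sizeof(MallocHeader) == sizeof(void*) * 2" asserts in the
// constructors below check.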

 public:
  // Summary tracking header
  MallocHeader(size_t size, MEMFLAGS flags) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    _level = NMT_summary;
    _flags = flags;
    set_size(size);
    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }
  // Detail tracking header
  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    _level = NMT_detail;
    _flags = flags;
    set_size(size);
    size_t bucket_idx;
    size_t pos_idx;
    if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
      assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
      assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
      _bucket_idx = bucket_idx;
      _pos_idx = pos_idx;
    }
    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }
  // Minimal tracking header
  MallocHeader() {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    _level = (unsigned short)NMT_minimal;
  }

  inline NMT_TrackingLevel tracking_level() const {
    return (NMT_TrackingLevel)_level;
  }

  inline size_t size()    const { return _size; }
  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
  bool get_stack(NativeCallStack& stack) const;

  // Cleanup tracking information before the memory is released.
  void release() const;

 private:
  inline void set_size(size_t size) {
    assert(size <= MAX_MALLOC_SIZE, "Malloc size too large, should use virtual memory?");
    _size = size;
  }
  bool record_malloc_site(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx) const;
};


// Main class called from MemTracker to track malloc activities
class MallocTracker : AllStatic {
 public:
  // Initialize malloc tracker for specific tracking level
  static bool initialize(NMT_TrackingLevel level);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

  // Malloc tracking header size for specific tracking level
  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
  }

  // Parameter name convention:
  // memblock:    the beginning address of the user data
  // malloc_base: the beginning address that includes the malloc tracking header
  //
  // The relationship:
  // memblock = (char*)malloc_base + sizeof(nmt header)
  //

  // Record malloc on specified memory block
  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
    const NativeCallStack& stack, NMT_TrackingLevel level);

  // Record free on specified memory block
  static void* record_free(void* memblock);

  // Get tracking level of specified memory block
  static inline NMT_TrackingLevel get_memory_tracking_level(void* memblock);


  // Offset memory address to header address
  static inline void* get_base(void* memblock);
  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
    if (memblock == NULL || level == NMT_off) return memblock;
    return (char*)memblock - malloc_header_size(level);
  }

  // Get memory size
  static inline size_t get_size(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    assert(header->tracking_level() >= NMT_summary,
      "Wrong tracking level");
    return header->size();
  }

  // Get memory type
  static inline MEMFLAGS get_flags(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    assert(header->tracking_level() >= NMT_summary,
      "Wrong tracking level");
    return header->flags();
  }

  // Get header size
  static inline size_t get_header_size(void* memblock) {
    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
  }

  static inline void record_new_arena(MEMFLAGS flags) {
    MallocMemorySummary::record_new_arena(flags);
  }

  static inline void record_arena_free(MEMFLAGS flags) {
    MallocMemorySummary::record_arena_free(flags);
  }

  static inline void record_arena_size_change(int size, MEMFLAGS flags) {
    MallocMemorySummary::record_arena_size_change(size, flags);
  }
 private:
  static inline MallocHeader* malloc_header(void *memblock) {
    assert(memblock != NULL, "NULL pointer");
    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
    assert(header->tracking_level() >= NMT_minimal, "Bad header");
    return header;
  }
};
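
// Illustrative call sequence (a sketch, not part of the original changeset; the
// return-value conventions are inferred from the parameter-naming comment above,
// and raw_malloc/raw_free are hypothetical stand-ins for the underlying allocator):
//
//   size_t overhead = MallocTracker::malloc_header_size(level);
//   void*  base     = raw_malloc(size + overhead);                  // malloc_base
//   void*  mem      = MallocTracker::record_malloc(base, size, flags,
//                                                  stack, level);   // memblock (user pointer)
//   ...
//   void*  back     = MallocTracker::record_free(mem);              // back to malloc_base
//   raw_free(back);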

#endif // INCLUDE_NMT


#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
