Wed, 03 Jul 2019 20:42:37 +0800
Merge
zgu@7074 | 1 | /* |
zgu@7074 | 2 | * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. |
zgu@7074 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
zgu@7074 | 4 | * |
zgu@7074 | 5 | * This code is free software; you can redistribute it and/or modify it |
zgu@7074 | 6 | * under the terms of the GNU General Public License version 2 only, as |
zgu@7074 | 7 | * published by the Free Software Foundation. |
zgu@7074 | 8 | * |
zgu@7074 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
zgu@7074 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
zgu@7074 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
zgu@7074 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
zgu@7074 | 13 | * accompanied this code). |
zgu@7074 | 14 | * |
zgu@7074 | 15 | * You should have received a copy of the GNU General Public License version |
zgu@7074 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
zgu@7074 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
zgu@7074 | 18 | * |
zgu@7074 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
zgu@7074 | 20 | * or visit www.oracle.com if you need additional information or have any |
zgu@7074 | 21 | * questions. |
zgu@7074 | 22 | * |
zgu@7074 | 23 | */ |
zgu@7074 | 24 | |
zgu@7074 | 25 | #ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP |
zgu@7074 | 26 | #define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP |
zgu@7074 | 27 | |
zgu@7074 | 28 | #if INCLUDE_NMT |
zgu@7074 | 29 | |
zgu@7074 | 30 | #include "memory/allocation.hpp" |
zgu@7074 | 31 | #include "runtime/atomic.hpp" |
zgu@7074 | 32 | #include "services/nmtCommon.hpp" |
zgu@7074 | 33 | #include "utilities/nativeCallStack.hpp" |
zgu@7074 | 34 | |
/*
 * This counter class counts memory allocation and deallocation,
 * records total memory allocation size and number of allocations.
 * The counters are updated atomically.
 */
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
 private:
  size_t   _count;   // number of live allocations
  size_t   _size;    // total size (bytes) of live allocations

  // High-water marks, maintained in debug builds only.
  DEBUG_ONLY(size_t   _peak_count;)
  DEBUG_ONLY(size_t   _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size  = 0;)
  }

  // Record one allocation of 'sz' bytes. _count and _size are bumped with
  // Atomic::add; the debug-only peak updates below use plain (non-atomic)
  // reads/writes, so peaks may be slightly under-recorded under contention.
  inline void allocate(size_t sz) {
    Atomic::add(1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  // Record one deallocation of 'sz' bytes. The asserts read the counters
  // with plain loads; they guard against underflow in debug builds only.
  inline void deallocate(size_t sz) {
    assert(_count > 0, "Negative counter");
    assert(_size >= sz, "Negative size");
    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
    }
  }

  // Adjust total size by a signed delta without changing the allocation
  // count (used for arena resize accounting).
  inline void resize(long sz) {
    if (sz != 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size;  }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size; })

};
zgu@7074 | 85 | |
zgu@7074 | 86 | /* |
zgu@7074 | 87 | * Malloc memory used by a particular subsystem. |
zgu@7074 | 88 | * It includes the memory acquired through os::malloc() |
zgu@7074 | 89 | * call and arena's backing memory. |
zgu@7074 | 90 | */ |
zgu@7074 | 91 | class MallocMemory VALUE_OBJ_CLASS_SPEC { |
zgu@7074 | 92 | private: |
zgu@7074 | 93 | MemoryCounter _malloc; |
zgu@7074 | 94 | MemoryCounter _arena; |
zgu@7074 | 95 | |
zgu@7074 | 96 | public: |
zgu@7074 | 97 | MallocMemory() { } |
zgu@7074 | 98 | |
zgu@7074 | 99 | inline void record_malloc(size_t sz) { |
zgu@7074 | 100 | _malloc.allocate(sz); |
zgu@7074 | 101 | } |
zgu@7074 | 102 | |
zgu@7074 | 103 | inline void record_free(size_t sz) { |
zgu@7074 | 104 | _malloc.deallocate(sz); |
zgu@7074 | 105 | } |
zgu@7074 | 106 | |
zgu@7074 | 107 | inline void record_new_arena() { |
zgu@7074 | 108 | _arena.allocate(0); |
zgu@7074 | 109 | } |
zgu@7074 | 110 | |
zgu@7074 | 111 | inline void record_arena_free() { |
zgu@7074 | 112 | _arena.deallocate(0); |
zgu@7074 | 113 | } |
zgu@7074 | 114 | |
zgu@7074 | 115 | inline void record_arena_size_change(long sz) { |
zgu@7074 | 116 | _arena.resize(sz); |
zgu@7074 | 117 | } |
zgu@7074 | 118 | |
zgu@7074 | 119 | inline size_t malloc_size() const { return _malloc.size(); } |
zgu@7074 | 120 | inline size_t malloc_count() const { return _malloc.count();} |
zgu@7074 | 121 | inline size_t arena_size() const { return _arena.size(); } |
zgu@7074 | 122 | inline size_t arena_count() const { return _arena.count(); } |
zgu@7074 | 123 | |
zgu@7074 | 124 | DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; }) |
zgu@7074 | 125 | DEBUG_ONLY(inline const MemoryCounter& arena_counter() const { return _arena; }) |
zgu@7074 | 126 | }; |
zgu@7074 | 127 | |
zgu@7074 | 128 | class MallocMemorySummary; |
zgu@7074 | 129 | |
// A snapshot of malloc'd memory, includes malloc memory
// usage by types and memory used by tracking itself.
class MallocMemorySnapshot : public ResourceObj {
  friend class MallocMemorySummary;

 private:
  // Per-type counters, indexed by NMTUtil::flag_to_index().
  MallocMemory  _malloc[mt_number_of_types];
  // Memory consumed by the malloc tracking headers themselves.
  MemoryCounter _tracking_header;


 public:
  // Counters for the given memory type.
  inline MallocMemory* by_type(MEMFLAGS flags) {
    int index = NMTUtil::flag_to_index(flags);
    return &_malloc[index];
  }

  // Counters at a raw table index; index must be in [0, mt_number_of_types).
  inline MallocMemory* by_index(int index) {
    assert(index >= 0, "Index out of bound");
    assert(index < mt_number_of_types, "Index out of bound");
    return &_malloc[index];
  }

  // NMT's own overhead: memory used by malloc tracking headers.
  inline MemoryCounter* malloc_overhead() {
    return &_tracking_header;
  }

  // Total malloc'd memory amount
  size_t total() const;
  // Total malloc'd memory used by arenas
  size_t total_arena() const;

  // Number of malloc records under mtThreadStack; presumably one record
  // per thread stack, hence "thread count" — TODO confirm against callers.
  inline size_t thread_count() const {
    // const_cast only because by_type() is non-const; nothing is mutated here.
    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
    return s->by_type(mtThreadStack)->malloc_count();
  }

  // Member-wise copy of all counters into 's' (uses friend access on 's').
  void copy_to(MallocMemorySnapshot* s) {
    s->_tracking_header = _tracking_header;
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_malloc[index] = _malloc[index];
    }
  }

  // Make adjustment by subtracting chunks used by arenas
  // from total chunks to get total free chunk size
  void make_adjustment();
};
zgu@7074 | 177 | |
zgu@7074 | 178 | /* |
zgu@7074 | 179 | * This class is for collecting malloc statistics at summary level |
zgu@7074 | 180 | */ |
zgu@7074 | 181 | class MallocMemorySummary : AllStatic { |
zgu@7074 | 182 | private: |
zgu@7074 | 183 | // Reserve memory for placement of MallocMemorySnapshot object |
zgu@7074 | 184 | static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)]; |
zgu@7074 | 185 | |
zgu@7074 | 186 | public: |
zgu@7074 | 187 | static void initialize(); |
zgu@7074 | 188 | |
zgu@7074 | 189 | static inline void record_malloc(size_t size, MEMFLAGS flag) { |
zgu@7074 | 190 | as_snapshot()->by_type(flag)->record_malloc(size); |
zgu@7074 | 191 | } |
zgu@7074 | 192 | |
zgu@7074 | 193 | static inline void record_free(size_t size, MEMFLAGS flag) { |
zgu@7074 | 194 | as_snapshot()->by_type(flag)->record_free(size); |
zgu@7074 | 195 | } |
zgu@7074 | 196 | |
zgu@7074 | 197 | static inline void record_new_arena(MEMFLAGS flag) { |
zgu@7074 | 198 | as_snapshot()->by_type(flag)->record_new_arena(); |
zgu@7074 | 199 | } |
zgu@7074 | 200 | |
zgu@7074 | 201 | static inline void record_arena_free(MEMFLAGS flag) { |
zgu@7074 | 202 | as_snapshot()->by_type(flag)->record_arena_free(); |
zgu@7074 | 203 | } |
zgu@7074 | 204 | |
zgu@7074 | 205 | static inline void record_arena_size_change(long size, MEMFLAGS flag) { |
zgu@7074 | 206 | as_snapshot()->by_type(flag)->record_arena_size_change(size); |
zgu@7074 | 207 | } |
zgu@7074 | 208 | |
zgu@7074 | 209 | static void snapshot(MallocMemorySnapshot* s) { |
zgu@7074 | 210 | as_snapshot()->copy_to(s); |
zgu@7074 | 211 | s->make_adjustment(); |
zgu@7074 | 212 | } |
zgu@7074 | 213 | |
zgu@7074 | 214 | // Record memory used by malloc tracking header |
zgu@7074 | 215 | static inline void record_new_malloc_header(size_t sz) { |
zgu@7074 | 216 | as_snapshot()->malloc_overhead()->allocate(sz); |
zgu@7074 | 217 | } |
zgu@7074 | 218 | |
zgu@7074 | 219 | static inline void record_free_malloc_header(size_t sz) { |
zgu@7074 | 220 | as_snapshot()->malloc_overhead()->deallocate(sz); |
zgu@7074 | 221 | } |
zgu@7074 | 222 | |
zgu@7074 | 223 | // The memory used by malloc tracking headers |
zgu@7074 | 224 | static inline size_t tracking_overhead() { |
zgu@7074 | 225 | return as_snapshot()->malloc_overhead()->size(); |
zgu@7074 | 226 | } |
zgu@7074 | 227 | |
zgu@7074 | 228 | static MallocMemorySnapshot* as_snapshot() { |
zgu@7074 | 229 | return (MallocMemorySnapshot*)_snapshot; |
zgu@7074 | 230 | } |
zgu@7074 | 231 | }; |
zgu@7074 | 232 | |
zgu@7074 | 233 | |
/*
 * Malloc tracking header.
 * To satisfy malloc alignment requirement, NMT uses 2 machine words for tracking purpose,
 * which ensures 8-bytes alignment on 32-bit systems and 16-bytes on 64-bit systems (Product build).
 */

class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
  // Word 1: allocation size. Word 2: flags + malloc-site table coordinates
  // (8 + 16 + 40 = 64 bits).
  size_t _size      : 64;
  size_t _flags     : 8;
  size_t _pos_idx   : 16;
  size_t _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE  right_n_bits(40)
#define MAX_BUCKET_LENGTH          right_n_bits(16)
#else
  // 32-bit layout: two 32-bit words (32 size + 8 + 8 + 16 = 64 bits total).
  size_t _size      : 32;
  size_t _flags     : 8;
  size_t _pos_idx   : 8;
  size_t _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE  right_n_bits(16)
#define MAX_BUCKET_LENGTH          right_n_bits(8)
#endif  // _LP64

 public:
  // Initializes the header and records the allocation according to the
  // active tracking level. Under NMT_minimal the fields are left unset and
  // no accounting is done (early return below).
  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    if (level == NMT_minimal) {
      return;
    }

    _flags = flags;
    set_size(size);
    // At detail level, additionally record the allocation call site and
    // remember its position in the malloc-site table.
    if (level == NMT_detail) {
      size_t bucket_idx;
      size_t pos_idx;
      if (record_malloc_site(stack, size, &bucket_idx, &pos_idx, flags)) {
        assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
        assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
        _bucket_idx = bucket_idx;
        _pos_idx = pos_idx;
      }
    }

    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }

  inline size_t size()  const { return _size; }
  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
  // Reconstructs the recorded call stack; out-parameter 'stack'.
  bool get_stack(NativeCallStack& stack) const;

  // Cleanup tracking information before the memory is released.
  void release() const;

 private:
  inline void set_size(size_t size) {
    _size = size;
  }
  // Records 'stack' in the malloc-site table; returns true on success and
  // fills *bucket_idx / *pos_idx with the site's table coordinates.
  bool record_malloc_site(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const;
};
zgu@7074 | 297 | |
zgu@7074 | 298 | |
zgu@7074 | 299 | // Main class called from MemTracker to track malloc activities |
zgu@7074 | 300 | class MallocTracker : AllStatic { |
zgu@7074 | 301 | public: |
zgu@7074 | 302 | // Initialize malloc tracker for specific tracking level |
zgu@7074 | 303 | static bool initialize(NMT_TrackingLevel level); |
zgu@7074 | 304 | |
zgu@7074 | 305 | static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to); |
zgu@7074 | 306 | |
zgu@7074 | 307 | // malloc tracking header size for specific tracking level |
zgu@7074 | 308 | static inline size_t malloc_header_size(NMT_TrackingLevel level) { |
zgu@7074 | 309 | return (level == NMT_off) ? 0 : sizeof(MallocHeader); |
zgu@7074 | 310 | } |
zgu@7074 | 311 | |
zgu@7074 | 312 | // Parameter name convention: |
zgu@7074 | 313 | // memblock : the beginning address for user data |
zgu@7074 | 314 | // malloc_base: the beginning address that includes malloc tracking header |
zgu@7074 | 315 | // |
zgu@7074 | 316 | // The relationship: |
zgu@7074 | 317 | // memblock = (char*)malloc_base + sizeof(nmt header) |
zgu@7074 | 318 | // |
zgu@7074 | 319 | |
zgu@7074 | 320 | // Record malloc on specified memory block |
zgu@7074 | 321 | static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags, |
zgu@7074 | 322 | const NativeCallStack& stack, NMT_TrackingLevel level); |
zgu@7074 | 323 | |
zgu@7074 | 324 | // Record free on specified memory block |
zgu@7074 | 325 | static void* record_free(void* memblock); |
zgu@7074 | 326 | |
zgu@7074 | 327 | // Offset memory address to header address |
zgu@7074 | 328 | static inline void* get_base(void* memblock); |
zgu@7074 | 329 | static inline void* get_base(void* memblock, NMT_TrackingLevel level) { |
zgu@7074 | 330 | if (memblock == NULL || level == NMT_off) return memblock; |
zgu@7074 | 331 | return (char*)memblock - malloc_header_size(level); |
zgu@7074 | 332 | } |
zgu@7074 | 333 | |
zgu@7074 | 334 | // Get memory size |
zgu@7074 | 335 | static inline size_t get_size(void* memblock) { |
zgu@7074 | 336 | MallocHeader* header = malloc_header(memblock); |
zgu@7074 | 337 | return header->size(); |
zgu@7074 | 338 | } |
zgu@7074 | 339 | |
zgu@7074 | 340 | // Get memory type |
zgu@7074 | 341 | static inline MEMFLAGS get_flags(void* memblock) { |
zgu@7074 | 342 | MallocHeader* header = malloc_header(memblock); |
zgu@7074 | 343 | return header->flags(); |
zgu@7074 | 344 | } |
zgu@7074 | 345 | |
zgu@7074 | 346 | // Get header size |
zgu@7074 | 347 | static inline size_t get_header_size(void* memblock) { |
zgu@7074 | 348 | return (memblock == NULL) ? 0 : sizeof(MallocHeader); |
zgu@7074 | 349 | } |
zgu@7074 | 350 | |
zgu@7074 | 351 | static inline void record_new_arena(MEMFLAGS flags) { |
zgu@7074 | 352 | MallocMemorySummary::record_new_arena(flags); |
zgu@7074 | 353 | } |
zgu@7074 | 354 | |
zgu@7074 | 355 | static inline void record_arena_free(MEMFLAGS flags) { |
zgu@7074 | 356 | MallocMemorySummary::record_arena_free(flags); |
zgu@7074 | 357 | } |
zgu@7074 | 358 | |
zgu@7074 | 359 | static inline void record_arena_size_change(int size, MEMFLAGS flags) { |
zgu@7074 | 360 | MallocMemorySummary::record_arena_size_change(size, flags); |
zgu@7074 | 361 | } |
zgu@7074 | 362 | private: |
zgu@7074 | 363 | static inline MallocHeader* malloc_header(void *memblock) { |
zgu@7074 | 364 | assert(memblock != NULL, "NULL pointer"); |
zgu@7074 | 365 | MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader)); |
zgu@7074 | 366 | return header; |
zgu@7074 | 367 | } |
zgu@7074 | 368 | }; |
zgu@7074 | 369 | |
zgu@7074 | 370 | #endif // INCLUDE_NMT |
zgu@7074 | 371 | |
zgu@7074 | 372 | |
zgu@7074 | 373 | #endif //SHARE_VM_SERVICES_MALLOC_TRACKER_HPP |