src/share/vm/services/virtualMemoryTracker.hpp

/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"


/*
 * Virtual memory counter
 */
class VirtualMemory VALUE_OBJ_CLASS_SPEC {
 private:
  size_t _reserved;
  size_t _committed;

 public:
  VirtualMemory() : _reserved(0), _committed(0) { }

  inline void reserve_memory(size_t sz) { _reserved += sz; }
  inline void commit_memory (size_t sz) {
    _committed += sz;
    assert(_committed <= _reserved, "Sanity check");
  }

  inline void release_memory (size_t sz) {
    assert(_reserved >= sz, "Negative amount");
    _reserved -= sz;
  }

  inline void uncommit_memory(size_t sz) {
    assert(_committed >= sz, "Negative amount");
    _committed -= sz;
  }

  inline size_t reserved()  const { return _reserved;  }
  inline size_t committed() const { return _committed; }
};
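
// Illustrative usage sketch (not part of the original header; sizes are made
// up, K is HotSpot's 1024 constant). The counters move in reserve/release and
// commit/uncommit pairs, and committed memory may never exceed reserved:
//
//   VirtualMemory vm;
//   vm.reserve_memory(64 * K);    // reserved = 64K, committed = 0
//   vm.commit_memory(16 * K);     // committed = 16K (asserts committed <= reserved)
//   vm.uncommit_memory(16 * K);   // committed = 0
//   vm.release_memory(64 * K);    // reserved = 0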

// Virtual memory allocation site, keeps track of where the virtual memory is reserved.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
 public:
  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
    AllocationSite<VirtualMemory>(stack) { }

  inline void reserve_memory (size_t sz) { data()->reserve_memory(sz);  }
  inline void commit_memory  (size_t sz) { data()->commit_memory(sz);   }
  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
  inline void release_memory (size_t sz) { data()->release_memory(sz);  }
  inline size_t reserved()  const { return peek()->reserved();  }
  inline size_t committed() const { return peek()->committed(); }
};

class VirtualMemorySummary;

// This class represents a snapshot of virtual memory at a given time.
// The latest snapshot is saved in a static area.
class VirtualMemorySnapshot : public ResourceObj {
  friend class VirtualMemorySummary;

 private:
  VirtualMemory _virtual_memory[mt_number_of_types];

 public:
  inline VirtualMemory* by_type(MEMFLAGS flag) {
    int index = NMTUtil::flag_to_index(flag);
    return &_virtual_memory[index];
  }

  inline VirtualMemory* by_index(int index) {
    assert(index >= 0, "Index out of bound");
    assert(index < mt_number_of_types, "Index out of bound");
    return &_virtual_memory[index];
  }

  inline size_t total_reserved() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index++) {
      amount += _virtual_memory[index].reserved();
    }
    return amount;
  }

  inline size_t total_committed() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index++) {
      amount += _virtual_memory[index].committed();
    }
    return amount;
  }

  void copy_to(VirtualMemorySnapshot* s) {
    for (int index = 0; index < mt_number_of_types; index++) {
      s->_virtual_memory[index] = _virtual_memory[index];
    }
  }
};
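
// Illustrative sketch (an assumption, not in the original source): reading a
// snapshot copied out by VirtualMemorySummary::snapshot() below; mtThread is
// one of the MEMFLAGS memory types:
//
//   VirtualMemorySnapshot s;
//   VirtualMemorySummary::snapshot(&s);
//   size_t thread_reserved = s.by_type(mtThread)->reserved();
//   size_t all_committed   = s.total_committed();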

class VirtualMemorySummary : AllStatic {
 public:
  static void initialize();

  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->reserve_memory(size);
  }

  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->commit_memory(size);
  }

  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->uncommit_memory(size);
  }

  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->release_memory(size);
  }

  // Move virtual memory from one memory type to another.
  // Virtual memory can be reserved before it is associated with a memory type,
  // and tagged as 'unknown'. Once the memory is tagged, the virtual memory is
  // moved from the 'unknown' type to the specified memory type.
  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->release_memory(size);
    as_snapshot()->by_type(to)->reserve_memory(size);
  }

  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->uncommit_memory(size);
    as_snapshot()->by_type(to)->commit_memory(size);
  }

  static inline void snapshot(VirtualMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
  }

  static VirtualMemorySnapshot* as_snapshot() {
    return (VirtualMemorySnapshot*)_snapshot;
  }

 private:
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};
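
// Illustrative sketch (assumed call sequence based on the comment above;
// mtJavaHeap stands in for any concrete memory type): memory reserved before
// its owner is known is recorded as mtNone, then moved once it is tagged:
//
//   VirtualMemorySummary::record_reserved_memory(sz, mtNone);
//   VirtualMemorySummary::record_committed_memory(sz, mtNone);
//   // later, when the region's owner becomes known:
//   VirtualMemorySummary::move_reserved_memory(mtNone, mtJavaHeap, sz);
//   VirtualMemorySummary::move_committed_memory(mtNone, mtJavaHeap, sz);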


/*
 * A virtual memory region
 */
class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
 private:
  address _base_address;
  size_t  _size;

 public:
  VirtualMemoryRegion(address addr, size_t size) :
    _base_address(addr), _size(size) {
    assert(addr != NULL, "Invalid address");
    assert(size > 0, "Invalid size");
  }

  inline address base() const { return _base_address;   }
  inline address end()  const { return base() + size(); }
  inline size_t  size() const { return _size;           }

  inline bool is_empty() const { return size() == 0; }

  inline bool contain_address(address addr) const {
    return (addr >= base() && addr < end());
  }

  inline bool contain_region(address addr, size_t size) const {
    return contain_address(addr) && contain_address(addr + size - 1);
  }

  inline bool same_region(address addr, size_t sz) const {
    return (addr == base() && sz == size());
  }

  inline bool overlap_region(address addr, size_t sz) const {
    VirtualMemoryRegion rgn(addr, sz);
    return contain_address(addr) ||
           contain_address(addr + sz - 1) ||
           rgn.contain_address(base()) ||
           rgn.contain_address(end() - 1);
  }

  inline bool adjacent_to(address addr, size_t sz) const {
    return (addr == end() || (addr + sz) == base());
  }

  void exclude_region(address addr, size_t sz) {
    assert(contain_region(addr, sz), "Not containment");
    assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
    size_t new_size = size() - sz;

    if (addr == base()) {
      set_base(addr + sz);
    }
    set_size(new_size);
  }

  void expand_region(address addr, size_t sz) {
    assert(adjacent_to(addr, sz), "Not adjacent regions");
    if (base() == addr + sz) {
      set_base(addr);
    }
    set_size(size() + sz);
  }

 protected:
  void set_base(address base) {
    assert(base != NULL, "Sanity check");
    _base_address = base;
  }

  void set_size(size_t size) {
    assert(size > 0, "Sanity check");
    _size = size;
  }
};
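
// Illustrative sketch (made-up addresses): exclude_region() can only trim
// from either end of a region, and expand_region() can only grow into an
// immediately adjacent range:
//
//   VirtualMemoryRegion rgn((address)0x1000, 0x4000); // [0x1000, 0x5000)
//   rgn.exclude_region((address)0x4000, 0x1000);      // -> [0x1000, 0x4000)
//   rgn.expand_region((address)0x4000, 0x2000);       // -> [0x1000, 0x6000)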


class CommittedMemoryRegion : public VirtualMemoryRegion {
 private:
  NativeCallStack _stack;

 public:
  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
    VirtualMemoryRegion(addr, size), _stack(stack) { }

  inline int compare(const CommittedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size()) ||
        adjacent_to  (rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const CommittedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }
};
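
// Illustrative sketch (made-up addresses): compare() reports overlapping or
// adjacent committed regions as equal (0), which is how the sorted committed
// list below can find a neighbor to merge with:
//
//   const NativeCallStack& cs = NativeCallStack::empty_stack();
//   CommittedMemoryRegion a((address)0x1000, 0x1000, cs); // [0x1000, 0x2000)
//   CommittedMemoryRegion b((address)0x2000, 0x1000, cs); // [0x2000, 0x3000)
//   a.compare(b); // 0: b starts exactly where a ends, so they are adjacent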


typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;

int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);

class ReservedMemoryRegion : public VirtualMemoryRegion {
 private:
  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
    _committed_regions;

  NativeCallStack _stack;
  MEMFLAGS        _flag;

  bool            _all_committed;

 public:
  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) :
    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
    _all_committed(false) { }


  ReservedMemoryRegion(address base, size_t size) :
    VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _flag(mtNone),
    _all_committed(false) { }

  // Copy constructor
  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
    VirtualMemoryRegion(rr.base(), rr.size()) {
    *this = rr;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }

  void set_flag(MEMFLAGS flag);
  inline MEMFLAGS flag() const { return _flag; }

  inline int compare(const ReservedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const ReservedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  bool remove_uncommitted_region(address addr, size_t size);

  size_t committed_size() const;

  // Move committed regions that are higher than the specified address to
  // the new region
  void move_committed_regions(address addr, ReservedMemoryRegion& rgn);

  inline bool all_committed() const { return _all_committed; }
  void set_all_committed(bool b);

  CommittedRegionIterator iterate_committed_regions() const {
    return CommittedRegionIterator(_committed_regions.head());
  }

  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
    set_base(other.base());
    set_size(other.size());

    _stack = *other.call_stack();
    _flag  = other.flag();
    _all_committed = other.all_committed();
    if (other.all_committed()) {
      set_all_committed(true);
    } else {
      CommittedRegionIterator itr = other.iterate_committed_regions();
      const CommittedMemoryRegion* rgn = itr.next();
      while (rgn != NULL) {
        _committed_regions.add(*rgn);
        rgn = itr.next();
      }
    }
    return *this;
  }

 private:
  // The committed region contains the uncommitted region; subtract the
  // uncommitted region from this committed region.
  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
    address addr, size_t sz);

  bool add_committed_region(const CommittedMemoryRegion& rgn) {
    assert(rgn.base() != NULL, "Invalid base address");
    assert(size() > 0, "Invalid size");
    return _committed_regions.add(rgn) != NULL;
  }
};

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);

class VirtualMemoryWalker : public StackObj {
 public:
  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};
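
// Illustrative sketch (hypothetical subclass, not in the original source):
// a walker that sums committed bytes across all reserved regions; returning
// true from do_allocation_site keeps the walk going:
//
//   class CommittedSizeWalker : public VirtualMemoryWalker {
//    private:
//     size_t _total;
//    public:
//     CommittedSizeWalker() : _total(0) { }
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       _total += rgn->committed_size();
//       return true;
//     }
//     size_t total() const { return _total; }
//   };
//
//   CommittedSizeWalker walker;
//   VirtualMemoryTracker::walk_virtual_memory(&walker);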

// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
 public:
  static bool initialize(NMT_TrackingLevel level);

  // Late phase initialization
  static bool late_initialize(NMT_TrackingLevel level);

  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone, bool all_committed = false);

  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
  static bool remove_uncommitted_region (address base_addr, size_t size);
  static bool remove_released_region    (address base_addr, size_t size);
  static void set_reserved_region_type  (address addr, MEMFLAGS flag);

  // Walk virtual memory data structure for creating baseline, etc.
  static bool walk_virtual_memory(VirtualMemoryWalker* walker);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

 private:
  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
};
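
// Illustrative sketch (assumed call sequence, mirroring how MemTracker is
// expected to drive this class; base, size, commit_size and stack come from
// the actual reservation):
//
//   VirtualMemoryTracker::add_reserved_region(base, size, stack);  // mtNone
//   VirtualMemoryTracker::set_reserved_region_type(base, mtGC);    // tag owner
//   VirtualMemoryTracker::add_committed_region(base, commit_size, stack);
//   VirtualMemoryTracker::remove_uncommitted_region(base, commit_size);
//   VirtualMemoryTracker::remove_released_region(base, size);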


#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
