src/share/vm/services/virtualMemoryTracker.hpp

changeset 7074:833b0f92429a
author:    zgu
date:      Wed, 27 Aug 2014 08:19:12 -0400

8046598: Scalable Native memory tracking development
Summary: Enhance scalability of native memory tracking
Reviewed-by: coleenp, ctornqvi, gtriantafill

/*
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"

/*
 * Virtual memory counter
 */
class VirtualMemory VALUE_OBJ_CLASS_SPEC {
 private:
  size_t _reserved;
  size_t _committed;

 public:
  VirtualMemory() : _reserved(0), _committed(0) { }

  inline void reserve_memory(size_t sz) { _reserved += sz; }
  inline void commit_memory (size_t sz) {
    _committed += sz;
    assert(_committed <= _reserved, "Sanity check");
  }

  inline void release_memory (size_t sz) {
    assert(_reserved >= sz, "Negative amount");
    _reserved -= sz;
  }

  inline void uncommit_memory(size_t sz) {
    assert(_committed >= sz, "Negative amount");
    _committed -= sz;
  }

  void reset() {
    _reserved  = 0;
    _committed = 0;
  }

  inline size_t reserved()  const { return _reserved; }
  inline size_t committed() const { return _committed; }
};
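
// Usage sketch (illustrative only, not part of the original file): committed
// memory can never exceed reserved memory, and each counter returns to zero
// once the matching uncommit/release is recorded. K is HotSpot's 1024 constant.
//
//   VirtualMemory vm;
//   vm.reserve_memory(4 * K);    // reserved = 4K, committed = 0
//   vm.commit_memory(1 * K);     // reserved = 4K, committed = 1K
//   vm.uncommit_memory(1 * K);   // committed back to 0
//   vm.release_memory(4 * K);    // reserved back to 0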

// Virtual memory allocation site, keeps track of where the virtual memory is reserved.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
 public:
  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
    AllocationSite<VirtualMemory>(stack) { }

  inline void reserve_memory (size_t sz) { data()->reserve_memory(sz); }
  inline void commit_memory  (size_t sz) { data()->commit_memory(sz); }
  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
  inline void release_memory (size_t sz) { data()->release_memory(sz); }
  inline size_t reserved()  const { return peek()->reserved(); }
  inline size_t committed() const { return peek()->committed(); }
};

class VirtualMemorySummary;

// This class represents a snapshot of virtual memory at a given time.
// The latest snapshot is saved in a static area.
class VirtualMemorySnapshot : public ResourceObj {
  friend class VirtualMemorySummary;

 private:
  VirtualMemory _virtual_memory[mt_number_of_types];

 public:
  inline VirtualMemory* by_type(MEMFLAGS flag) {
    int index = NMTUtil::flag_to_index(flag);
    return &_virtual_memory[index];
  }

  inline VirtualMemory* by_index(int index) {
    assert(index >= 0, "Index out of bounds");
    assert(index < mt_number_of_types, "Index out of bounds");
    return &_virtual_memory[index];
  }

  inline size_t total_reserved() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index++) {
      amount += _virtual_memory[index].reserved();
    }
    return amount;
  }

  inline size_t total_committed() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index++) {
      amount += _virtual_memory[index].committed();
    }
    return amount;
  }

  inline void reset() {
    for (int index = 0; index < mt_number_of_types; index++) {
      _virtual_memory[index].reset();
    }
  }

  void copy_to(VirtualMemorySnapshot* s) {
    for (int index = 0; index < mt_number_of_types; index++) {
      s->_virtual_memory[index] = _virtual_memory[index];
    }
  }
};
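
// Example (illustrative only): a caller copies the live counters via
// VirtualMemorySummary below, then sums across all memory types.
//
//   VirtualMemorySnapshot snapshot;
//   VirtualMemorySummary::snapshot(&snapshot);
//   size_t reserved  = snapshot.total_reserved();
//   size_t committed = snapshot.total_committed();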

class VirtualMemorySummary : AllStatic {
 public:
  static void initialize();

  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->reserve_memory(size);
  }

  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->commit_memory(size);
  }

  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->uncommit_memory(size);
  }

  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->release_memory(size);
  }

  // Move virtual memory from one memory type to another.
  // Virtual memory can be reserved before it is associated with a memory type,
  // in which case it is tagged as 'unknown'. Once the memory is tagged, it is
  // moved from the 'unknown' type to the specified memory type.
  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->release_memory(size);
    as_snapshot()->by_type(to)->reserve_memory(size);
  }

  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->uncommit_memory(size);
    as_snapshot()->by_type(to)->commit_memory(size);
  }
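
  // For example (illustrative only), a region reserved before its purpose is
  // known can later be re-tagged as thread stack:
  //
  //   VirtualMemorySummary::move_reserved_memory(mtNone, mtThreadStack, size);
  //   VirtualMemorySummary::move_committed_memory(mtNone, mtThreadStack, committed);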

  static inline void snapshot(VirtualMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
  }

  static inline void reset() {
    as_snapshot()->reset();
  }

  static VirtualMemorySnapshot* as_snapshot() {
    return (VirtualMemorySnapshot*)_snapshot;
  }

 private:
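  // Raw, pre-sized storage for the singleton snapshot. Keeping it as a plain
  // size_t array (sized via CALC_OBJ_SIZE_IN_TYPE) avoids a static constructor;
  // initialize() is expected to construct the VirtualMemorySnapshot in this
  // buffer before any tracking calls are made.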
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};

/*
 * A virtual memory region
 */
class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
 private:
  address _base_address;
  size_t  _size;

 public:
  VirtualMemoryRegion(address addr, size_t size) :
    _base_address(addr), _size(size) {
    assert(addr != NULL, "Invalid address");
    assert(size > 0, "Invalid size");
  }

  inline address base() const { return _base_address; }
  inline address end()  const { return base() + size(); }
  inline size_t  size() const { return _size; }

  inline bool is_empty() const { return size() == 0; }

  inline bool contain_address(address addr) const {
    return (addr >= base() && addr < end());
  }

  inline bool contain_region(address addr, size_t size) const {
    return contain_address(addr) && contain_address(addr + size - 1);
  }

  inline bool same_region(address addr, size_t sz) const {
    return (addr == base() && sz == size());
  }

  inline bool overlap_region(address addr, size_t sz) const {
    VirtualMemoryRegion rgn(addr, sz);
    return contain_address(addr) ||
           contain_address(addr + sz - 1) ||
           rgn.contain_address(base()) ||
           rgn.contain_address(end() - 1);
  }

  inline bool adjacent_to(address addr, size_t sz) const {
    return (addr == end() || (addr + sz) == base());
  }

  void exclude_region(address addr, size_t sz) {
    assert(contain_region(addr, sz), "Region not contained");
    assert(addr == base() || addr + sz == end(), "Can not exclude from middle");
    size_t new_size = size() - sz;

    if (addr == base()) {
      set_base(addr + sz);
    }
    set_size(new_size);
  }

  void expand_region(address addr, size_t sz) {
    assert(adjacent_to(addr, sz), "Not adjacent regions");
    if (base() == addr + sz) {
      set_base(addr);
    }
    set_size(size() + sz);
  }
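
  // Worked example (illustrative only): given a region [0x1000, 0x5000),
  //   exclude_region(0x1000, 0x1000) trims the front, leaving [0x2000, 0x5000);
  //   exclude_region(0x4000, 0x1000) trims the back,  leaving [0x1000, 0x4000);
  //   expand_region (0x5000, 0x1000) appends,         giving  [0x1000, 0x6000).
  // Excluding from the middle is not supported, because it would split the
  // region in two.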

 protected:
  void set_base(address base) {
    assert(base != NULL, "Sanity check");
    _base_address = base;
  }

  void set_size(size_t size) {
    assert(size > 0, "Sanity check");
    _size = size;
  }
};

class CommittedMemoryRegion : public VirtualMemoryRegion {
 private:
  NativeCallStack _stack;

 public:
  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
    VirtualMemoryRegion(addr, size), _stack(stack) { }

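  // Regions are ordered by base address. Overlapping or adjacent regions
  // compare as equal, so the sorted list that holds them can recognize
  // regions that should be merged into a single committed region.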
  inline int compare(const CommittedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size()) ||
        adjacent_to   (rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const CommittedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }
};

typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;

int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);

class ReservedMemoryRegion : public VirtualMemoryRegion {
 private:
  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
    _committed_regions;

  NativeCallStack _stack;
  MEMFLAGS        _flag;

  bool            _all_committed;

 public:
  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) :
    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
    _all_committed(false) { }

  ReservedMemoryRegion(address base, size_t size) :
    VirtualMemoryRegion(base, size), _stack(emptyStack), _flag(mtNone),
    _all_committed(false) { }

  // Copy constructor
  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
    VirtualMemoryRegion(rr.base(), rr.size()) {
    *this = rr;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }

  void set_flag(MEMFLAGS flag);
  inline MEMFLAGS flag() const { return _flag; }

  inline int compare(const ReservedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const ReservedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  bool remove_uncommitted_region(address addr, size_t size);

  size_t committed_size() const;

  // Move committed regions that are higher than the specified address to
  // the new region.
  void move_committed_regions(address addr, ReservedMemoryRegion& rgn);

  inline bool all_committed() const { return _all_committed; }
  void set_all_committed(bool b);

  CommittedRegionIterator iterate_committed_regions() const {
    return CommittedRegionIterator(_committed_regions.head());
  }

  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
    set_base(other.base());
    set_size(other.size());

    _stack = *other.call_stack();
    _flag  = other.flag();
    _all_committed = other.all_committed();
    if (other.all_committed()) {
      set_all_committed(true);
    } else {
      CommittedRegionIterator itr = other.iterate_committed_regions();
      const CommittedMemoryRegion* rgn = itr.next();
      while (rgn != NULL) {
        _committed_regions.add(*rgn);
        rgn = itr.next();
      }
    }
    return *this;
  }

 private:
  // If the committed region contains the uncommitted region, subtract the
  // uncommitted region from this committed region.
  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
                                 address addr, size_t sz);

  bool add_committed_region(const CommittedMemoryRegion& rgn) {
    assert(rgn.base() != NULL, "Invalid base address");
    assert(size() > 0, "Invalid size");
    return _committed_regions.add(rgn) != NULL;
  }
};

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);

class VirtualMemoryWalker : public StackObj {
 public:
  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};
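
// Example walker (illustrative only; RegionPrinter is not part of HotSpot).
// walk_virtual_memory() below invokes do_allocation_site() for each tracked
// reserved region; returning false is treated as a request to stop the walk.
//
//   class RegionPrinter : public VirtualMemoryWalker {
//    public:
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       tty->print_cr("[" PTR_FORMAT " - " PTR_FORMAT ") %s",
//                     p2i(rgn->base()), p2i(rgn->end()),
//                     NMTUtil::flag_to_name(rgn->flag()));
//       return true;  // keep walking
//     }
//   };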

// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
 public:
  static bool initialize(NMT_TrackingLevel level);

  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone, bool all_committed = false);

  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
  static bool remove_uncommitted_region (address base_addr, size_t size);
  static bool remove_released_region    (address base_addr, size_t size);
  static void set_reserved_region_type  (address addr, MEMFLAGS flag);

  // Walk the virtual memory data structure, e.g. for creating a baseline.
  static bool walk_virtual_memory(VirtualMemoryWalker* walker);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

 private:
  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> _reserved_regions;
};
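
// Typical call sequence (illustrative only), mirroring one region's
// reserve / commit / uncommit / release lifecycle; CALLER_PC is NMT's
// call-stack capture macro and mtTest a MEMFLAGS value used here as an example:
//
//   VirtualMemoryTracker::add_reserved_region(base, rsize, CALLER_PC, mtTest);
//   VirtualMemoryTracker::add_committed_region(base, csize, CALLER_PC);
//   VirtualMemoryTracker::remove_uncommitted_region(base, csize);
//   VirtualMemoryTracker::remove_released_region(base, rsize);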


#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
