src/share/vm/services/virtualMemoryTracker.hpp

changeset 7074:833b0f92429a
child     7077:36c9011aaead
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/services/virtualMemoryTracker.hpp	Wed Aug 27 08:19:12 2014 -0400
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
+#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
+
+#if INCLUDE_NMT
+
+#include "memory/allocation.hpp"
+#include "services/allocationSite.hpp"
+#include "services/nmtCommon.hpp"
+#include "utilities/linkedlist.hpp"
+#include "utilities/nativeCallStack.hpp"
+#include "utilities/ostream.hpp"
+
+
+/*
+ * Virtual memory counter
+ */
+class VirtualMemory VALUE_OBJ_CLASS_SPEC {
+ private:
+  size_t     _reserved;
+  size_t     _committed;
+
+ public:
+  VirtualMemory() : _reserved(0), _committed(0) { }
+
+  inline void reserve_memory(size_t sz) { _reserved += sz; }
+  inline void commit_memory (size_t sz) {
+    _committed += sz;
+    assert(_committed <= _reserved, "Sanity check");
+  }
+
+  inline void release_memory (size_t sz) {
+    assert(_reserved >= sz, "Negative amount");
+    _reserved -= sz;
+  }
+
+  inline void uncommit_memory(size_t sz) {
+    assert(_committed >= sz, "Negative amount");
+    _committed -= sz;
+  }
+
+  void reset() {
+    _reserved  = 0;
+    _committed = 0;
+  }
+
+  inline size_t reserved()  const { return _reserved;  }
+  inline size_t committed() const { return _committed; }
+};
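
The counter's invariant is that committed memory never exceeds reserved memory for a type. A minimal standalone sketch of the same semantics (plain C++ substituted for the HotSpot VALUE_OBJ_CLASS_SPEC macro and assert; VMCounter is an illustrative name, not part of this changeset):

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-in for the VirtualMemory counter above.
    struct VMCounter {
      size_t reserved  = 0;
      size_t committed = 0;

      void reserve(size_t sz)  { reserved += sz; }
      void commit(size_t sz)   { committed += sz; assert(committed <= reserved); }
      void uncommit(size_t sz) { assert(committed >= sz); committed -= sz; }
      void release(size_t sz)  { assert(reserved >= sz); reserved -= sz; }
    };

    int main() {
      VMCounter c;
      c.reserve(4096);  // address space is reserved first...
      c.commit(1024);   // ...then committed in (possibly smaller) pieces
      c.uncommit(1024); // uncommit/release must not underflow the counters
      c.release(4096);
      return 0;
    }
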
+
+// Virtual memory allocation site; keeps track of where the virtual memory is reserved.
+class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
+ public:
+  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
+    AllocationSite<VirtualMemory>(stack) { }
+
+  inline void reserve_memory(size_t sz)  { data()->reserve_memory(sz);  }
+  inline void commit_memory (size_t sz)  { data()->commit_memory(sz);   }
+  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
+  inline void release_memory(size_t sz)  { data()->release_memory(sz);  }
+  inline size_t reserved() const  { return peek()->reserved(); }
+  inline size_t committed() const { return peek()->committed(); }
+};
+
+class VirtualMemorySummary;
+
+// This class represents a snapshot of virtual memory at a given time.
+// The latest snapshot is saved in a static area.
+class VirtualMemorySnapshot : public ResourceObj {
+  friend class VirtualMemorySummary;
+
+ private:
+  VirtualMemory  _virtual_memory[mt_number_of_types];
+
+ public:
+  inline VirtualMemory* by_type(MEMFLAGS flag) {
+    int index = NMTUtil::flag_to_index(flag);
+    return &_virtual_memory[index];
+  }
+
+  inline VirtualMemory* by_index(int index) {
+    assert(index >= 0, "Index out of bounds");
+    assert(index < mt_number_of_types, "Index out of bounds");
+    return &_virtual_memory[index];
+  }
+
+  inline size_t total_reserved() const {
+    size_t amount = 0;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      amount += _virtual_memory[index].reserved();
+    }
+    return amount;
+  }
+
+  inline size_t total_committed() const {
+    size_t amount = 0;
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      amount += _virtual_memory[index].committed();
+    }
+    return amount;
+  }
+
+  inline void reset() {
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      _virtual_memory[index].reset();
+    }
+  }
+
+  void copy_to(VirtualMemorySnapshot* s) {
+    for (int index = 0; index < mt_number_of_types; index ++) {
+      s->_virtual_memory[index] = _virtual_memory[index];
+    }
+  }
+};
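
copy_to() exists so that reporting code can work on a stable copy rather than on the live counters, which may be updated concurrently. A hypothetical reporting snippet (not part of this changeset), using the VirtualMemorySummary facade declared below:

    VirtualMemorySnapshot snapshot;
    VirtualMemorySummary::snapshot(&snapshot);     // copies the live counters
    size_t reserved  = snapshot.total_reserved();  // sums across all memory types
    size_t committed = snapshot.total_committed();
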
+
+class VirtualMemorySummary : AllStatic {
+ public:
+  static void initialize();
+
+  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->reserve_memory(size);
+  }
+
+  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->commit_memory(size);
+  }
+
+  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->uncommit_memory(size);
+  }
+
+  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
+    as_snapshot()->by_type(flag)->release_memory(size);
+  }
+
+  // Move virtual memory from one memory type to another.
+  // Virtual memory can be reserved before it is associated with a memory type,
+  // in which case it is tagged as 'unknown'. Once the memory is tagged, it is
+  // moved from the 'unknown' type to the specified memory type.
+  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
+    as_snapshot()->by_type(from)->release_memory(size);
+    as_snapshot()->by_type(to)->reserve_memory(size);
+  }
+
+  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
+    as_snapshot()->by_type(from)->uncommit_memory(size);
+    as_snapshot()->by_type(to)->commit_memory(size);
+  }
+
+  static inline void snapshot(VirtualMemorySnapshot* s) {
+    as_snapshot()->copy_to(s);
+  }
+
+  static inline void reset() {
+    as_snapshot()->reset();
+  }
+
+  static VirtualMemorySnapshot* as_snapshot() {
+    return (VirtualMemorySnapshot*)_snapshot;
+  }
+
+ private:
+  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
+};
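
Note that _snapshot is a raw size_t array rather than a VirtualMemorySnapshot object: zero-initialized static storage is usable before any C++ static constructors run, and as_snapshot() simply reinterprets it. A standalone sketch of the pattern (Snapshot and storage are illustrative names; CALC_OBJ_SIZE_IN_TYPE is HotSpot's rounded object-size-in-units macro, expanded by hand here as an assumption):

    #include <cstddef>

    struct Snapshot { size_t counters[16]; };  // stand-in for VirtualMemorySnapshot

    // Rounded-up object size in size_t units; zeroed at load time, so there
    // are no static-constructor ordering issues.
    static size_t storage[(sizeof(Snapshot) + sizeof(size_t) - 1) / sizeof(size_t)];

    static Snapshot* as_snapshot() {
      return reinterpret_cast<Snapshot*>(storage);
    }
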
+
+
+
+/*
+ * A virtual memory region
+ */
+class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
+ private:
+  address      _base_address;
+  size_t       _size;
+
+ public:
+  VirtualMemoryRegion(address addr, size_t size) :
+    _base_address(addr), _size(size) {
+     assert(addr != NULL, "Invalid address");
+     assert(size > 0, "Invalid size");
+   }
+
+  inline address base() const { return _base_address;   }
+  inline address end()  const { return base() + size(); }
+  inline size_t  size() const { return _size;           }
+
+  inline bool is_empty() const { return size() == 0; }
+
+  inline bool contain_address(address addr) const {
+    return (addr >= base() && addr < end());
+  }
+
+
+  inline bool contain_region(address addr, size_t size) const {
+    return contain_address(addr) && contain_address(addr + size - 1);
+  }
+
+  inline bool same_region(address addr, size_t sz) const {
+    return (addr == base() && sz == size());
+  }
+
+
+  inline bool overlap_region(address addr, size_t sz) const {
+    VirtualMemoryRegion rgn(addr, sz);
+    return contain_address(addr) ||
+           contain_address(addr + sz - 1) ||
+           rgn.contain_address(base()) ||
+           rgn.contain_address(end() - 1);
+  }
+
+  inline bool adjacent_to(address addr, size_t sz) const {
+    return (addr == end() || (addr + sz) == base());
+  }
+
+  void exclude_region(address addr, size_t sz) {
+    assert(contain_region(addr, sz), "Region not contained");
+    assert(addr == base() || addr + sz == end(), "Cannot exclude from the middle");
+    size_t new_size = size() - sz;
+
+    if (addr == base()) {
+      set_base(addr + sz);
+    }
+    set_size(new_size);
+  }
+
+  void expand_region(address addr, size_t sz) {
+    assert(adjacent_to(addr, sz), "Not adjacent regions");
+    if (base() == addr + sz) {
+      set_base(addr);
+    }
+    set_size(size() + sz);
+  }
+
+ protected:
+  void set_base(address base) {
+    assert(base != NULL, "Sanity check");
+    _base_address = base;
+  }
+
+  void set_size(size_t  size) {
+    assert(size > 0, "Sanity check");
+    _size = size;
+  }
+};
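
exclude_region() only shrinks a region from one end; removing a middle slice would split the region in two, which the caller has to handle by creating a second region. A worked standalone sketch of the arithmetic (addresses and the Region name are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Simplified mirror of exclude_region() above, using plain integers.
    struct Region {
      uintptr_t base;
      size_t    size;
      uintptr_t end() const { return base + size; }

      void exclude(uintptr_t addr, size_t sz) {
        assert(addr == base || addr + sz == end());  // prefix or suffix only
        if (addr == base) base += sz;                // trim from the front...
        size -= sz;                                  // ...or just shrink the size
      }
    };

    int main() {
      Region r{0x1000, 0x2000};  // [0x1000, 0x3000)
      r.exclude(0x1000, 0x800);  // drop the prefix -> [0x1800, 0x3000)
      assert(r.base == 0x1800 && r.end() == 0x3000);
      r.exclude(0x2800, 0x800);  // drop the suffix -> [0x1800, 0x2800)
      assert(r.end() == 0x2800);
      return 0;
    }
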
+
+
+class CommittedMemoryRegion : public VirtualMemoryRegion {
+ private:
+  NativeCallStack  _stack;
+
+ public:
+  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
+    VirtualMemoryRegion(addr, size), _stack(stack) { }
+
+  inline int compare(const CommittedMemoryRegion& rgn) const {
+    if (overlap_region(rgn.base(), rgn.size()) ||
+        adjacent_to   (rgn.base(), rgn.size())) {
+      return 0;
+    } else {
+      if (base() == rgn.base()) {
+        return 0;
+      } else if (base() > rgn.base()) {
+        return 1;
+      } else {
+        return -1;
+      }
+    }
+  }
+
+  inline bool equals(const CommittedMemoryRegion& rgn) const {
+    return compare(rgn) == 0;
+  }
+
+  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
+  inline const NativeCallStack* call_stack() const         { return &_stack; }
+};
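
Note that compare() returns 0 not only for identical regions but also for overlapping or adjacent ones; in the sorted committed-region list this makes such regions collide with an existing entry, which is what allows them to be merged rather than stored twice. The half-open-interval tests behind this, as a standalone sketch (helper names are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Half-open intervals [b, b + s): the predicates compare() relies on.
    static bool overlaps(uintptr_t b1, size_t s1, uintptr_t b2, size_t s2) {
      return b1 < b2 + s2 && b2 < b1 + s1;
    }
    static bool adjacent(uintptr_t b1, size_t s1, uintptr_t b2, size_t s2) {
      return b1 + s1 == b2 || b2 + s2 == b1;  // one ends where the other begins
    }

    int main() {
      assert(overlaps(0x1000, 0x1000, 0x1800, 0x1000));   // partial overlap
      assert(adjacent(0x1000, 0x1000, 0x2000, 0x1000));   // back-to-back
      assert(!overlaps(0x1000, 0x1000, 0x3000, 0x1000));  // disjoint
      return 0;
    }
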
+
+
+typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;
+
+int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
+class ReservedMemoryRegion : public VirtualMemoryRegion {
+ private:
+  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
+    _committed_regions;
+
+  NativeCallStack  _stack;
+  MEMFLAGS         _flag;
+
+  bool             _all_committed;
+
+ public:
+  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone) :
+    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
+    _all_committed(false) { }
+
+
+  ReservedMemoryRegion(address base, size_t size) :
+    VirtualMemoryRegion(base, size), _stack(emptyStack), _flag(mtNone),
+    _all_committed(false) { }
+
+  // Copy constructor
+  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
+    VirtualMemoryRegion(rr.base(), rr.size()) {
+    *this = rr;
+  }
+
+  inline void  set_call_stack(const NativeCallStack& stack) { _stack = stack; }
+  inline const NativeCallStack* call_stack() const          { return &_stack;  }
+
+  void  set_flag(MEMFLAGS flag);
+  inline MEMFLAGS flag() const            { return _flag;  }
+
+  inline int compare(const ReservedMemoryRegion& rgn) const {
+    if (overlap_region(rgn.base(), rgn.size())) {
+      return 0;
+    } else {
+      if (base() == rgn.base()) {
+        return 0;
+      } else if (base() > rgn.base()) {
+        return 1;
+      } else {
+        return -1;
+      }
+    }
+  }
+
+  inline bool equals(const ReservedMemoryRegion& rgn) const {
+    return compare(rgn) == 0;
+  }
+
+  bool    add_committed_region(address addr, size_t size, const NativeCallStack& stack);
+  bool    remove_uncommitted_region(address addr, size_t size);
+
+  size_t  committed_size() const;
+
+  // Move committed regions that are higher than the specified address to
+  // the new region.
+  void    move_committed_regions(address addr, ReservedMemoryRegion& rgn);
+
+  inline bool all_committed() const { return _all_committed; }
+  void        set_all_committed(bool b);
+
+  CommittedRegionIterator iterate_committed_regions() const {
+    return CommittedRegionIterator(_committed_regions.head());
+  }
+
+  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
+    set_base(other.base());
+    set_size(other.size());
+
+    _stack =         *other.call_stack();
+    _flag  =         other.flag();
+    _all_committed = other.all_committed();
+    if (other.all_committed()) {
+      set_all_committed(true);
+    } else {
+      CommittedRegionIterator itr = other.iterate_committed_regions();
+      const CommittedMemoryRegion* rgn = itr.next();
+      while (rgn != NULL) {
+        _committed_regions.add(*rgn);
+        rgn = itr.next();
+      }
+    }
+    return *this;
+  }
+
+ private:
+  // The committed region contains the uncommitted region; subtract the
+  // uncommitted region from this committed region.
+  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
+    address addr, size_t sz);
+
+  bool add_committed_region(const CommittedMemoryRegion& rgn) {
+    assert(rgn.base() != NULL, "Invalid base address");
+    assert(rgn.size() > 0, "Invalid size");
+    return _committed_regions.add(rgn) != NULL;
+  }
+};
+
+int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);
+
+class VirtualMemoryWalker : public StackObj {
+ public:
+   virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
+};
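
The walker is a visitor: VirtualMemoryTracker::walk_virtual_memory (declared below) invokes do_allocation_site once per reserved region. A minimal subclass sketch against the declarations above (RegionTotalWalker is a hypothetical name, and the stop-on-false convention is an assumption inferred from the boolean return):

    // Hypothetical walker that totals reserved memory for one memory type.
    class RegionTotalWalker : public VirtualMemoryWalker {
     private:
      MEMFLAGS _flag;
      size_t   _total;
     public:
      RegionTotalWalker(MEMFLAGS flag) : _flag(flag), _total(0) { }

      virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) {
        if (rgn->flag() == _flag) {
          _total += rgn->size();
        }
        return true;  // assumed: returning false would abort the walk
      }

      size_t total() const { return _total; }
    };
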
+
+// Main class called from MemTracker to track virtual memory allocations, commits and releases.
+class VirtualMemoryTracker : AllStatic {
+ public:
+  static bool initialize(NMT_TrackingLevel level);
+
+  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
+    MEMFLAGS flag = mtNone, bool all_committed = false);
+
+  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
+  static bool remove_uncommitted_region (address base_addr, size_t size);
+  static bool remove_released_region    (address base_addr, size_t size);
+  static void set_reserved_region_type  (address addr, MEMFLAGS flag);
+
+  // Walk virtual memory data structure for creating baseline, etc.
+  static bool walk_virtual_memory(VirtualMemoryWalker* walker);
+
+  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
+
+ private:
+  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> _reserved_regions;
+};
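
Taken together, the expected call sequence from the OS layer looks roughly like the following sketch (the variable names and the retagging to mtJavaHeap are illustrative, not taken from this changeset):

    // Reservation is recorded first, possibly before the memory type is known:
    //   VirtualMemoryTracker::add_reserved_region(base, size, stack);        // tagged mtNone
    //   VirtualMemoryTracker::set_reserved_region_type(base, mtJavaHeap);    // retagged later
    // Commits and uncommits are tracked within the reserved region:
    //   VirtualMemoryTracker::add_committed_region(base, commit_size, stack);
    //   VirtualMemoryTracker::remove_uncommitted_region(base, commit_size);
    // Releasing the mapping removes the reserved region entirely:
    //   VirtualMemoryTracker::remove_released_region(base, size);
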
+
+
+#endif // INCLUDE_NMT
+
+#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
