src/share/vm/services/memSnapshot.hpp

changeset 0:f90c822e73f8
child    6876:710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/services/memSnapshot.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,408 @@
     1.4 +/*
     1.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
    1.29 +#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
    1.30 +
    1.31 +#include "memory/allocation.hpp"
    1.32 +#include "runtime/mutex.hpp"
    1.33 +#include "runtime/mutexLocker.hpp"
    1.34 +#include "services/memBaseline.hpp"
    1.35 +#include "services/memPtrArray.hpp"
    1.36 +
    1.37 +// Snapshot pointer array iterator
    1.38 +
    1.39 +// The pointer array contains malloc-ed pointers
    1.40 +class MemPointerIterator : public MemPointerArrayIteratorImpl {
    1.41 + public:
    1.42 +  MemPointerIterator(MemPointerArray* arr):
    1.43 +    MemPointerArrayIteratorImpl(arr) {
    1.44 +    assert(arr != NULL, "null array");
    1.45 +  }
    1.46 +
    1.47 +#ifdef ASSERT
    1.48 +  virtual bool is_dup_pointer(const MemPointer* ptr1,
    1.49 +    const MemPointer* ptr2) const {
    1.50 +    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    1.51 +    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;
    1.52 +
    1.53 +    if (p1->addr() != p2->addr()) return false;
    1.54 +    if ((p1->flags() & MemPointerRecord::tag_masks) !=
    1.55 +        (p2->flags() & MemPointerRecord::tag_masks)) {
    1.56 +      return false;
    1.57 +    }
    1.58 +    // We do see multiple commit/uncommit records on the same memory region; that is OK.
    1.59 +    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
    1.60 +           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
    1.61 +  }
    1.62 +
    1.63 +  virtual bool insert(MemPointer* ptr) {
    1.64 +    if (_pos > 0) {
    1.65 +      MemPointer* p1 = (MemPointer*)ptr;
    1.66 +      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
    1.67 +      assert(!is_dup_pointer(p1, p2),
    1.68 +        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    1.69 +    }
    1.70 +    if (_pos < _array->length() - 1) {
    1.71 +      MemPointer* p1 = (MemPointer*)ptr;
    1.72 +      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
    1.73 +      assert(!is_dup_pointer(p1, p2),
    1.74 +        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    1.75 +    }
    1.76 +    return _array->insert_at(ptr, _pos);
    1.77 +  }
    1.78 +
    1.79 +  virtual bool insert_after(MemPointer* ptr) {
    1.80 +    if (_pos > 0) {
    1.81 +      MemPointer* p1 = (MemPointer*)ptr;
    1.82 +      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
    1.83 +      assert(!is_dup_pointer(p1, p2),
    1.84 +        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    1.85 +    }
    1.86 +    if (_pos < _array->length() - 1) {
    1.87 +      MemPointer* p1 = (MemPointer*)ptr;
    1.88 +      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
    1.89 +
    1.90 +      assert(!is_dup_pointer(p1, p2),
    1.91 +        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    1.92 +    }
    1.93 +    if (_array->insert_at(ptr, _pos + 1)) {
    1.94 +      _pos++;
    1.95 +      return true;
    1.96 +    }
    1.97 +    return false;
    1.98 +  }
    1.99 +#endif
   1.100 +
   1.101 +  virtual MemPointer* locate(address addr) {
   1.102 +    MemPointer* cur = current();
   1.103 +    while (cur != NULL && cur->addr() < addr) {
   1.104 +      cur = next();
   1.105 +    }
   1.106 +    return cur;
   1.107 +  }
   1.108 +};
   1.109 +
   1.110 +class VMMemPointerIterator : public MemPointerIterator {
   1.111 + public:
   1.112 +  VMMemPointerIterator(MemPointerArray* arr):
   1.113 +      MemPointerIterator(arr) {
   1.114 +  }
   1.115 +
   1.116 +  // Locate an existing reserved memory region that contains the specified
   1.117 +  // address, or the reserved region just above this address, where the
   1.118 +  // incoming reserved region should be inserted.
   1.119 +  virtual MemPointer* locate(address addr) {
   1.120 +    reset();
   1.121 +    VMMemRegion* reg = (VMMemRegion*)current();
   1.122 +    while (reg != NULL) {
   1.123 +      if (reg->is_reserved_region()) {
   1.124 +        if (reg->contains_address(addr) || addr < reg->base()) {
   1.125 +          return reg;
   1.126 +        }
   1.127 +      }
   1.128 +      reg = (VMMemRegion*)next();
   1.129 +    }
   1.130 +    return NULL;
   1.131 +  }
   1.132 +
   1.133 +  // The following methods update virtual memory records in the context
   1.134 +  // of the 'current' position, which callers must first establish via
   1.135 +  // the locate() method.
   1.136 +  bool add_reserved_region(MemPointerRecord* rec);
   1.137 +  bool add_committed_region(MemPointerRecord* rec);
   1.138 +  bool remove_uncommitted_region(MemPointerRecord* rec);
   1.139 +  bool remove_released_region(MemPointerRecord* rec);
   1.140 +
   1.141 +  // split a reserved region to create a new memory region with specified base and size
   1.142 +  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
   1.143 + private:
   1.144 +  bool insert_record(MemPointerRecord* rec);
   1.145 +  bool insert_record_after(MemPointerRecord* rec);
   1.146 +
   1.147 +  bool insert_reserved_region(MemPointerRecord* rec);
   1.148 +
   1.149 +  // reset current position
   1.150 +  inline void reset() { _pos = 0; }
   1.151 +#ifdef ASSERT
   1.152 +  // check integrity of records on current reserved memory region.
   1.153 +  bool check_reserved_region() {
   1.154 +    VMMemRegion* reserved_region = (VMMemRegion*)current();
   1.155 +    assert(reserved_region != NULL && reserved_region->is_reserved_region(),
   1.156 +          "Sanity check");
   1.157 +    // all committed regions that follow the current reserved region
   1.158 +    // should belong to it.
   1.159 +    VMMemRegion* next_region = (VMMemRegion*)next();
   1.160 +    for (; next_region != NULL && next_region->is_committed_region();
   1.161 +         next_region = (VMMemRegion*)next()) {
   1.162 +      if (!reserved_region->contains_region(next_region)) {
   1.163 +        return false;
   1.164 +      }
   1.165 +    }
   1.166 +    return true;
   1.167 +  }
   1.168 +
   1.169 +  virtual bool is_dup_pointer(const MemPointer* ptr1,
   1.170 +    const MemPointer* ptr2) const {
   1.171 +    VMMemRegion* p1 = (VMMemRegion*)ptr1;
   1.172 +    VMMemRegion* p2 = (VMMemRegion*)ptr2;
   1.173 +
   1.174 +    if (p1->addr() != p2->addr()) return false;
   1.175 +    if ((p1->flags() & MemPointerRecord::tag_masks) !=
   1.176 +        (p2->flags() & MemPointerRecord::tag_masks)) {
   1.177 +      return false;
   1.178 +    }
   1.179 +    // We do see multiple commit/uncommit records on the same memory region; that is OK.
   1.180 +    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
   1.181 +           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
   1.182 +  }
   1.183 +#endif
   1.184 +};
   1.185 +
   1.186 +class MallocRecordIterator : public MemPointerArrayIterator {
   1.187 + private:
   1.188 +  MemPointerArrayIteratorImpl  _itr;
   1.189 +
   1.192 + public:
   1.193 +  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   1.194 +  }
   1.195 +
   1.196 +  virtual MemPointer* current() const {
   1.197 +#ifdef ASSERT
   1.198 +    MemPointer* cur_rec = _itr.current();
   1.199 +    if (cur_rec != NULL) {
   1.200 +      MemPointer* prev_rec = _itr.peek_prev();
   1.201 +      MemPointer* next_rec = _itr.peek_next();
   1.202 +      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
   1.203 +      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
   1.204 +    }
   1.205 +#endif
   1.206 +    return _itr.current();
   1.207 +  }
   1.208 +  virtual MemPointer* next() {
   1.209 +    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
   1.210 +    // An arena memory record is a special case: we have to compare its
   1.211 +    // sequence number against that of its associated arena record.
   1.212 +    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
   1.213 +      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
   1.214 +      // If there is an associated arena record, it has to be the previous
   1.215 +      // record because of the sorting order (by address). NMT generates a
   1.216 +      // pseudo address for an arena's size record by offsetting the arena's
   1.217 +      // address, which guarantees the arena record sorts just before its size record.
   1.218 +      if (prev_rec != NULL && prev_rec->is_arena_record() &&
   1.219 +        next_rec->is_memory_record_of_arena(prev_rec)) {
   1.220 +        if (prev_rec->seq() > next_rec->seq()) {
   1.221 +          // Skip this arena memory record.
   1.222 +          // Two scenarios:
   1.223 +          //   - if the arena record is an allocation record, this early
   1.224 +          //     size record must be a leftover from a previous arena,
   1.225 +          //     and that arena's last size record should have size = 0.
   1.226 +          //   - if the arena record is a deallocation record, this
   1.227 +          //     size record should be its cleanup record, which should
   1.228 +          //     also have size = 0. In other words, an arena always resets
   1.229 +          //     its size before it goes away (see Arena's destructor).
   1.230 +          assert(next_rec->size() == 0, "size not reset");
   1.231 +          return _itr.next();
   1.232 +        } else {
   1.233 +          assert(prev_rec->is_allocation_record(),
   1.234 +            "Arena size record ahead of allocation record");
   1.235 +        }
   1.236 +      }
   1.237 +    }
   1.238 +    return next_rec;
   1.239 +  }
   1.240 +
   1.241 +  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
   1.242 +  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
   1.243 +  void remove()                      { ShouldNotReachHere(); }
   1.244 +  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
   1.245 +  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
   1.246 +};
   1.247 +
   1.248 +// Collapse duplicated records. Eliminating duplicated records here is much
   1.249 +// cheaper than doing so during the promotion phase. However, it has a
   1.250 +// limitation: it can only eliminate duplicates within a generation, so there
   1.251 +// is still a chance of seeing duplicated records during promotion.
   1.252 +// We want to keep the record with the higher sequence number, because it
   1.253 +// has the more accurate callsite pc.
   1.254 +class VMRecordIterator : public MemPointerArrayIterator {
   1.255 + private:
   1.256 +  MemPointerArrayIteratorImpl  _itr;
   1.257 +
   1.258 + public:
   1.259 +  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
   1.260 +    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
   1.261 +    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
   1.262 +    while (next != NULL) {
   1.263 +      assert(cur != NULL, "Sanity check");
   1.264 +      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
   1.265 +        "pre-sort order");
   1.266 +
   1.267 +      if (is_duplicated_record(cur, next)) {
   1.268 +        _itr.next();
   1.269 +        next = (MemPointerRecord*)_itr.peek_next();
   1.270 +      } else {
   1.271 +        break;
   1.272 +      }
   1.273 +    }
   1.274 +  }
   1.275 +
   1.276 +  virtual MemPointer* current() const {
   1.277 +    return _itr.current();
   1.278 +  }
   1.279 +
   1.280 +  // Get the next record, skipping duplicated records.
   1.281 +  virtual MemPointer* next() {
   1.282 +    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
   1.283 +    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
   1.284 +    while (next != NULL) {
   1.285 +      assert(cur != NULL, "Sanity check");
   1.286 +      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
   1.287 +        "pre-sort order");
   1.288 +
   1.289 +      if (is_duplicated_record(cur, next)) {
   1.290 +        _itr.next();
   1.291 +        cur = next;
   1.292 +        next = (MemPointerRecord*)_itr.peek_next();
   1.293 +      } else {
   1.294 +        break;
   1.295 +      }
   1.296 +    }
   1.297 +    return cur;
   1.298 +  }
   1.299 +
   1.300 +  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
   1.301 +  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
   1.302 +  void remove()                      { ShouldNotReachHere(); }
   1.303 +  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
   1.304 +  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
   1.305 +
   1.306 + private:
   1.307 +  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
   1.308 +    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
   1.309 +    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
   1.310 +    return ret;
   1.311 +  }
   1.312 +};
   1.313 +
   1.314 +class StagingArea VALUE_OBJ_CLASS_SPEC {
   1.315 + private:
   1.316 +  MemPointerArray*   _malloc_data;
   1.317 +  MemPointerArray*   _vm_data;
   1.318 +
   1.319 + public:
   1.320 +  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
   1.321 +    init();
   1.322 +  }
   1.323 +
   1.324 +  ~StagingArea() {
   1.325 +    if (_malloc_data != NULL) delete _malloc_data;
   1.326 +    if (_vm_data != NULL) delete _vm_data;
   1.327 +  }
   1.328 +
   1.329 +  MallocRecordIterator malloc_record_walker() {
   1.330 +    return MallocRecordIterator(malloc_data());
   1.331 +  }
   1.332 +
   1.333 +  VMRecordIterator virtual_memory_record_walker();
   1.334 +
   1.335 +  bool init();
   1.336 +  void clear() {
   1.337 +    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
   1.338 +    _malloc_data->shrink();
   1.339 +    _malloc_data->clear();
   1.340 +    _vm_data->clear();
   1.341 +  }
   1.342 +
   1.343 +  inline MemPointerArray* malloc_data() { return _malloc_data; }
   1.344 +  inline MemPointerArray* vm_data()     { return _vm_data; }
   1.345 +};
   1.346 +
   1.347 +class MemBaseline;
   1.348 +class MemSnapshot : public CHeapObj<mtNMT> {
   1.349 + private:
   1.350 +  // The following two arrays contain records of all known live memory blocks:
   1.351 +  // live malloc-ed memory pointers
   1.352 +  MemPointerArray*      _alloc_ptrs;
   1.353 +  // live virtual memory pointers
   1.354 +  MemPointerArray*      _vm_ptrs;
   1.355 +
   1.356 +  StagingArea           _staging_area;
   1.357 +
   1.358 +  // the lock to protect this snapshot
   1.359 +  Monitor*              _lock;
   1.360 +
   1.361 +  // the number of instance classes
   1.362 +  int                   _number_of_classes;
   1.363 +
   1.364 +  NOT_PRODUCT(size_t    _untracked_count;)
   1.365 +  friend class MemBaseline;
   1.366 +
   1.367 + public:
   1.368 +  MemSnapshot();
   1.369 +  virtual ~MemSnapshot();
   1.370 +
   1.371 +  // returns true if we are running out of native memory
   1.372 +  bool out_of_memory() {
   1.373 +    return (_alloc_ptrs == NULL ||
   1.374 +      _staging_area.malloc_data() == NULL ||
   1.375 +      _staging_area.vm_data() == NULL ||
   1.376 +      _vm_ptrs == NULL || _lock == NULL ||
   1.377 +      _alloc_ptrs->out_of_memory() ||
   1.378 +      _vm_ptrs->out_of_memory());
   1.379 +  }
   1.380 +
   1.381 +  // merge a per-thread memory recorder into the staging area
   1.382 +  bool merge(MemRecorder* rec);
   1.383 +  // promote staged data to the snapshot
   1.384 +  bool promote(int number_of_classes);
   1.385 +
   1.386 +  int  number_of_classes() const { return _number_of_classes; }
   1.387 +
   1.388 +  void wait(long timeout) {
   1.389 +    assert(_lock != NULL, "Just check");
   1.390 +    MonitorLockerEx locker(_lock);
   1.391 +    locker.wait(true, timeout);
   1.392 +  }
   1.393 +
   1.394 +  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
   1.395 +  NOT_PRODUCT(void check_staging_data();)
   1.396 +  NOT_PRODUCT(void check_malloc_pointers();)
   1.397 +  NOT_PRODUCT(bool has_allocation_record(address addr);)
   1.398 +  // dump all virtual memory pointers in snapshot
   1.399 +  DEBUG_ONLY(void dump_all_vm_pointers();)
   1.400 +
   1.401 + private:
   1.402 +  // copy a sequenced pointer from src to dest
   1.403 +  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
   1.404 +  // assign a sequenced pointer to a non-sequenced pointer
   1.405 +  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
   1.406 +
   1.407 +  bool promote_malloc_records(MemPointerArrayIterator* itr);
   1.408 +  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
   1.409 +};
   1.410 +
   1.411 +#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
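
A minimal, self-contained sketch (not HotSpot code) of the duplicate-collapsing
walk that VMRecordIterator performs above. The Record struct and its
addr/size/flags/seq fields are illustrative assumptions, not NMT types. The
input is pre-sorted by address and, among duplicates, by ascending sequence
number, so skipping forward leaves the record with the highest sequence number,
which carries the most accurate callsite pc.

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a sequenced memory pointer record.
struct Record {
  size_t addr;   // address of the memory region
  size_t size;   // region size
  int    flags;  // allocation tags
  int    seq;    // sequence number; higher = recorded later
};

// Mirrors is_duplicated_record(): same address, size, and flags.
static bool is_duplicated(const Record& a, const Record& b) {
  return a.addr == b.addr && a.size == b.size && a.flags == b.flags;
}

int main() {
  // Pre-sorted input: two duplicates at 0x1000, one distinct record at 0x2000.
  std::vector<Record> recs = {
    {0x1000, 64, 1, 1}, {0x1000, 64, 1, 5}, {0x2000, 32, 1, 7}
  };
  for (size_t i = 0; i < recs.size(); i++) {
    // Skip forward while the next record duplicates the current one, so the
    // record emitted is the last (highest-seq) of each duplicate run.
    while (i + 1 < recs.size() && is_duplicated(recs[i], recs[i + 1])) {
      i++;
    }
    std::printf("addr = 0x%zx, size = %zu, seq = %d\n",
                recs[i].addr, recs[i].size, recs[i].seq);
  }
  return 0;
}

Compiled with any C++11 compiler, this prints only the seq = 5 and seq = 7
records, mirroring how VMRecordIterator::next() advances past duplicates and
returns the surviving record.

A second illustrative sketch, for the pseudo-address trick described in
MallocRecordIterator::next(): by deriving the size record's address from the
arena's own address, a plain sort by address keeps the arena record and its
size record adjacent and in order. The arena_obj_size offset below is an
assumption for demonstration only, not NMT's actual offset.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Rec {
  unsigned long addr;    // real or pseudo address
  bool is_size_record;   // true for the arena's size record
};

int main() {
  const unsigned long arena_addr = 0x5000;
  const unsigned long arena_obj_size = 4 * sizeof(void*);  // assumed offset
  std::vector<Rec> recs = {
    { 0x7000, false },                      // unrelated allocation
    { arena_addr + arena_obj_size, true },  // arena size record (pseudo address)
    { arena_addr, false },                  // the arena record itself
  };
  // Sorting by address alone suffices: the pseudo address is offset from the
  // arena's address, so the pair stays adjacent with the arena record first.
  std::sort(recs.begin(), recs.end(),
            [](const Rec& a, const Rec& b) { return a.addr < b.addr; });
  for (const Rec& r : recs) {
    std::printf("0x%lx%s\n", r.addr, r.is_size_record ? " (size record)" : "");
  }
  return 0;
}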
