src/share/vm/services/memSnapshot.hpp

changeset 4274:fb3190e77d3c
parent    4193:716c64bda5ba
child     4285:49cbd3e25ba9
--- a/src/share/vm/services/memSnapshot.hpp	Mon Nov 05 13:55:31 2012 -0800
+++ b/src/share/vm/services/memSnapshot.hpp	Fri Nov 09 19:24:31 2012 -0500
@@ -31,7 +31,6 @@
 #include "services/memBaseline.hpp"
 #include "services/memPtrArray.hpp"
 
-
 // Snapshot pointer array iterator
 
 // The pointer array contains malloc-ed pointers
@@ -165,39 +164,58 @@
 };
 
 class MallocRecordIterator : public MemPointerArrayIterator {
- protected:
+ private:
   MemPointerArrayIteratorImpl  _itr;
 
+
+
  public:
   MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   }
 
   virtual MemPointer* current() const {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
-    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-    if (next == NULL || next->addr() != cur->addr()) {
-      return cur;
-    } else {
-      assert(!cur->is_vm_pointer(), "Sanity check");
-      assert(cur->is_allocation_record() && next->is_deallocation_record(),
-             "sorting order");
-      assert(cur->seq() != next->seq(), "Sanity check");
-      return cur->seq() >  next->seq() ? cur : next;
+#ifdef ASSERT
+    MemPointer* cur_rec = _itr.current();
+    if (cur_rec != NULL) {
+      MemPointer* prev_rec = _itr.peek_prev();
+      MemPointer* next_rec = _itr.peek_next();
+      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
+      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
     }
+#endif
+    return _itr.current();
   }
-
   virtual MemPointer* next() {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
-    MemPointerRecord* next = (MemPointerRecord*)_itr.next();
-    if (next == NULL) {
-      return NULL;
+    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
+    // An arena memory record is a special case: we have to compare its
+    // sequence number against that of its associated arena record.
+    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
+      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
+      // If there is an associated arena record, it has to be the previous
+      // record because of the sorting order (by address). NMT generates a pseudo
+      // address for an arena's size record by offsetting the arena's address,
+      // which guarantees the ordering of the arena record and its size record.
+      if (prev_rec != NULL && prev_rec->is_arena_record() &&
+        next_rec->is_memory_record_of_arena(prev_rec)) {
+        if (prev_rec->seq() > next_rec->seq()) {
+          // Skip this arena memory record
+          // Two scenarios:
+          //   - if the arena record is an allocation record, this early
+          //     size record must be left over from a previous arena,
+          //     and the last size record should have size = 0.
+          //   - if the arena record is a deallocation record, this
+          //     size record should be its cleanup record, which should
+          //     also have size = 0. In other words, an arena always resets
+          //     its size before it goes away (see Arena's destructor).
+          assert(next_rec->size() == 0, "size not reset");
+          return _itr.next();
+        } else {
+          assert(prev_rec->is_allocation_record(),
+            "Arena size record ahead of allocation record");
+        }
+      }
     }
-    if (cur->addr() == next->addr()) {
-      next = (MemPointerRecord*)_itr.next();
-    }
-    return current();
+    return next_rec;
   }
 
   MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
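
The skip logic added to MallocRecordIterator::next() relies on the pseudo-address scheme described in its comment: NMT derives the address of an arena's size record by offsetting the arena's own address, so after sorting by address the size record always directly follows its arena record, and the two can then be compared by sequence number. Below is a minimal, self-contained sketch of that decision using simplified stand-in types; Rec and should_skip_arena_size_record are illustrative names, not NMT code.

#include <cassert>
#include <cstddef>

// Simplified stand-in for the relevant MemPointerRecord fields.
struct Rec {
  void*  addr;        // record address (pseudo address for arena size records)
  int    seq;         // sequence number assigned at recording time
  size_t size;        // recorded size
  bool   arena;       // true if this is an arena record
  bool   arena_size;  // true if this is the arena's size (memory) record
};

// Decide whether an arena size record should be skipped. size_rec is the
// record just returned by the iterator (non-NULL); arena_rec is the record
// that precedes it in address order, if any.
bool should_skip_arena_size_record(const Rec* arena_rec, const Rec* size_rec) {
  if (arena_rec == NULL || !arena_rec->arena || !size_rec->arena_size) {
    return false;   // not the arena/size pair the special case is about
  }
  if (arena_rec->seq > size_rec->seq) {
    // Stale size record of a dead arena, or a cleanup record: in both
    // cases the recorded size must already be zero.
    assert(size_rec->size == 0 && "size not reset");
    return true;
  }
  return false;     // size record is newer than the arena record: keep it
}
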
@@ -213,9 +231,12 @@
 // still chances seeing duplicated records during promotion.
 // We want to use the record with higher sequence number, because it has
 // more accurate callsite pc.
-class VMRecordIterator : public MallocRecordIterator {
+class VMRecordIterator : public MemPointerArrayIterator {
+ private:
+  MemPointerArrayIteratorImpl  _itr;
+
  public:
-  VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
+  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
     MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
     while (next != NULL) {
@@ -256,6 +277,12 @@
     return cur;
   }
 
+  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
+  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
+  void remove()                      { ShouldNotReachHere(); }
+  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
+  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+
 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
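
The class comment above states the intent of the duplicate handling: when two records describe the same virtual memory region (same address, size and flags), keep the one with the higher sequence number, since it carries the more accurate callsite pc. The sketch below restates that rule over a plain sorted vector; VMRec and dedup_keep_latest are hypothetical names, and the real VMRecordIterator removes duplicates in place through the iterator in its constructor rather than building a new array.

#include <cstddef>
#include <vector>

// Simplified stand-in for a virtual memory record.
struct VMRec {
  void*  addr;
  size_t size;
  int    flags;
  int    seq;    // higher seq = recorded later, more accurate callsite pc
};

// Same test as is_duplicated_record above: identical address, size and flags.
static bool is_dup(const VMRec& a, const VMRec& b) {
  return a.addr == b.addr && a.size == b.size && a.flags == b.flags;
}

// Walk a sorted record array and keep only the most recent record of each
// run of duplicates.
std::vector<VMRec> dedup_keep_latest(const std::vector<VMRec>& sorted) {
  std::vector<VMRec> out;
  for (size_t i = 0; i < sorted.size(); i++) {
    if (!out.empty() && is_dup(out.back(), sorted[i])) {
      if (sorted[i].seq > out.back().seq) {
        out.back() = sorted[i];   // later record wins
      }
    } else {
      out.push_back(sorted[i]);
    }
  }
  return out;
}
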
@@ -348,8 +375,10 @@
   DEBUG_ONLY( void dump_all_vm_pointers();)
 
  private:
-   // copy pointer data from src to dest
-   void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+   // copy a sequenced pointer from src to dest
+   void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+   // assign a sequenced pointer to a non-sequenced pointer
+   void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
 
    bool promote_malloc_records(MemPointerArrayIterator* itr);
    bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
