changeset 4276:8c413497f434
parent    4273:4efcd79826f2
parent    4275:e26ce0e8b666
child     4277:e4f764ddb06a
child     4278:070d523b96a7
author    zgu
date      Fri, 09 Nov 2012 22:22:53 -0800
summary   Merge

files:
src/share/vm/services/memSnapshot.cpp
--- a/src/share/vm/services/memBaseline.cpp	Fri Nov 09 11:47:28 2012 -0800
+++ b/src/share/vm/services/memBaseline.cpp	Fri Nov 09 22:22:53 2012 -0800
@@ -115,17 +115,25 @@
   while (malloc_ptr != NULL) {
     index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
     size_t size = malloc_ptr->size();
-    _total_malloced += size;
-    _malloc_data[index].inc(size);
-    if (MemPointerRecord::is_arena_record(malloc_ptr->flags())) {
-      // see if arena size record present
-      MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
-      if (MemPointerRecord::is_arena_size_record(next_malloc_ptr->flags())) {
-        assert(next_malloc_ptr->is_size_record_of_arena(malloc_ptr), "arena records do not match");
-        size = next_malloc_ptr->size();
-        _arena_data[index].inc(size);
-        used_arena_size += size;
-        malloc_itr.next();
+    if (malloc_ptr->is_arena_memory_record()) {
+      // We do have anonymous arenas; they are either used as value objects,
+      // which are embedded inside other objects, or used as stack objects.
+      _arena_data[index].inc(size);
+      used_arena_size += size;
+    } else {
+      _total_malloced += size;
+      _malloc_data[index].inc(size);
+      if (malloc_ptr->is_arena_record()) {
+        // see if an arena memory record is present
+        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
+        if (next_malloc_ptr->is_arena_memory_record()) {
+          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
+             "Arena records do not match");
+          size = next_malloc_ptr->size();
+          _arena_data[index].inc(size);
+          used_arena_size += size;
+          malloc_itr.next();
+        }
       }
     }
     malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
@@ -193,7 +201,7 @@
 
   // baseline memory that is totaled over 1 KB
   while (malloc_ptr != NULL) {
-    if (!MemPointerRecord::is_arena_size_record(malloc_ptr->flags())) {
+    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
       // skip thread stacks
       if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
         if (malloc_callsite.addr() != malloc_ptr->pc()) {
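
The memBaseline.cpp change above splits the walk into two cases: arena memory records are totaled as arena usage (which also covers anonymous arenas), while everything else is totaled as malloc'd memory, with an arena record consuming the memory record that follows it. A minimal standalone sketch of that walk, using simplified stand-in types (Rec, Totals are hypothetical, not the actual NMT classes):

    #include <cstddef>
    #include <vector>

    struct Rec {
      bool   is_arena;         // the arena object itself (malloc'd)
      bool   is_arena_memory;  // the arena's memory (size) record
      size_t size;
    };

    struct Totals {
      size_t total_malloced;
      size_t arena_used;
    };

    Totals baseline(const std::vector<Rec>& recs) {
      Totals t = {0, 0};
      for (size_t i = 0; i < recs.size(); i++) {
        const Rec& r = recs[i];
        if (r.is_arena_memory) {
          // anonymous arena: no owning arena record precedes it
          t.arena_used += r.size;
        } else {
          t.total_malloced += r.size;
          if (r.is_arena && i + 1 < recs.size() && recs[i + 1].is_arena_memory) {
            // the paired memory record sorts immediately after its arena
            t.arena_used += recs[i + 1].size;
            i++;  // consume the memory record
          }
        }
      }
      return t;
    }
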
--- a/src/share/vm/services/memPtr.hpp	Fri Nov 09 11:47:28 2012 -0800
+++ b/src/share/vm/services/memPtr.hpp	Fri Nov 09 22:22:53 2012 -0800
@@ -165,7 +165,7 @@
     return (flags & (otArena | tag_size)) == otArena;
   }
 
-  inline static bool is_arena_size_record(MEMFLAGS flags) {
+  inline static bool is_arena_memory_record(MEMFLAGS flags) {
     return (flags & (otArena | tag_size)) == (otArena | tag_size);
   }
 
@@ -256,8 +256,8 @@
   }
 
   // if this record records a size information of an arena
-  inline bool is_arena_size_record() const {
-    return is_arena_size_record(_flags);
+  inline bool is_arena_memory_record() const {
+    return is_arena_memory_record(_flags);
  }
 
   // if this pointer represents an address to an arena object
@@ -266,8 +266,8 @@
   }
 
   // if this record represents a size information of specific arena
-  inline bool is_size_record_of_arena(const MemPointerRecord* arena_rc) {
-    assert(is_arena_size_record(), "not size record");
+  inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
+    assert(is_arena_memory_record(), "not arena memory record");
     assert(arena_rc->is_arena_record(), "not arena record");
     return (arena_rc->addr() + sizeof(void*)) == addr();
   }
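
The memPtr.hpp rename distinguishes the arena record (the malloc'd Arena object itself: otArena set, tag_size clear) from the arena memory record (the size record for memory the arena holds: both bits set). A sketch of the two predicates with hypothetical bit values chosen for illustration only; the real MEMFLAGS layout is defined in memPtr.hpp:

    #include <cstdio>

    typedef unsigned MEMFLAGS;
    const MEMFLAGS otArena  = 0x10;  // assumed bit: record concerns an arena
    const MEMFLAGS tag_size = 0x01;  // assumed bit: record carries a size

    // the arena object itself: otArena set, tag_size clear
    static bool is_arena_record(MEMFLAGS flags) {
      return (flags & (otArena | tag_size)) == otArena;
    }

    // the arena's memory (size) record: both bits set
    static bool is_arena_memory_record(MEMFLAGS flags) {
      return (flags & (otArena | tag_size)) == (otArena | tag_size);
    }

    int main() {
      printf("%d %d\n",
             is_arena_record(otArena),                    // prints 1
             is_arena_memory_record(otArena | tag_size)); // prints 1
      return 0;
    }
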
--- a/src/share/vm/services/memSnapshot.cpp	Fri Nov 09 11:47:28 2012 -0800
+++ b/src/share/vm/services/memSnapshot.cpp	Fri Nov 09 22:22:53 2012 -0800
@@ -50,7 +50,7 @@
       tty->print_cr(" (tag)");
     }
   } else {
-    if (rec->is_arena_size_record()) {
+    if (rec->is_arena_memory_record()) {
       tty->print_cr(" (arena size)");
     } else if (rec->is_allocation_record()) {
       tty->print_cr(" (malloc)");
@@ -401,21 +401,31 @@
   }
 }
 
-void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
+
+void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
   assert(dest != NULL && src != NULL, "Just check");
   assert(dest->addr() == src->addr(), "Just check");
+  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
 
-  MEMFLAGS flags = dest->flags();
+  if (MemTracker::track_callsite()) {
+    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
+  } else {
+    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
+  }
+}
+
+void MemSnapshot::assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
+  assert(src != NULL && dest != NULL, "Just check");
+  assert(dest->seq() == 0 && src->seq() > 0, "cast away sequence");
 
   if (MemTracker::track_callsite()) {
     *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
   } else {
-    *dest = *src;
+    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
   }
 }
 
-
-// merge a per-thread memory recorder to the staging area
+// merge a recorder into the staging area
 bool MemSnapshot::merge(MemRecorder* rec) {
   assert(rec != NULL && !rec->out_of_memory(), "Just check");
 
@@ -423,71 +433,45 @@
 
   MutexLockerEx lock(_lock, true);
   MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
-  MemPointerRecord *p1, *p2;
-  p1 = (MemPointerRecord*) itr.current();
-  while (p1 != NULL) {
-    if (p1->is_vm_pointer()) {
+  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
+  MemPointerRecord* matched_rec;
+
+  while (incoming_rec != NULL) {
+    if (incoming_rec->is_vm_pointer()) {
       // we don't do anything with virtual memory records during merge
-      if (!_staging_area.vm_data()->append(p1)) {
+      if (!_staging_area.vm_data()->append(incoming_rec)) {
         return false;
       }
     } else {
       // locate matched record and/or also position the iterator to proper
       // location for this incoming record.
-      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
-      // we have not seen this memory block, so just add to staging area
-      if (p2 == NULL) {
-        if (!malloc_staging_itr.insert(p1)) {
+      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
+      // we have not seen this memory block in this generation,
+      // so just add to staging area
+      if (matched_rec == NULL) {
+        if (!malloc_staging_itr.insert(incoming_rec)) {
           return false;
         }
-      } else if (p1->addr() == p2->addr()) {
-        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
-        // a memory block can have many tagging records, find right one to replace or
-        // right position to insert
-        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
-          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
-            (p1->flags() & MemPointerRecord::tag_masks)) {
-            p2 = (MemPointerRecord*)malloc_staging_itr.next();
-            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
-          } else {
-            break;
-          }
+      } else if (incoming_rec->addr() == matched_rec->addr()) {
+        // the record with the higher sequence number wins
+        if (incoming_rec->seq() > matched_rec->seq()) {
+          copy_seq_pointer(matched_rec, incoming_rec);
        }
-        int df = (p1->flags() & MemPointerRecord::tag_masks) -
-          (p2->flags() & MemPointerRecord::tag_masks);
-        if (df == 0) {
-          assert(p1->seq() > 0, "not sequenced");
-          assert(p2->seq() > 0, "not sequenced");
-          if (p1->seq() > p2->seq()) {
-            copy_pointer(p2, p1);
-          }
-        } else if (df < 0) {
-          if (!malloc_staging_itr.insert(p1)) {
-            return false;
-          }
-        } else {
-          if (!malloc_staging_itr.insert_after(p1)) {
-            return false;
-          }
-        }
-      } else if (p1->addr() < p2->addr()) {
-        if (!malloc_staging_itr.insert(p1)) {
+      } else if (incoming_rec->addr() < matched_rec->addr()) {
+        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else {
-        if (!malloc_staging_itr.insert_after(p1)) {
-          return false;
-        }
+        ShouldNotReachHere();
      }
    }
-    p1 = (MemPointerRecord*)itr.next();
+    incoming_rec = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(void check_staging_data();)
  return true;
}
 
 
-
 // promote data to next generation
 bool MemSnapshot::promote() {
   assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
@@ -518,20 +502,25 @@
     // found matched memory block
     if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
       // snapshot already contains 'live' records
-      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
+      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
              "Sanity check");
       // update block states
-      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
-        copy_pointer(matched_rec, new_rec);
+      if (new_rec->is_allocation_record()) {
+        assign_pointer(matched_rec, new_rec);
+      } else if (new_rec->is_arena_memory_record()) {
+        if (new_rec->size() == 0) {
+          // remove size record once size drops to 0
+          malloc_snapshot_itr.remove();
+        } else {
+          assign_pointer(matched_rec, new_rec);
+        }
       } else {
         // a deallocation record
         assert(new_rec->is_deallocation_record(), "Sanity check");
         // an arena record can be followed by a size record, we need to remove both
         if (matched_rec->is_arena_record()) {
           MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
-          if (next->is_arena_size_record()) {
-            // it has to match the arena record
-            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
+          if (next->is_arena_memory_record() && next->is_memory_record_of_arena(matched_rec)) {
             malloc_snapshot_itr.remove();
           }
         }
@@ -539,17 +528,13 @@
         malloc_snapshot_itr.remove();
       }
     } else {
-      // it is a new record, insert into snapshot
-      if (new_rec->is_arena_size_record()) {
-        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
-        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
-          // no matched arena record, ignore the size record
-          new_rec = NULL;
-        }
+      // don't insert size 0 record
+      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
+        new_rec = NULL;
      }
-      // only 'live' record can go into snapshot
+
      if (new_rec != NULL) {
-        if  (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+        if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
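
The rewritten merge drops the old tag-comparison logic: within a generation the staging area holds at most one malloc record per address, and the record with the higher sequence number wins. A simplified sketch of that rule, using std::map as a stand-in for the sorted staging array (these are not the real NMT types):

    #include <cstddef>
    #include <cstdint>
    #include <map>

    struct SeqRec {
      uint64_t seq;   // global sequence number assigned when the record was made
      size_t   size;
    };

    typedef std::map<uintptr_t, SeqRec> Staging;  // keyed by block address

    void merge_one(Staging& staging, uintptr_t addr, const SeqRec& incoming) {
      Staging::iterator it = staging.find(addr);
      if (it == staging.end()) {
        staging[addr] = incoming;      // first sighting in this generation
      } else if (incoming.seq > it->second.seq) {
        it->second = incoming;         // newer record supersedes the older one
      }
      // an older incoming record is stale and is simply dropped
    }
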
--- a/src/share/vm/services/memSnapshot.hpp	Fri Nov 09 11:47:28 2012 -0800
+++ b/src/share/vm/services/memSnapshot.hpp	Fri Nov 09 22:22:53 2012 -0800
@@ -31,7 +31,6 @@
 #include "services/memBaseline.hpp"
 #include "services/memPtrArray.hpp"
 
-
 // Snapshot pointer array iterator
 
 // The pointer array contains malloc-ed pointers
@@ -165,39 +164,58 @@
 };
 
 class MallocRecordIterator : public MemPointerArrayIterator {
- protected:
+ private:
   MemPointerArrayIteratorImpl  _itr;
 
+
+
  public:
   MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
   }
 
   virtual MemPointer* current() const {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
-    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-    if (next == NULL || next->addr() != cur->addr()) {
-      return cur;
-    } else {
-      assert(!cur->is_vm_pointer(), "Sanity check");
-      assert(cur->is_allocation_record() && next->is_deallocation_record(),
-             "sorting order");
-      assert(cur->seq() != next->seq(), "Sanity check");
-      return cur->seq() >  next->seq() ? cur : next;
+#ifdef ASSERT
+    MemPointer* cur_rec = _itr.current();
+    if (cur_rec != NULL) {
+      MemPointer* prev_rec = _itr.peek_prev();
+      MemPointer* next_rec = _itr.peek_next();
+      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
+      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
+#endif
+    return _itr.current();
   }
-
   virtual MemPointer* next() {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
-    MemPointerRecord* next = (MemPointerRecord*)_itr.next();
-    if (next == NULL) {
-      return NULL;
+    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
+    // an arena memory record is a special case: we have to compare its
+    // sequence number against that of its associated arena record.
+    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
+      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
+      // if there is an associated arena record, it must be the previous
+      // record because of the sorting order (by address): NMT generates a
+      // pseudo address for an arena's size record by offsetting the arena's
+      // address, which keeps the size record right after its arena record.
+      if (prev_rec != NULL && prev_rec->is_arena_record() &&
+        next_rec->is_memory_record_of_arena(prev_rec)) {
+        if (prev_rec->seq() > next_rec->seq()) {
+          // Skip this arena memory record.
+          // Two scenarios:
+          //   - if the arena record is an allocation record, this early
+          //     size record must be a leftover from a previous arena,
+          //     and the last size record should have size = 0.
+          //   - if the arena record is a deallocation record, this
+          //     size record should be its cleanup record, which should
+          //     also have size = 0. In other words, an arena always
+          //     resets its size before it goes away (see Arena's destructor).
+          assert(next_rec->size() == 0, "size not reset");
+          return _itr.next();
+        } else {
+          assert(prev_rec->is_allocation_record(),
+            "Arena size record ahead of allocation record");
+        }
+      }
    }
-    if (cur->addr() == next->addr()) {
-      next = (MemPointerRecord*)_itr.next();
-    }
-    return current();
+    return next_rec;
   }
 
   MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
@@ -213,9 +231,12 @@
 // still chances seeing duplicated records during promotion.
 // We want to use the record with higher sequence number, because it has
 // more accurate callsite pc.
-class VMRecordIterator : public MallocRecordIterator {
+class VMRecordIterator : public MemPointerArrayIterator {
+ private:
+  MemPointerArrayIteratorImpl  _itr;
+
  public:
-  VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
+  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
     MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
     MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
     while (next != NULL) {
@@ -256,6 +277,12 @@
     return cur;
   }
 
+  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
+  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
+  void remove()                      { ShouldNotReachHere(); }
+  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
+  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+
  private:
   bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
     bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
@@ -348,8 +375,10 @@
   DEBUG_ONLY( void dump_all_vm_pointers();)
 
  private:
-   // copy pointer data from src to dest
-   void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+   // copy a sequenced pointer from src to dest
+   void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+   // assign a sequenced pointer to a non-sequenced pointer
+   void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
 
    bool promote_malloc_records(MemPointerArrayIterator* itr);
    bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
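
The new MallocRecordIterator::next() hides stale arena memory records from its callers: a memory record whose sequence number is older than that of its owning arena record must have size 0 and is skipped. A simplified sketch of that skip rule, with hypothetical R and Iter types standing in for the real iterator:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct R {
      uintptr_t addr;
      uint64_t  seq;
      size_t    size;
      bool      is_arena;         // arena object record
      bool      is_arena_memory;  // memory record keyed at addr + sizeof(void*)
    };

    struct Iter {
      const std::vector<R>* recs;  // records sorted by address
      size_t pos;                  // index of the current record

      const R* next() {
        while (pos + 1 < recs->size()) {
          const R& cur  = (*recs)[++pos];
          const R& prev = (*recs)[pos - 1];
          if (cur.is_arena_memory && prev.is_arena &&
              prev.addr + sizeof(void*) == cur.addr &&
              prev.seq > cur.seq) {
            // stale memory record: the arena reset its size before it went away
            assert(cur.size == 0 && "stale arena memory record was not reset");
            continue;  // skip it
          }
          return &cur;
        }
        return NULL;
      }
    };
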
--- a/src/share/vm/services/memTracker.hpp	Fri Nov 09 11:47:28 2012 -0800
+++ b/src/share/vm/services/memTracker.hpp	Fri Nov 09 22:22:53 2012 -0800
@@ -284,14 +284,14 @@
     }
   }
 
-  // record arena size
+  // record arena memory size
   static inline void record_arena_size(address addr, size_t size) {
-    // we add a positive offset to arena address, so we can have arena size record
+    // we add a positive offset to the arena address, so we can have the arena memory record
     // sorted after arena record
     if (is_on() && !UseMallocOnly) {
       assert(addr != NULL, "Sanity check");
       create_memory_record((addr + sizeof(void*)), MemPointerRecord::arena_size_tag(), size,
-        0, NULL);
+        DEBUG_CALLER_PC, NULL);
     }
   }
 
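
record_arena_size() keys the arena memory record at the arena's address plus sizeof(void*); since records are sorted by address, the memory record always lands immediately after its arena record. A small sketch of that pseudo-address trick (the helper name is hypothetical):

    #include <cstdint>
    #include <cstdio>

    static uintptr_t arena_memory_key(uintptr_t arena_addr) {
      return arena_addr + sizeof(void*);  // sorts just after the arena itself
    }

    int main() {
      uintptr_t arena = 0x1000;
      printf("arena record key:  0x%llx\n", (unsigned long long)arena);
      printf("memory record key: 0x%llx\n",
             (unsigned long long)arena_memory_key(arena));
      return 0;
    }
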
