src/share/vm/services/memSnapshot.hpp

author:      hseigel
date:        Mon, 12 Nov 2012 16:15:05 -0500
changeset:   4278:070d523b96a7
parent:      4274:fb3190e77d3c
child:       4285:49cbd3e25ba9
permissions: -rw-r--r--

8001471: Klass::cast() does nothing
Summary: Remove function Klass::cast() and calls to it.
Reviewed-by: dholmes, coleenp
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"

// Snapshot pointer array iterator

// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
 public:
  MemPointerIterator(MemPointerArray* arr):
    MemPointerArrayIteratorImpl(arr) {
    assert(arr != NULL, "null array");
  }
#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // we do see multiple commit/uncommit records on the same memory; that is ok
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
  virtual bool insert(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    return _array->insert_at(ptr, _pos);
  }

  virtual bool insert_after(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_array->insert_at(ptr, _pos + 1)) {
      _pos++;
      return true;
    }
    return false;
  }
#endif
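
  // advance to the first pointer whose address is at or above 'addr'
  // and return it; returns NULL if there is no such pointer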
  virtual MemPointer* locate(address addr) {
    MemPointer* cur = current();
    while (cur != NULL && cur->addr() < addr) {
      cur = next();
    }
    return cur;
  }
};
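
// Iterator over the snapshot's virtual memory records (reserved and
// committed regions), sorted by base address.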
class VMMemPointerIterator : public MemPointerIterator {
 public:
  VMMemPointerIterator(MemPointerArray* arr):
      MemPointerIterator(arr) {
  }

  // locate an existing reserved memory region that contains the specified
  // address, or the reserved region just above this address, where an
  // incoming reserved region should be inserted
  virtual MemPointer* locate(address addr) {
    reset();
    VMMemRegion* reg = (VMMemRegion*)current();
    while (reg != NULL) {
      if (reg->is_reserved_region()) {
        if (reg->contains_address(addr) || addr < reg->base()) {
          return reg;
        }
      }
      reg = (VMMemRegion*)next();
    }
    return NULL;
  }
  // The following methods update virtual memory in the context of the
  // 'current' position, which callers must first establish via locate().
  bool add_reserved_region(MemPointerRecord* rec);
  bool add_committed_region(MemPointerRecord* rec);
  bool remove_uncommitted_region(MemPointerRecord* rec);
  bool remove_released_region(MemPointerRecord* rec);

  // split a reserved region to create a new memory region with the
  // specified base and size
  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
 private:
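  // helpers that insert a record at, or immediately after, the current position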
  bool insert_record(MemPointerRecord* rec);
  bool insert_record_after(MemPointerRecord* rec);

  bool insert_reserved_region(MemPointerRecord* rec);

  // reset current position
  inline void reset() { _pos = 0; }
#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    VMMemRegion* p1 = (VMMemRegion*)ptr1;
    VMMemRegion* p2 = (VMMemRegion*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // we do see multiple commit/uncommit records on the same memory; that is ok
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
#endif
};
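
// Walks the malloc records in the staging area. An arena memory (size)
// record whose sequence number predates its associated arena record is a
// leftover from a previous arena at the same address and is skipped
// (see next() below).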
class MallocRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl  _itr;

 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur_rec = _itr.current();
    if (cur_rec != NULL) {
      MemPointer* prev_rec = _itr.peek_prev();
      MemPointer* next_rec = _itr.peek_next();
      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
#endif
    return _itr.current();
  }

  virtual MemPointer* next() {
    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
    // an arena memory record is a special case: its sequence number has to
    // be compared against that of its associated arena record
    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // if there is an associated arena record, it has to be the previous
      // record because of the sorting order (by address) - NMT generates a
      // pseudo address for an arena's size record by offsetting the arena's
      // address, which guarantees the ordering of an arena record and its
      // size record
      if (prev_rec != NULL && prev_rec->is_arena_record() &&
        next_rec->is_memory_record_of_arena(prev_rec)) {
        if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena memory record.
          // Two scenarios:
          //   - if the arena record is an allocation record, this early
          //     size record must be left over from a previous arena,
          //     and the last size record should have size = 0.
          //   - if the arena record is a deallocation record, this
          //     size record should be its cleanup record, which should
          //     also have size = 0. In other words, an arena always resets
          //     its size before it is destroyed (see Arena's destructor).
          assert(next_rec->size() == 0, "size not reset");
          return _itr.next();
        } else {
          assert(prev_rec->is_allocation_record(),
            "Arena size record ahead of allocation record");
        }
      }
    }
    return next_rec;
  }

  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  void remove()                      { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
// Collapses duplicated records. Eliminating duplicated records here is much
// cheaper than doing so during the promotion phase. However, it has a
// limitation: it can only eliminate duplicates within a generation, so there
// is still a chance of seeing duplicated records during promotion.
// We want to use the record with the higher sequence number, because it has
// the more accurate callsite pc.
class VMRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl  _itr;

 public:
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  virtual MemPointer* current() const {
    return _itr.current();
  }

  // get the next record, skipping duplicated records
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  void remove()                      { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }

 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
    return ret;
  }
};
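
// Holds the data merged from per-thread recorders (malloc records and
// virtual memory records) until it is promoted into the snapshot.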
class StagingArea : public _ValueObj {
 private:
  MemPointerArray*   _malloc_data;
  MemPointerArray*   _vm_data;

 public:
  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
    init();
  }

  ~StagingArea() {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
  }

  MallocRecordIterator malloc_record_walker() {
    return MallocRecordIterator(malloc_data());
  }

  VMRecordIterator virtual_memory_record_walker();

  bool init();
  void clear() {
    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
    _malloc_data->shrink();
    _malloc_data->clear();
    _vm_data->clear();
  }

  inline MemPointerArray* malloc_data() { return _malloc_data; }
  inline MemPointerArray* vm_data()     { return _vm_data; }
};

class MemBaseline;
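
// A snapshot of native memory usage: per-thread MemRecorders are first
// merged into the staging area (merge()), and the staged records are later
// folded into the live malloc and virtual memory arrays (promote()).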
class MemSnapshot : public CHeapObj<mtNMT> {
 private:
  // the following two arrays contain records of all known live memory blocks
  // live malloc-ed memory pointers
  MemPointerArray*      _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray*      _vm_ptrs;

  StagingArea           _staging_area;

  // the lock that protects this snapshot
  Monitor*              _lock;

  NOT_PRODUCT(size_t    _untracked_count;)
  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // whether we are running out of native memory
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
      _staging_area.malloc_data() == NULL ||
      _staging_area.vm_data() == NULL ||
      _vm_ptrs == NULL || _lock == NULL ||
      _alloc_ptrs->out_of_memory() ||
      _vm_ptrs->out_of_memory());
  }

  // merge a per-thread memory recorder into the staging area
  bool merge(MemRecorder* rec);
  // promote staged data to the snapshot
  bool promote();
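
  // block the calling thread on the snapshot's lock for up to the
  // given timeout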
  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)
  // dump all virtual memory pointers in the snapshot
  DEBUG_ONLY(void dump_all_vm_pointers();)

 private:
  // copy a sequenced pointer from src to dest
  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  // assign a sequenced pointer to a non-sequenced pointer
  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);

  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};

#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
