src/share/vm/services/memSnapshot.cpp


author:      zgu
date:        Tue, 11 Sep 2012 20:53:17 -0400
changeset:   4053:33143ee07800
parent:      3994:e5bf1c79ed5b
child:       4193:716c64bda5ba
permissions: -rw-r--r--

7181995: NMT ON: NMT assertion failure assert(cur_vm->is_uncommit_record() || cur_vm->is_deallocation_record
Summary: Fixed virtual memory record merge and promotion logic; ordering should be based on sequence number rather than base address order
Reviewed-by: coleenp, acorn
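
The heart of the fix is an ordering rule: staged virtual memory records must be replayed in the order they were generated (their sequence number), not in base-address order, because one base address can carry several records (reserve, commit, uncommit, release) whose meaning depends on that history. Below is a minimal standalone sketch of that rule; it is not part of this changeset, and the Rec struct and by_seq comparator are hypothetical illustrations rather than the real NMT types. It simply mirrors sort_in_seq_order() in the file that follows.

// sketch.cpp -- illustrative only, not part of memSnapshot.cpp
#include <cstdio>
#include <cstdlib>

struct Rec {
  int      seq;   // order in which the recorder generated the record
  unsigned addr;  // simplified base address of the region
  char     op;    // 'R' = reserve, 'C' = commit, 'U' = uncommit, 'L' = release
};

// Ascending sequence number, mirroring sort_in_seq_order() below.
static int by_seq(const void* p1, const void* p2) {
  const Rec* r1 = (const Rec*)p1;
  const Rec* r2 = (const Rec*)p2;
  return r1->seq - r2->seq;
}

int main() {
  // Two operations on the same base address plus one on another region.
  // Sorting by address alone loses the release-then-reserve history at
  // 0x1000; sorting by seq replays it in the order it actually happened.
  Rec recs[] = {
    { 3, 0x1000, 'R' },   // later: region reserved again
    { 1, 0x1000, 'L' },   // earlier: region released
    { 2, 0x2000, 'R' },
  };
  const int n = sizeof(recs) / sizeof(recs[0]);
  qsort(recs, n, sizeof(Rec), by_seq);
  for (int i = 0; i < n; i++) {
    printf("seq=%d addr=0x%x op=%c\n", recs[i].seq, recs[i].addr, recs[i].op);
  }
  return 0;
}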

/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}

MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return MemPointerArrayIteratorImpl(arr);
}
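
// Summary comment (inferred from the code in this file, not original
// documentation): _alloc_ptrs holds the promoted malloc/arena records kept
// sorted by address (see check_malloc_pointers()); _vm_ptrs holds the live
// virtual memory regions, which promote_virtual_memory_records() keeps in
// base-address order.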
MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  MEMFLAGS flags = dest->flags();

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}
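
// Ordering note (inferred from the code below, not original documentation):
// malloc records are kept sorted by address in the staging area, with several
// tagging records for one address ordered by their tag bits; virtual memory
// records are only appended here and are sorted by sequence number later, in
// virtual_memory_record_walker(), before they are promoted.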
// merge a per-thread memory recorder to the staging area
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*) itr.current();
  while (p1 != NULL) {
    if (p1->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(p1)) {
        return false;
      }
    } else {
      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
      // we have not seen this memory block, so just add to staging area
      if (p2 == NULL) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else if (p1->addr() == p2->addr()) {
        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
        // a memory block can have many tagging records, find right one to replace or
        // right position to insert
        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
            (p1->flags() & MemPointerRecord::tag_masks)) {
            p2 = (MemPointerRecord*)malloc_staging_itr.next();
            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
          } else {
            break;
          }
        }
        int df = (p1->flags() & MemPointerRecord::tag_masks) -
          (p2->flags() & MemPointerRecord::tag_masks);
        if (df == 0) {
          assert(p1->seq() > 0, "not sequenced");
          assert(p2->seq() > 0, "not sequenced");
          if (p1->seq() > p2->seq()) {
            copy_pointer(p2, p1);
          }
        } else if (df < 0) {
          if (!malloc_staging_itr.insert(p1)) {
            return false;
          }
        } else {
          if (!malloc_staging_itr.insert_after(p1)) {
            return false;
          }
        }
      } else if (p1->addr() < p2->addr()) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!malloc_staging_itr.insert_after(p1)) {
          return false;
        }
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  DEBUG_ONLY(check_staging_data();)
  return true;
}
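
// Note (inferred from the code below): promotion is two-phase -- malloc
// records are folded into _alloc_ptrs first, then virtual memory records are
// replayed into _vm_ptrs in sequence number order; the staging area is
// cleared whether or not both phases succeed.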
// promote data to next generation
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  return promoted;
}
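
// Note (inferred from the code below): for each staged malloc record, an
// allocation or arena-size record either updates the matching snapshot entry
// in place or is inserted in address order; a deallocation record removes the
// matching entry (plus a trailing arena-size record for an arena block).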
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'lived' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
             "Sanity check");
      // update block states
      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
        copy_pointer(matched_rec, new_rec);
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record, we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_size_record()) {
            // it has to match the arena record
            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // it is a new record, insert into snapshot
      if (new_rec->is_arena_size_record()) {
        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
          // no matched arena record, ignore the size record
          new_rec = NULL;
        }
      }
      // only 'live' record can go into snapshot
      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT can not track some startup memory, which is allocated before NMT is on
          _untracked_count ++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}
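
// Note (inferred from the code below): the iterator passed in walks staged
// virtual memory records in sequence number order (see
// virtual_memory_record_walker()). A record that falls inside an existing
// region updates it in place (resize, commit, uncommit, tag or release); an
// unmatched reserve record is inserted so that _vm_ptrs stays sorted by base
// address, and other unmatched records refer to already-released regions and
// are dropped.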
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegionEx new_vm_rec;
  VMMemRegion*  matched_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");
    if (MemTracker::track_callsite()) {
      new_vm_rec.init((MemPointerRecordEx*)new_rec);
    } else {
      new_vm_rec.init(new_rec);
    }
    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (matched_rec != NULL &&
        (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
      // snapshot can only have 'live' records
      assert(matched_rec->is_reserve_record(), "Sanity check");
      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
        // resize reserved virtual memory range
        // resize has to cover committed area
        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
        matched_rec->set_reserved_size(new_vm_rec.size());
      } else if (new_vm_rec.is_commit_record()) {
        // commit memory inside reserved memory range
        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
        // thread stacks are marked committed, so we ignore 'commit' record for creating
        // stack guard pages
        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
          matched_rec->commit(new_vm_rec.committed_size());
        }
      } else if (new_vm_rec.is_uncommit_record()) {
        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
          // ignore 'uncommit' record from removing stack guard pages, uncommit
          // thread stack as whole
          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
            matched_rec->uncommit(new_vm_rec.committed_size());
          }
        } else {
          // uncommit memory inside reserved memory range
          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
                 "Sanity check");
          matched_rec->uncommit(new_vm_rec.committed_size());
        }
      } else if (new_vm_rec.is_type_tagging_record()) {
        // tag this virtual memory range to a memory type
        // can not re-tag a memory range to different type
        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
               "Sanity check");
        matched_rec->tag(new_vm_rec.flags());
      } else if (new_vm_rec.is_release_record()) {
        // release part or whole memory range
        if (new_vm_rec.base() == matched_rec->base() &&
            new_vm_rec.size() == matched_rec->size()) {
          // release whole virtual memory range
          assert(matched_rec->committed_size() == 0, "Sanity check");
          vm_snapshot_itr.remove();
        } else {
          // partial release
          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
        }
      } else {
        // multiple reserve/commit on the same virtual memory range
        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
          (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
          "Sanity check");
        matched_rec->tag(new_vm_rec.flags());
      }
    } else {
      // no matched record
      if (new_vm_rec.is_reserve_record()) {
        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
            return false;
          }
        } else {
          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
            return false;
          }
        }
      } else {
        // throw out obsolete records, which are the commit/uncommit/release/tag records
        // on memory regions that are already released.
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
       "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}
#endif // ASSERT
