src/share/vm/services/memSnapshot.cpp

author:      zgu
date:        Mon, 06 May 2013 11:15:13 -0400
changeset:   5053:c18152e0554e
parent:      4641:fc64254f5579
child:       6680:78bbf4d43a14
permissions: -rw-r--r--

8013120: NMT: Kitchensink crashes with assert(next_region == NULL || !next_region->is_committed_region()) failed: Sanity check
Summary: Fixed NMT to deal with releasing virtual memory region when there are still committed regions within it
Reviewed-by: acorn, coleenp
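
The failure sequence, reconstructed from the summary above (all addresses are
hypothetical, for illustration only):

    // reserve  [0x1000 - 0x9000)  -> reservation record enters the snapshot
    // commit   [0x2000 - 0x3000)  -> committed region record follows it
    // release  [0x1000 - 0x9000)  -> must remove the reservation record *and*
    //                                the committed records inside it; a stale
    //                                committed record is what trips the assert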

/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

#ifdef ASSERT

void decode_pointer_record(MemPointerRecord* rec) {
  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
    rec->addr() + rec->size(), (int)rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_vm_pointer()) {
    if (rec->is_allocation_record()) {
      tty->print_cr(" (reserve)");
    } else if (rec->is_commit_record()) {
      tty->print_cr(" (commit)");
    } else if (rec->is_uncommit_record()) {
      tty->print_cr(" (uncommit)");
    } else if (rec->is_deallocation_record()) {
      tty->print_cr(" (release)");
    } else {
      tty->print_cr(" (tag)");
    }
  } else {
    if (rec->is_arena_memory_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
    } else {
      tty->print_cr(" (free)");
    }
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((MemPointerRecordEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
    }
  }
}

void decode_vm_region_record(VMMemRegion* rec) {
  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
    rec->addr() + rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_allocation_record()) {
    tty->print_cr(" (reserved)");
  } else if (rec->is_commit_record()) {
    tty->print_cr(" (committed)");
  } else {
    ShouldNotReachHere();
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((VMMemRegionEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
    }
  }
}

#endif

bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// We don't consolidate reserved regions, since they may be categorized
// as different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* reserved_region = (VMMemRegion*)current();

  // we don't have anything yet
  if (reserved_region == NULL) {
    return insert_record(rec);
  }

  assert(reserved_region->is_reserved_region(), "Sanity check");
  // duplicated records
  if (reserved_region->is_same_region(rec)) {
    return true;
  }
  // Overlapping stack regions indicate that a JNI thread failed to
  // detach from the VM before exiting. This leaks the JavaThread object.
  if (CheckJNICalls) {
      guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
         !reserved_region->overlaps_region(rec),
         "Attached JNI thread exited without being detached");
  }
  // otherwise, we should not have overlapping reserved regions
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    reserved_region->base() > rec->addr(), "Just check: locate()");
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    !reserved_region->overlaps_region(rec), "overlapping reserved regions");

  return insert_record(rec);
}

// We do consolidate committed regions.
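// Illustrative example (hypothetical addresses): committing [0x1000 - 0x2000)
// and then the adjacent range [0x2000 - 0x3000) within one reservation leaves
// a single committed region record [0x1000 - 0x3000); a later commit of
// [0x2800 - 0x3800) overlapping its tail expands that record to
// [0x1000 - 0x3800).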
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
    "Sanity check");

  // thread's native stack is always marked as "committed", ignore
  // the "commit" operation for creating stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  // if the reserved region has any committed regions
  VMMemRegion* committed_rgn = (VMMemRegion*)next();
  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
    // duplicated commit records
    if (committed_rgn->contains_region(rec)) {
      return true;
    } else if (committed_rgn->overlaps_region(rec)) {
      if (rec->addr() < committed_rgn->addr()) {
        // overlaps the front part
        committed_rgn->expand_region(rec->addr(),
          committed_rgn->addr() - rec->addr());
      } else {
        // overlaps the tail part
        address committed_rgn_end = committed_rgn->addr() +
              committed_rgn->size();
        assert(committed_rgn_end < rec->addr() + rec->size(),
             "overlap tail part");
        committed_rgn->expand_region(committed_rgn_end,
          (rec->addr() + rec->size()) - committed_rgn_end);
      }
    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
      // the regions are adjacent to each other
      committed_rgn->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can consolidate the next committed region
      if (next_reg != NULL && next_reg->is_committed_region() &&
        next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
          committed_rgn->expand_region(next_reg->base(), next_reg->size());
          // delete the merged region
          remove();
      }
      return true;
    } else if (committed_rgn->base() > rec->addr()) {
      // found the location, insert this committed region
      return insert_record(rec);
    }
    committed_rgn = (VMMemRegion*)next();
  }
  return insert_record(rec);
}
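
// Remove an uncommitted range from the committed regions of the current
// reservation. Illustrative example (hypothetical addresses): uncommitting
// [0x1800 - 0x2000) from committed region [0x1000 - 0x3000) shrinks the
// existing record to [0x1000 - 0x1800) and inserts a new committed region
// record for the tail, [0x2000 - 0x3000).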
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  // thread's native stack is always marked as "committed", ignore
  // the "uncommit" operation for stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // we have walked past the uncommitted range; the region must already
    // have been uncommitted, due to a duplicated record
    if (cur->addr() >= rec->addr() + rec->size()) {
      break;
    } else if (cur->contains_region(rec)) {
      // uncommit the whole region
      if (cur->is_same_region(rec)) {
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
        rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of the current memory region
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
             ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find the committed record, due to duplicated records
  return true;
}
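
// Remove a released virtual memory region from the snapshot. This is the fix
// for 8013120: when an entire reservation is released while it still has
// committed regions inside it, the reservation record and all of the commit
// records that follow it must be removed together. Illustrative example
// (hypothetical addresses): releasing reservation [0x1000 - 0x9000) that
// still contains committed region [0x2000 - 0x3000) removes both records.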
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  if (rec->is_same_region(cur)) {

    // In the snapshot, the virtual memory records are sorted in the following order:
    // 1. virtual memory's base address
    // 2. virtual memory reservation record, followed by the commit records within
    //    this reservation. The commit records are also in base address order.
    // When a reserved region is released, we want to remove the reservation record
    // and all commit records following it.
#ifdef ASSERT
    address low_addr = cur->addr();
    address high_addr = low_addr + cur->size();
#endif
    // remove the virtual memory reservation record
    remove();
    // remove committed regions within the above reservation
    VMMemRegion* next_region = (VMMemRegion*)current();
    while (next_region != NULL && next_region->is_committed_region()) {
      assert(next_region->addr() >= low_addr &&
             next_region->addr() + next_region->size() <= high_addr,
            "Range check");
      remove();
      next_region = (VMMemRegion*)current();
    }
  } else if (rec->addr() == cur->addr() ||
    rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // the released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
    assert(check_reserved_region(), "Integrity check");
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    }
  }
  return true;
}
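
// Insert a new reserved region record. The iterator is positioned at the
// previous reserved region, so we first skip over the commit records that
// belong to it before inserting the new record in base address order.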
bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with the previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
         p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}
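
// Split a reserved region so that the range [new_rgn_addr, new_rgn_addr +
// new_rgn_size) becomes a reservation record of its own. Illustrative example
// (hypothetical addresses): splitting [0x2000 - 0x3000) out of reservation
// [0x1000 - 0x4000) yields three reservation records: [0x1000 - 0x2000),
// [0x2000 - 0x3000) and [0x3000 - 0x4000).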
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes the 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // the remainder becomes the next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    // new region is at the end of the region
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}

static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}
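
// Virtual memory operations must be replayed in the order in which they
// happened, so the staged records are sorted by sequence number before
// promotion walks them.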
VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into sequence number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}

MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
  _number_of_classes = 0;
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");
  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");

  if (MemTracker::track_callsite()) {
    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
  } else {
    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
  }
}

void MemSnapshot::assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(src != NULL && dest != NULL, "Just check");
  assert(dest->seq() == 0 && src->seq() > 0, "cast away sequence");

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
  }
}

// Merge a recorder into the staging area.
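// Within one generation, several recorders may carry records for the same
// address; the record with the higher sequence number represents the most
// recent operation and wins.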
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
  MemPointerRecord* matched_rec;

  while (incoming_rec != NULL) {
    if (incoming_rec->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(incoming_rec)) {
        return false;
      }
    } else {
      // locate the matched record and/or position the iterator at the proper
      // location for this incoming record
      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
      // we have not seen this memory block in this generation,
      // so just add it to the staging area
      if (matched_rec == NULL) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else if (incoming_rec->addr() == matched_rec->addr()) {
        // whoever has the higher sequence number wins
        if (incoming_rec->seq() > matched_rec->seq()) {
          copy_seq_pointer(matched_rec, incoming_rec);
        }
      } else if (incoming_rec->addr() < matched_rec->addr()) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else {
        ShouldNotReachHere();
      }
    }
    incoming_rec = (MemPointerRecord*)itr.next();
  }
  DEBUG_ONLY(check_staging_data();)
  return true;
}

// Promote data to the next generation.
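// Promotion applies the staged records to the long-lived snapshot arrays:
// malloc records first, then virtual memory records, which are replayed in
// sequence number order.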
bool MemSnapshot::promote(int number_of_classes) {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  _number_of_classes = number_of_classes;
  return promoted;
}

bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found a matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
             "Sanity check");
      // update block states
      if (new_rec->is_allocation_record()) {
        assign_pointer(matched_rec, new_rec);
      } else if (new_rec->is_arena_memory_record()) {
        if (new_rec->size() == 0) {
          // remove the size record once the size drops to 0
          malloc_snapshot_itr.remove();
        } else {
          assign_pointer(matched_rec, new_rec);
        }
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_memory_record() &&
              next->is_memory_record_of_arena(matched_rec)) {
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove the related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // don't insert a size 0 record
      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
        new_rec = NULL;
      }

      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT comes on
          _untracked_count++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}
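
// Replay the staged virtual memory records against the snapshot. Each record
// is dispatched on its type: reserve, commit, uncommit, release or type tag.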
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate a reserved region that contains the specified address, or
    // the nearest reserved region whose base address is just above the
    // specified address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // only deal with splitting a bigger reserved region into smaller
          // regions. So far, CDS is the only use case.
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert or expand existing committed region to cover this
        // newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type. A memory
        // range cannot be re-tagged with a different type.
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
               "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * An assertion failure here indicates mismatched virtual memory records.
       * The likely scenario is that some virtual memory operations did not go
       * through the os::xxxx_memory() API and had to be tracked manually
       * (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
       "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->print_cr("");
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT
