src/share/vm/services/memSnapshot.cpp

author:      zgu
date:        Fri, 19 Oct 2012 21:40:07 -0400
changeset:   4193:716c64bda5ba
parent:      4053:33143ee07800
child:       4248:69ad7823b1ca
permissions: -rw-r--r--

7199092: NMT: NMT needs to deal overlapped virtual memory ranges
Summary: Enhanced virtual memory tracking to track committed regions as well as reserved regions, so NMT now can generate virtual memory map.
Reviewed-by: acorn, coleenp
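
The summary above describes the core of the change: each reserved region now also tracks its committed sub-ranges, and adjacent commits are coalesced, so the snapshot can be rendered as a virtual memory map. Below is a minimal, self-contained sketch of that bookkeeping idea using a plain std::map; it is not HotSpot code, and the names in it (ReservedRange, commit) are illustrative only.

#include <cstddef>
#include <cstdio>
#include <map>

struct ReservedRange {
  size_t base;
  size_t size;
  std::map<size_t, size_t> committed;   // base -> size, non-overlapping, sorted by base

  void commit(size_t addr, size_t sz) {
    std::map<size_t, size_t>::iterator next = committed.lower_bound(addr);
    if (next != committed.begin()) {
      std::map<size_t, size_t>::iterator prev = next;
      --prev;
      if (prev->first + prev->second == addr) {
        // new range starts where the previous committed range ends: extend it
        prev->second += sz;
        if (next != committed.end() && prev->first + prev->second == next->first) {
          // it now also touches the following range: merge that one too
          prev->second += next->second;
          committed.erase(next);
        }
        return;
      }
    }
    if (next != committed.end() && addr + sz == next->first) {
      // new range ends where the following committed range starts: extend it downward
      size_t merged = sz + next->second;
      committed.erase(next);
      committed[addr] = merged;
    } else {
      committed[addr] = sz;              // isolated committed range
    }
  }
};

int main() {
  ReservedRange r;
  r.base = 0x1000;
  r.size = 0x8000;
  r.commit(0x1000, 0x1000);
  r.commit(0x2000, 0x1000);   // coalesces with the previous commit
  r.commit(0x5000, 0x1000);   // stays a separate committed range
  for (std::map<size_t, size_t>::iterator it = r.committed.begin();
       it != r.committed.end(); ++it) {
    std::printf("committed [0x%lx - 0x%lx)\n",
                (unsigned long)it->first, (unsigned long)(it->first + it->second));
  }
  return 0;
}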

/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

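// Helpers: wrap an incoming allocation/commit record in a VMMemRegion(Ex) and
// insert it at, or immediately after, the iterator's current position. The Ex
// variant preserves the recorded call site (pc) when call-site tracking is on.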
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// We don't consolidate reserved regions, since they may be categorized
// into different types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();

  // we don't have anything yet
  if (cur == NULL) {
    return insert_record(rec);
  }

  assert(cur->is_reserved_region(), "Sanity check");
  // duplicated records
  if (cur->is_same_region(rec)) {
    return true;
  }
  assert(cur->base() > rec->addr(), "Just check: locate()");
  assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
  return insert_record(rec);
}

// we do consolidate committed regions
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");

  // A thread's native stack is always marked as "committed"; ignore the
  // "commit" operation issued when creating its stack guard pages.
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // duplicated commit records
    if (cur->contains_region(rec)) {
      return true;
    }
    if (cur->base() > rec->addr()) {
      // committed regions can not overlap
      assert(rec->addr() + rec->size() <= cur->base(), "Can not overlap");
      if (rec->addr() + rec->size() == cur->base()) {
        cur->expand_region(rec->addr(), rec->size());
        return true;
      } else {
        return insert_record(rec);
      }
    } else if (cur->base() + cur->size() == rec->addr()) {
      cur->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can consolidate the next committed region as well
      if (next_reg != NULL && next_reg->is_committed_region() &&
        next_reg->base() == cur->base() + cur->size()) {
          cur->expand_region(next_reg->base(), next_reg->size());
          remove();
      }
      return true;
    }
    cur = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

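// Remove the range described by an uncommit record from the committed
// region(s) under the current reserved region: drop a whole committed region,
// trim one of its ends, or split it in two.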
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");

  // A thread's native stack is always marked as "committed"; ignore the
  // "uncommit" operation issued for its stack guard pages.
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // region already uncommitted, must be due to duplicated record
    if (cur->addr() >= rec->addr() + rec->size()) {
      break;
    } else if (cur->contains_region(rec)) {
      // uncommit whole region
      if (cur->is_same_region(rec)) {
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
        rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of the current memory region
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
             ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find a matching committed record, due to duplicated records
  return true;
}

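// Handle a virtual memory release: shrink or remove the current reserved
// region, splitting it when only the middle of the region is released.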
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
#ifdef ASSERT
  VMMemRegion* next_reg = (VMMemRegion*)peek_next();
  // should not have any committed memory in this reserved region
  assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
#endif
  if (rec->is_same_region(cur)) {
    remove();
  } else if (rec->addr() == cur->addr() ||
    rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      return insert_reserved_region(&tmp);
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      return insert_reserved_region(&tmp);
    }
  }
  return true;
}

bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
         p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

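// Split a reserved region so that [new_rgn_addr, new_rgn_addr + new_rgn_size)
// becomes its own reserved region. Depending on where the new range falls,
// the original region is trimmed at one end or split into three pieces, e.g.
// (illustrative layout only):
//
//   before:  [rgn ..........................................]
//   after:   [rgn head ...][new region ...][rgn remainder ...]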
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes the 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // the remainder becomes the next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}

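// qsort comparator: order records by their sequence number, i.e. the order
// in which the operations were recorded.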
static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

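// Allocate the staging arrays for malloc and virtual memory records; fail
// cleanly (and free whatever was allocated) if either allocation fails.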
bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}

VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}

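// Allocate the snapshot arrays (malloc pointers and virtual memory regions),
// the staging area and the lock that guards them.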
MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

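// Copy the content of one pointer record over another record for the same
// address, preserving the call site when call-site tracking is enabled.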
void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  MEMFLAGS flags = dest->flags();

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}

// merge a per-thread memory recorder into the staging area
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*) itr.current();
  while (p1 != NULL) {
    if (p1->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(p1)) {
        return false;
      }
    } else {
      // locate the matching record and/or position the iterator at the proper
      // location for this incoming record
      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
      // we have not seen this memory block, so just add to staging area
      if (p2 == NULL) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else if (p1->addr() == p2->addr()) {
        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
        // a memory block can have many tagging records; find the right one to
        // replace or the right position to insert
        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
            (p1->flags() & MemPointerRecord::tag_masks)) {
            p2 = (MemPointerRecord*)malloc_staging_itr.next();
            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
          } else {
            break;
          }
        }
        int df = (p1->flags() & MemPointerRecord::tag_masks) -
          (p2->flags() & MemPointerRecord::tag_masks);
        if (df == 0) {
          assert(p1->seq() > 0, "not sequenced");
          assert(p2->seq() > 0, "not sequenced");
          if (p1->seq() > p2->seq()) {
            copy_pointer(p2, p1);
          }
        } else if (df < 0) {
          if (!malloc_staging_itr.insert(p1)) {
            return false;
          }
        } else {
          if (!malloc_staging_itr.insert_after(p1)) {
            return false;
          }
        }
      } else if (p1->addr() < p2->addr()) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!malloc_staging_itr.insert_after(p1)) {
          return false;
        }
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  // verify staging data in debug builds (the original line declared the
  // function instead of calling it, so the check never ran)
  DEBUG_ONLY(check_staging_data();)
  return true;
}

// promote data to next generation
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  return promoted;
}

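// Apply the staged malloc records, in sequence order, to the malloc snapshot:
// allocation and arena-size records update or extend the snapshot, while
// deallocation records remove the matching entry (and any attached arena
// size record).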
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
             "Sanity check");
      // update block states
      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
        copy_pointer(matched_rec, new_rec);
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next->is_arena_size_record()) {
            // it has to match the arena record
            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // it is a new record, insert into snapshot
      if (new_rec->is_arena_size_record()) {
        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
          // no matching arena record, ignore the size record
          new_rec = NULL;
        }
      }
      // only 'live' records can go into the snapshot
      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is on
          _untracked_count ++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

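// Apply the staged virtual memory records, in sequence order, to the virtual
// memory snapshot. After promotion, _vm_ptrs holds each reserved region
// immediately followed by the committed regions it contains, both sorted by
// address; this is the layout from which the virtual memory map mentioned in
// the changeset summary can be generated. Illustrative layout (addresses are
// made up):
//
//   [reserved  0x00001000 - 0x00009000, mtThreadStack]
//     [committed 0x00001000 - 0x00003000]
//     [committed 0x00005000 - 0x00006000]
//   [reserved  0x0000a000 - 0x0000c000, mtCode]
//     ...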
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate a reserved region that contains the specified address, or the
    // nearest reserved region whose base address is just above the specified
    // address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // we only deal with splitting a bigger reserved region into smaller
          // regions; so far, CDS is the only use case.
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert or expand existing committed region to cover this
        // newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type; a memory
        // range cannot be re-tagged to a different type
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
               "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * A failure of the assertion below indicates mismatched virtual memory
       * records. The likely scenario is that some virtual memory operations do
       * not go through the os::xxxx_memory() API and have to be tracked
       * manually (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
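// Print utilization statistics for the snapshot arrays and the staging area.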
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging Area:     %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

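// Verify that the malloc snapshot is sorted by address.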
void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

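// Whether the staging area holds an allocation record for the given address.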
bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
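// Verify the staging area invariants: malloc records are sorted by address,
// then by tag, and the virtual memory staging array contains only virtual
// memory records.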
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
       "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

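// Dump the virtual memory snapshot to tty: committed regions are indented
// under their reserved region, and the reserving call site is printed when
// call-site tracking is enabled.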
void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->print_cr("");
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT
