src/share/vm/services/memSnapshot.cpp

author       zgu
date         Mon, 05 Nov 2012 15:30:22 -0500
changeset    4248:69ad7823b1ca
parent       4193:716c64bda5ba
child        4272:ed8b1e39ff4f
child        4274:fb3190e77d3c
permissions  -rw-r--r--

8001591: NMT: assertion failed: assert(rec->addr() + rec->size() <= cur->base()) failed: Can not overlap in memSnapshot.cpp
Summary: NMT should allow overlapping committed regions as long as they belong to the same reserved region
Reviewed-by: dholmes, coleenp

/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

#ifdef ASSERT

void decode_pointer_record(MemPointerRecord* rec) {
  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
    rec->addr() + rec->size(), (int)rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_vm_pointer()) {
    if (rec->is_allocation_record()) {
      tty->print_cr(" (reserve)");
    } else if (rec->is_commit_record()) {
      tty->print_cr(" (commit)");
    } else if (rec->is_uncommit_record()) {
      tty->print_cr(" (uncommit)");
    } else if (rec->is_deallocation_record()) {
      tty->print_cr(" (release)");
    } else {
      tty->print_cr(" (tag)");
    }
  } else {
    if (rec->is_arena_size_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
    } else {
      tty->print_cr(" (free)");
    }
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((MemPointerRecordEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT, pc);
    }
  }
}

void decode_vm_region_record(VMMemRegion* rec) {
  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
    rec->addr() + rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_allocation_record()) {
    tty->print_cr(" (reserved)");
  } else if (rec->is_commit_record()) {
    tty->print_cr(" (committed)");
  } else {
    ShouldNotReachHere();
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((VMMemRegionEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT, pc);
    }
  }
}

#endif
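
// The two insert helpers below wrap an incoming MemPointerRecord into a
// VMMemRegion (VMMemRegionEx when callsite tracking is on, so the recorded
// pc is preserved) before inserting it into the iterator's underlying array.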
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// We don't consolidate reserved regions, since they may be tagged with
// different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();

  // we don't have anything yet
  if (cur == NULL) {
    return insert_record(rec);
  }

  assert(cur->is_reserved_region(), "Sanity check");
  // duplicated records
  if (cur->is_same_region(rec)) {
    return true;
  }
  assert(cur->base() > rec->addr(), "Just check: locate()");
  assert(!cur->overlaps_region(rec), "overlapping reserved regions");
  return insert_record(rec);
}

// We do consolidate committed regions.
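// Per 8001591, committed regions may overlap as long as they fall within
// the same reserved region, so an overlapping commit record is merged into
// the existing committed region rather than rejected.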
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
    "Sanity check");

  // a thread's native stack is always marked as "committed"; ignore the
  // "commit" operation that creates the stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }
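
  // A sketch of the cases handled by the loop below, relative to an
  // existing committed region:
  //
  //   existing:            [=======)
  //   front overlap:    [=====)          -> expand existing downward
  //   tail overlap:            [=====)   -> expand existing upward
  //   adjacent:                [====)    -> append, then try to merge the
  //                                         following committed region
  //   disjoint, above:               [=) -> keep walking; insert once a
  //                                         higher committed region is found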
  // walk committed regions within this reserved region, if any
  VMMemRegion* committed_rgn = (VMMemRegion*)next();
  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
    // duplicated commit records
    if (committed_rgn->contains_region(rec)) {
      return true;
    } else if (committed_rgn->overlaps_region(rec)) {
      // overlaps front part
      if (rec->addr() < committed_rgn->addr()) {
        committed_rgn->expand_region(rec->addr(),
          committed_rgn->addr() - rec->addr());
      } else {
        // overlaps tail part
        address committed_rgn_end = committed_rgn->addr() +
              committed_rgn->size();
        assert(committed_rgn_end < rec->addr() + rec->size(),
             "overlap tail part");
        committed_rgn->expand_region(committed_rgn_end,
          (rec->addr() + rec->size()) - committed_rgn_end);
      }
    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
      // the two regions are adjacent to each other
      committed_rgn->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can consolidate the next committed region
      if (next_reg != NULL && next_reg->is_committed_region() &&
        next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
          committed_rgn->expand_region(next_reg->base(), next_reg->size());
          // delete the merged region
          remove();
      }
      return true;
    } else if (committed_rgn->base() > rec->addr()) {
      // found the location; insert this committed region
      return insert_record(rec);
    }
    committed_rgn = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  // a thread's native stack is always marked as "committed"; ignore the
  // "uncommit" operation related to the stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }
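
  // Walk this reserved region's committed regions. Three shapes of uncommit
  // are handled: the whole committed region, a slice at either end, or a
  // middle slice that splits the committed region in two.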
  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // the region has already been uncommitted, which must be due to a
    // duplicated record
    if (cur->addr() >= rec->addr() + rec->size()) {
      break;
    } else if (cur->contains_region(rec)) {
      // uncommit the whole region
      if (cur->is_same_region(rec)) {
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
        rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of the current memory region
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
             ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find the committed record, due to duplicated records
  return true;
}

bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
#ifdef ASSERT
  VMMemRegion* next_reg = (VMMemRegion*)peek_next();
  // should not have any committed memory in this reserved region
  assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
#endif
  if (rec->is_same_region(cur)) {
    remove();
  } else if (rec->addr() == cur->addr() ||
    rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      return insert_reserved_region(&tmp);
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      return insert_reserved_region(&tmp);
    }
  }
  return true;
}
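
// Insert a new reserved region. The iterator is first advanced past any
// committed regions that belong to the preceding reserved region, so the
// new record lands between region groups rather than inside one.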
bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with the previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
         p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}
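
// Split an existing reserved region so that [new_rgn_addr, new_rgn_addr +
// new_rgn_size) becomes a reserved region of its own. Depending on where
// the new region sits, the original region is trimmed at its head, trimmed
// at its tail, or split into three pieces.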
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes the 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // the remainder becomes the next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}

static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}
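
// Allocate the staging arrays. When callsite tracking is on, the Ex record
// types are used so each staged record carries its caller pc through
// staging and promotion.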
bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}

VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}

MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}

// merge a per-thread memory recorder into the staging area
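// Malloc records in the staging area are kept sorted by address; records
// for the same address are ordered by their tag bits, so the matching
// logic below walks past lower-tagged records before deciding whether to
// replace an existing record or insert a new one.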
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*)itr.current();
  while (p1 != NULL) {
    if (p1->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(p1)) {
        return false;
      }
    } else {
      // locate the matching record and/or position the iterator at the
      // proper location for this incoming record
      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
      // we have not seen this memory block, so just add to staging area
      if (p2 == NULL) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else if (p1->addr() == p2->addr()) {
        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
        // a memory block can have many tagging records; find the right one
        // to replace, or the right position to insert
        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
            (p1->flags() & MemPointerRecord::tag_masks)) {
            p2 = (MemPointerRecord*)malloc_staging_itr.next();
            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
          } else {
            break;
          }
        }
        int df = (p1->flags() & MemPointerRecord::tag_masks) -
          (p2->flags() & MemPointerRecord::tag_masks);
        if (df == 0) {
          assert(p1->seq() > 0, "not sequenced");
          assert(p2->seq() > 0, "not sequenced");
          if (p1->seq() > p2->seq()) {
            copy_pointer(p2, p1);
          }
        } else if (df < 0) {
          if (!malloc_staging_itr.insert(p1)) {
            return false;
          }
        } else {
          if (!malloc_staging_itr.insert_after(p1)) {
            return false;
          }
        }
      } else if (p1->addr() < p2->addr()) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!malloc_staging_itr.insert_after(p1)) {
          return false;
        }
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(check_staging_data();)
  return true;
}

// promote data to next generation
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  return promoted;
}
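
// Fold staged malloc records into the live snapshot: allocation and arena
// size records update or extend the snapshot, while a deallocation record
// removes its matching allocation (plus any trailing arena size record).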
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
             "Sanity check");
      // update block states
      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
        copy_pointer(matched_rec, new_rec);
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_size_record()) {
            // it has to match the arena record
            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated; remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // it is a new record; insert it into the snapshot
      if (new_rec->is_arena_size_record()) {
        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
          // no matching arena record; ignore the size record
          new_rec = NULL;
        }
      }
      // only 'live' records can go into the snapshot
      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is on
          _untracked_count++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}
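
// Fold staged virtual memory records into the snapshot. Records are
// replayed in sequence order; each one is matched against the reserved
// region that contains it and then dispatched by record type.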
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate a reserved region that contains the specified address, or
    // the nearest reserved region whose base address is just above the
    // specified address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // we only deal with splitting a bigger reserved region into
          // smaller regions; so far, CDS is the only use case
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert or expand an existing committed region to cover this
        // newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type; a
        // memory range cannot be re-tagged to a different type
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
               "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * An assertion failure here indicates mismatched virtual memory records.
       * The likely scenario is that some virtual memory operations do not go
       * through the os::xxxx_memory() API and therefore have to be tracked
       * manually (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%]  %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging area:     %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging area:     %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
       "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->print_cr("");
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT
