--- a/src/share/vm/services/memSnapshot.cpp	Tue Sep 04 16:20:28 2012 -0700
+++ b/src/share/vm/services/memSnapshot.cpp	Tue Sep 11 20:53:17 2012 -0400
@@ -31,148 +31,54 @@
 #include "services/memSnapshot.hpp"
 #include "services/memTracker.hpp"
 
+static int sort_in_seq_order(const void* p1, const void* p2) {
+  assert(p1 != NULL && p2 != NULL, "Sanity check");
+  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
+  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
+  return (mp1->seq() - mp2->seq());
+}
 
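The comparator added above follows the standard qsort contract: it returns a negative, zero, or positive value to order two elements by seq(). A minimal standalone sketch of the same idea (Record here is a hypothetical simplified type, not the real SeqMemPointerRecord):

#include <cstdio>
#include <cstdlib>

struct Record {
  int seq;          // generation-unique sequence number
  const char* op;   // what the record describes
};

static int sort_in_seq_order(const void* p1, const void* p2) {
  const Record* r1 = (const Record*)p1;
  const Record* r2 = (const Record*)p2;
  return r1->seq - r2->seq;   // negative/zero/positive, as qsort expects
}

int main() {
  Record recs[] = { {3, "commit"}, {1, "reserve"}, {2, "tag"} };
  qsort(recs, 3, sizeof(recs[0]), sort_in_seq_order);
  for (int i = 0; i < 3; i++) {
    printf("%d %s\n", recs[i].seq, recs[i].op);  // prints in event order: 1, 2, 3
  }
  return 0;
}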
-// stagging data groups the data of a VM memory range, so we can consolidate
-// them into one record during the walk
-bool StagingWalker::consolidate_vm_records(VMMemRegionEx* vm_rec) {
-  MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-  assert(cur != NULL && cur->is_vm_pointer(), "not a virtual memory pointer");
-
-  jint cur_seq;
-  jint next_seq;
-
-  bool trackCallsite = MemTracker::track_callsite();
-
-  if (trackCallsite) {
-    vm_rec->init((MemPointerRecordEx*)cur);
-    cur_seq = ((SeqMemPointerRecordEx*)cur)->seq();
+bool StagingArea::init() {
+  if (MemTracker::track_callsite()) {
+    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
+    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
   } else {
-    vm_rec->init((MemPointerRecord*)cur);
-    cur_seq = ((SeqMemPointerRecord*)cur)->seq();
+    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
+    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
   }
 
-  // only can consolidate when we have allocation record,
-  // which contains virtual memory range
-  if (!cur->is_allocation_record()) {
-    _itr.next();
+  if (_malloc_data != NULL && _vm_data != NULL &&
+      !_malloc_data->out_of_memory() &&
+      !_vm_data->out_of_memory()) {
     return true;
+  } else {
+    if (_malloc_data != NULL) delete _malloc_data;
+    if (_vm_data != NULL) delete _vm_data;
+    _malloc_data = NULL;
+    _vm_data = NULL;
+    return false;
   }
-
-  // allocation range
-  address base = cur->addr();
-  address end = base + cur->size();
-
-  MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-  // if the memory range is alive
-  bool live_vm_rec = true;
-  while (next != NULL && next->is_vm_pointer()) {
-    if (next->is_allocation_record()) {
-      assert(next->addr() >= base, "sorting order or overlapping");
-      break;
-    }
-
-    if (trackCallsite) {
-      next_seq = ((SeqMemPointerRecordEx*)next)->seq();
-    } else {
-      next_seq = ((SeqMemPointerRecord*)next)->seq();
-    }
-
-    if (next_seq < cur_seq) {
-      _itr.next();
-      next = (MemPointerRecord*)_itr.peek_next();
-      continue;
-    }
-
-    if (next->is_deallocation_record()) {
-      if (next->addr() == base && next->size() == cur->size()) {
-        // the virtual memory range has been released
-        _itr.next();
-        live_vm_rec = false;
-        break;
-      } else if (next->addr() < end) { // partial release
-        vm_rec->partial_release(next->addr(), next->size());
-        _itr.next();
-      } else {
-        break;
-      }
-    } else if (next->is_commit_record()) {
-      if (next->addr() >= base && next->addr() + next->size() <= end) {
-        vm_rec->commit(next->size());
-        _itr.next();
-      } else {
-        assert(next->addr() >= base, "sorting order or overlapping");
-        break;
-      }
-    } else if (next->is_uncommit_record()) {
-      if (next->addr() >= base && next->addr() + next->size() <= end) {
-        vm_rec->uncommit(next->size());
-        _itr.next();
-      } else {
-        assert(next->addr() >= end, "sorting order or overlapping");
-        break;
-      }
-    } else if (next->is_type_tagging_record()) {
-      if (next->addr() >= base && next->addr() < end ) {
-        vm_rec->tag(next->flags());
-        _itr.next();
-      } else {
-        break;
-      }
-    } else {
-      assert(false, "unknown record type");
-    }
-    next = (MemPointerRecord*)_itr.peek_next();
-  }
-  _itr.next();
-  return live_vm_rec;
 }
 
-MemPointer* StagingWalker::next() {
-  MemPointerRecord* cur_p = (MemPointerRecord*)_itr.current();
-  if (cur_p == NULL) {
-    _end_of_array = true;
-    return NULL;
-  }
-
-  MemPointerRecord* next_p;
-  if (cur_p->is_vm_pointer()) {
-    _is_vm_record = true;
-    if (!consolidate_vm_records(&_vm_record)) {
-      return next();
-    }
-  } else { // malloc-ed pointer
-    _is_vm_record = false;
-    next_p = (MemPointerRecord*)_itr.peek_next();
-    if (next_p != NULL && next_p->addr() == cur_p->addr()) {
-      assert(cur_p->is_allocation_record(), "sorting order");
-      assert(!next_p->is_allocation_record(), "sorting order");
-      _itr.next();
-      if (cur_p->seq() < next_p->seq()) {
-        cur_p = next_p;
-      }
-    }
-    if (MemTracker::track_callsite()) {
-      _malloc_record.init((MemPointerRecordEx*)cur_p);
-    } else {
-      _malloc_record.init((MemPointerRecord*)cur_p);
-    }
+MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
+  MemPointerArray* arr = vm_data();
+  // sort into seq number order
+  arr->sort((FN_SORT)sort_in_seq_order);
+  return MemPointerArrayIteratorImpl(arr);
+}
 
-    _itr.next();
-  }
-  return current();
-}
 
 MemSnapshot::MemSnapshot() {
   if (MemTracker::track_callsite()) {
     _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
     _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
-    _staging_area = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
   } else {
     _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
     _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
-    _staging_area = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
   }
 
+  _staging_area.init();
   _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
   NOT_PRODUCT(_untracked_count = 0;)
 }
@@ -181,11 +87,6 @@
   assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
   {
     MutexLockerEx locker(_lock);
-    if (_staging_area != NULL) {
-      delete _staging_area;
-      _staging_area = NULL;
-    }
-
     if (_alloc_ptrs != NULL) {
       delete _alloc_ptrs;
       _alloc_ptrs = NULL;
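The merge() changes in the next hunk split incoming records by kind: virtual memory records are appended to the VM staging list untouched (they are consolidated later, at promotion), while malloc records are inserted into an address-sorted staging list. A sketch of that split with simplified stand-in containers (Rec, Staging, and the vectors are illustrative, not the real MemPointerArray API):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Rec { uintptr_t addr; bool is_vm; };

struct Staging {
  std::vector<Rec> malloc_data;  // kept sorted by address
  std::vector<Rec> vm_data;      // appended as-is; ordered by seq at promotion

  bool merge_one(const Rec& r) {
    if (r.is_vm) {
      vm_data.push_back(r);      // VM records: append only during merge
    } else {
      // malloc records: insert at the address-sorted position
      std::vector<Rec>::iterator pos =
          std::lower_bound(malloc_data.begin(), malloc_data.end(), r,
              [](const Rec& a, const Rec& b) { return a.addr < b.addr; });
      malloc_data.insert(pos, r);
    }
    return true;
  }
};

int main() {
  Staging s;
  s.merge_one(Rec{0x2000, false});
  s.merge_one(Rec{0x1000, false});  // lands before 0x2000
  s.merge_one(Rec{0x3000, true});   // goes to the VM list
  return 0;
}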
@@ -221,62 +122,64 @@
 bool MemSnapshot::merge(MemRecorder* rec) {
   assert(rec != NULL && !rec->out_of_memory(), "Just check");
 
-  // out of memory
-  if (_staging_area == NULL || _staging_area->out_of_memory()) {
-    return false;
-  }
-
   SequencedRecordIterator itr(rec->pointer_itr());
 
   MutexLockerEx lock(_lock, true);
-  MemPointerIterator staging_itr(_staging_area);
+  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
   MemPointerRecord *p1, *p2;
   p1 = (MemPointerRecord*) itr.current();
   while (p1 != NULL) {
-    p2 = (MemPointerRecord*)staging_itr.locate(p1->addr());
-    // we have not seen this memory block, so just add to staging area
-    if (p2 == NULL) {
-      if (!staging_itr.insert(p1)) {
+    if (p1->is_vm_pointer()) {
+      // we don't do anything with virtual memory records during merge
+      if (!_staging_area.vm_data()->append(p1)) {
         return false;
       }
-    } else if (p1->addr() == p2->addr()) {
-      MemPointerRecord* staging_next = (MemPointerRecord*)staging_itr.peek_next();
-      // a memory block can have many tagging records, find right one to replace or
-      // right position to insert
-      while (staging_next != NULL && staging_next->addr() == p1->addr()) {
-        if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
-          (p1->flags() & MemPointerRecord::tag_masks)) {
-          p2 = (MemPointerRecord*)staging_itr.next();
-          staging_next = (MemPointerRecord*)staging_itr.peek_next();
+    } else {
+      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
+      // we have not seen this memory block, so just add to staging area
+      if (p2 == NULL) {
+        if (!malloc_staging_itr.insert(p1)) {
+          return false;
+        }
+      } else if (p1->addr() == p2->addr()) {
+        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
+        // a memory block can have many tagging records; find the right one to
+        // replace or the right position to insert
+        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
+          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
+            (p1->flags() & MemPointerRecord::tag_masks)) {
+            p2 = (MemPointerRecord*)malloc_staging_itr.next();
+            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
+          } else {
+            break;
+          }
+        }
+        int df = (p1->flags() & MemPointerRecord::tag_masks) -
+          (p2->flags() & MemPointerRecord::tag_masks);
+        if (df == 0) {
+          assert(p1->seq() > 0, "not sequenced");
+          assert(p2->seq() > 0, "not sequenced");
+          if (p1->seq() > p2->seq()) {
+            copy_pointer(p2, p1);
+          }
+        } else if (df < 0) {
+          if (!malloc_staging_itr.insert(p1)) {
+            return false;
+          }
         } else {
-          break;
+          if (!malloc_staging_itr.insert_after(p1)) {
+            return false;
+          }
         }
-      }
-      int df = (p1->flags() & MemPointerRecord::tag_masks) -
-        (p2->flags() & MemPointerRecord::tag_masks);
-      if (df == 0) {
-        assert(p1->seq() > 0, "not sequenced");
-        assert(p2->seq() > 0, "not sequenced");
-        if (p1->seq() > p2->seq()) {
-          copy_pointer(p2, p1);
-        }
-      } else if (df < 0) {
-        if (!staging_itr.insert(p1)) {
+      } else if (p1->addr() < p2->addr()) {
+        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else {
-        if (!staging_itr.insert_after(p1)) {
+        if (!malloc_staging_itr.insert_after(p1)) {
          return false;
        }
      }
-    } else if (p1->addr() < p2->addr()) {
-      if (!staging_itr.insert(p1)) {
-        return false;
-      }
-    } else {
-      if (!staging_itr.insert_after(p1)) {
-        return false;
-      }
     }
     p1 = (MemPointerRecord*)itr.next();
   }
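When merge() finds a staged malloc record at the same address, it orders the records by their tag bits (flags & tag_masks) and, on a tag tie, lets the newer sequence number win via copy_pointer(). A compact sketch of that decision rule (Rec and Action are illustrative types):

#include <cassert>

struct Rec { unsigned addr; int tag; int seq; };

enum Action { REPLACE_IF_NEWER, INSERT_BEFORE, INSERT_AFTER };

// st = record already staged at this address, in = incoming record
Action classify(const Rec& st, const Rec& in) {
  assert(st.addr == in.addr);
  int df = in.tag - st.tag;              // mirrors the tag_masks difference
  if (df == 0) return REPLACE_IF_NEWER;  // equal tags: higher seq replaces
  return (df < 0) ? INSERT_BEFORE : INSERT_AFTER;
}

int main() {
  Rec staged   = {0x1000, 2, 5};
  Rec incoming = {0x1000, 2, 9};
  assert(classify(staged, incoming) == REPLACE_IF_NEWER);
  return 0;
}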
@@ -287,123 +190,180 @@
 
 
 // promote data to next generation
-void MemSnapshot::promote() {
-  assert(_alloc_ptrs != NULL && _staging_area != NULL && _vm_ptrs != NULL,
-    "Just check");
+bool MemSnapshot::promote() {
+  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
+  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
+    "Just check");
   MutexLockerEx lock(_lock, true);
-  StagingWalker walker(_staging_area);
-  MemPointerIterator malloc_itr(_alloc_ptrs);
-  VMMemPointerIterator vm_itr(_vm_ptrs);
-  MemPointer* cur = walker.current();
-  while (cur != NULL) {
-    if (walker.is_vm_record()) {
-      VMMemRegion* cur_vm = (VMMemRegion*)cur;
-      VMMemRegion* p = (VMMemRegion*)vm_itr.locate(cur_vm->addr());
-      cur_vm = (VMMemRegion*)cur;
-      if (p != NULL && (p->contains(cur_vm) || p->base() == cur_vm->base())) {
-        assert(p->is_reserve_record() ||
-          p->is_commit_record(), "wrong vm record type");
-        // resize existing reserved range
-        if (cur_vm->is_reserve_record() && p->base() == cur_vm->base()) {
-          assert(cur_vm->size() >= p->committed_size(), "incorrect resizing");
-          p->set_reserved_size(cur_vm->size());
-        } else if (cur_vm->is_commit_record()) {
-          p->commit(cur_vm->committed_size());
-        } else if (cur_vm->is_uncommit_record()) {
-          p->uncommit(cur_vm->committed_size());
-          if (!p->is_reserve_record() && p->committed_size() == 0) {
-            vm_itr.remove();
+
+  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
+  bool promoted = false;
+  if (promote_malloc_records(&malloc_itr)) {
+    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
+    if (promote_virtual_memory_records(&vm_itr)) {
+      promoted = true;
+    }
+  }
+
+  NOT_PRODUCT(check_malloc_pointers();)
+  _staging_area.clear();
+  return promoted;
+}
+
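The new promote() is a two-phase driver: malloc records first, then virtual memory records replayed in seq order, with the staging area cleared regardless of the outcome. A control-flow sketch under those assumptions (the helpers here are trivial stand-ins, not the real ones):

#include <cstdio>

// Stand-ins for the two promotion phases in the patch.
static bool promote_malloc_records() { return true; }
static bool promote_virtual_memory_records() { return true; }
static void clear_staging() { puts("staging cleared"); }

bool promote() {
  bool promoted = false;
  if (promote_malloc_records()) {            // phase 1: malloc records
    if (promote_virtual_memory_records()) {  // phase 2: VM records, seq order
      promoted = true;
    }
  }
  clear_staging();  // staging is cleared even if a phase failed
  return promoted;
}

int main() { return promote() ? 0 : 1; }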
+bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
+  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
+  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
+  MemPointerRecord* matched_rec;
+  while (new_rec != NULL) {
+    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
+    // found matched memory block
+    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
+      // snapshot already contains 'live' records
+      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
+        "Sanity check");
+      // update block states
+      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+        copy_pointer(matched_rec, new_rec);
+      } else {
+        // a deallocation record
+        assert(new_rec->is_deallocation_record(), "Sanity check");
+        // an arena record can be followed by a size record; we need to remove both
+        if (matched_rec->is_arena_record()) {
+          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
+          if (next != NULL && next->is_arena_size_record()) {
+            // it has to match the arena record
+            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
+            malloc_snapshot_itr.remove();
           }
-        } else if (cur_vm->is_type_tagging_record()) {
-          p->tag(cur_vm->flags());
-        } else if (cur_vm->is_release_record()) {
-          if (cur_vm->base() == p->base() && cur_vm->size() == p->size()) {
-            // release the whole range
-            vm_itr.remove();
+        }
+        // the memory is deallocated, remove related record(s)
+        malloc_snapshot_itr.remove();
+      }
+    } else {
+      // it is a new record, insert into snapshot
+      if (new_rec->is_arena_size_record()) {
+        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
+        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
+          // no matched arena record, ignore the size record
+          new_rec = NULL;
+        }
+      }
+      // only 'live' records can go into the snapshot
+      if (new_rec != NULL) {
+        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
+            if (!malloc_snapshot_itr.insert_after(new_rec)) {
+              return false;
+            }
          } else {
-            // partial release
-            p->partial_release(cur_vm->base(), cur_vm->size());
+            if (!malloc_snapshot_itr.insert(new_rec)) {
+              return false;
+            }
+          }
+        }
+#ifndef PRODUCT
+        else if (!has_allocation_record(new_rec->addr())) {
+          // NMT cannot track some startup memory, which is allocated before NMT is on
+          _untracked_count ++;
+        }
+#endif
+      }
+    }
+    new_rec = (MemPointerRecord*)itr->next();
+  }
+  return true;
+}
+
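A subtlety in promote_malloc_records() above: an arena record may be directly followed by its 'size' record, and a deallocation has to drop both. A sketch of that pairing rule on a simplified list (MRec and remove_arena are hypothetical, with the assumed invariant that a size record immediately follows its arena record):

#include <cassert>
#include <iterator>
#include <list>

struct MRec { unsigned addr; bool arena; bool arena_size; };

void remove_arena(std::list<MRec>& snapshot, std::list<MRec>::iterator it) {
  std::list<MRec>::iterator next = std::next(it);
  if (next != snapshot.end() && next->arena_size) {
    snapshot.erase(next);  // drop the paired size record first
  }
  snapshot.erase(it);      // then the arena record itself
}

int main() {
  std::list<MRec> snap = { {0x10, true, false}, {0x10, false, true} };
  remove_arena(snap, snap.begin());
  assert(snap.empty());
  return 0;
}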
+bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
+  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
+  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
+  VMMemRegionEx new_vm_rec;
+  VMMemRegion* matched_rec;
+  while (new_rec != NULL) {
+    assert(new_rec->is_vm_pointer(), "Sanity check");
+    if (MemTracker::track_callsite()) {
+      new_vm_rec.init((MemPointerRecordEx*)new_rec);
+    } else {
+      new_vm_rec.init(new_rec);
+    }
+    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
+    if (matched_rec != NULL &&
+        (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
+      // snapshot can only have 'live' records
+      assert(matched_rec->is_reserve_record(), "Sanity check");
+      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
+        // resize reserved virtual memory range
+        // resize has to cover committed area
+        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
+        matched_rec->set_reserved_size(new_vm_rec.size());
+      } else if (new_vm_rec.is_commit_record()) {
+        // commit memory inside reserved memory range
+        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
+        // thread stacks are marked committed, so we ignore 'commit' records for creating
+        // stack guard pages
+        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
+          matched_rec->commit(new_vm_rec.committed_size());
+        }
+      } else if (new_vm_rec.is_uncommit_record()) {
+        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
+          // ignore 'uncommit' records from removing stack guard pages; only
+          // uncommit the thread stack as a whole
+          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
+            matched_rec->uncommit(new_vm_rec.committed_size());
          }
        } else {
-          // we do see multiple reserver on the same vm range
-          assert((cur_vm->is_commit_record() || cur_vm->is_reserve_record()) &&
-            cur_vm->base() == p->base() && cur_vm->size() == p->size(), "bad record");
-          p->tag(cur_vm->flags());
+          // uncommit memory inside reserved memory range
+          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
+            "Sanity check");
+          matched_rec->uncommit(new_vm_rec.committed_size());
+        }
+      } else if (new_vm_rec.is_type_tagging_record()) {
+        // tag this virtual memory range to a memory type;
+        // cannot re-tag a memory range to a different type
+        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
+          FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
+          "Sanity check");
+        matched_rec->tag(new_vm_rec.flags());
+      } else if (new_vm_rec.is_release_record()) {
+        // release part or whole memory range
+        if (new_vm_rec.base() == matched_rec->base() &&
+            new_vm_rec.size() == matched_rec->size()) {
+          // release whole virtual memory range
+          assert(matched_rec->committed_size() == 0, "Sanity check");
+          vm_snapshot_itr.remove();
+        } else {
+          // partial release
+          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
+        }
      } else {
-        if(cur_vm->is_reserve_record()) {
-          if (p == NULL || p->base() > cur_vm->base()) {
-            vm_itr.insert(cur_vm);
-          } else {
-            vm_itr.insert_after(cur_vm);
+        // multiple reserve/commit on the same virtual memory range
+        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
+          (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
+          "Sanity check");
+        matched_rec->tag(new_vm_rec.flags());
+      }
+    } else {
+      // no matched record
+      if (new_vm_rec.is_reserve_record()) {
+        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
+          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
+            return false;
          }
        } else {
-          // In theory, we should assert without conditions. However, in case of native
-          // thread stack, NMT explicitly releases the thread stack in Thread's destructor,
-          // due to platform dependent behaviors. On some platforms, we see uncommit/release
-          // native thread stack, but some, we don't.
-          assert(cur_vm->is_uncommit_record() || cur_vm->is_deallocation_record(),
-            err_msg("Should not reach here, pointer addr = [" INTPTR_FORMAT "], flags = [%x]",
-            cur_vm->addr(), cur_vm->flags()));
-        }
-      }
-    } else {
-      MemPointerRecord* cur_p = (MemPointerRecord*)cur;
-      MemPointerRecord* p = (MemPointerRecord*)malloc_itr.locate(cur->addr());
-      if (p != NULL && cur_p->addr() == p->addr()) {
-        assert(p->is_allocation_record() || p->is_arena_size_record(), "untracked");
-        if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
-          copy_pointer(p, cur_p);
-        } else { // deallocation record
-          assert(cur_p->is_deallocation_record(), "wrong record type");
-
-          // we are removing an arena record, we also need to remove its 'size'
-          // record behind it
-          if (p->is_arena_record()) {
-            MemPointerRecord* next_p = (MemPointerRecord*)malloc_itr.peek_next();
-            if (next_p->is_arena_size_record()) {
-              assert(next_p->is_size_record_of_arena(p), "arena records dont match");
-              malloc_itr.remove();
-            }
+          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
+            return false;
          }
-          malloc_itr.remove();
        }
      } else {
-        if (cur_p->is_arena_size_record()) {
-          MemPointerRecord* prev_p = (MemPointerRecord*)malloc_itr.peek_prev();
-          if (prev_p != NULL &&
-            (!prev_p->is_arena_record() || !cur_p->is_size_record_of_arena(prev_p))) {
-            // arena already deallocated
-            cur_p = NULL;
-          }
-        }
-        if (cur_p != NULL) {
-          if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
-            if (p != NULL && cur_p->addr() > p->addr()) {
-              malloc_itr.insert_after(cur);
-            } else {
-              malloc_itr.insert(cur);
-            }
-          }
-#ifndef PRODUCT
-          else if (!has_allocation_record(cur_p->addr())){
-            // NMT can not track some startup memory, which allocated before NMT
-            // is enabled
-            _untracked_count ++;
-          }
-#endif
-        }
+        // throw out obsolete records, which are the commit/uncommit/release/tag records
+        // on memory regions that are already released.
      }
-    }
-
-    cur = walker.next();
    }
-  NOT_PRODUCT(check_malloc_pointers();)
-  _staging_area->shrink();
-  _staging_area->clear();
+    new_rec = (MemPointerRecord*)itr->next();
+  }
+  return true;
 }
 
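promote_virtual_memory_records() relies on the walker having sorted the staged records by seq: reserve/commit/uncommit only compose correctly in the order they actually happened. A toy illustration (Region is a simplified stand-in for VMMemRegion):

#include <cassert>
#include <cstddef>

struct Region {
  size_t reserved;
  size_t committed;
  void reserve(size_t s)  { reserved = s; committed = 0; }
  void commit(size_t s)   { assert(committed + s <= reserved); committed += s; }
  void uncommit(size_t s) { assert(s <= committed); committed -= s; }
};

int main() {
  Region r;
  r.reserve(4096);   // seq 1
  r.commit(4096);    // seq 2
  r.uncommit(4096);  // seq 3
  assert(r.committed == 0);  // replayed out of order, the asserts would fire
  return 0;
}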
-
 #ifndef PRODUCT
 void MemSnapshot::print_snapshot_stats(outputStream* st) {
   st->print_cr("Snapshot:");
@@ -413,8 +373,15 @@
   st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
     (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);
 
-  st->print_cr("\tStaging: %d/%d [%5.2f%%] %dKB", _staging_area->length(), _staging_area->capacity(),
-    (100.0 * (float)_staging_area->length()) / (float)_staging_area->capacity(), _staging_area->instance_size()/K);
+  st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
+    _staging_area.malloc_data()->capacity(),
+    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
+    _staging_area.malloc_data()->instance_size()/K);
+
+  st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
+    _staging_area.vm_data()->capacity(),
+    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
+    _staging_area.vm_data()->instance_size()/K);
 
   st->print_cr("\tUntracked allocation: %d", _untracked_count);
 }
@@ -433,7 +400,7 @@
 }
 
 bool MemSnapshot::has_allocation_record(address addr) {
-  MemPointerArrayIteratorImpl itr(_staging_area);
+  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
   MemPointerRecord* cur = (MemPointerRecord*)itr.current();
   while (cur != NULL) {
     if (cur->addr() == addr && cur->is_allocation_record()) {
@@ -447,7 +414,7 @@
 
 #ifdef ASSERT
 void MemSnapshot::check_staging_data() {
-  MemPointerArrayIteratorImpl itr(_staging_area);
+  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
   MemPointerRecord* cur = (MemPointerRecord*)itr.current();
   MemPointerRecord* next = (MemPointerRecord*)itr.next();
   while (next != NULL) {
@@ -458,6 +425,13 @@
     cur = next;
     next = (MemPointerRecord*)itr.next();
   }
+
+  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
+  cur = (MemPointerRecord*)vm_itr.current();
+  while (cur != NULL) {
+    assert(cur->is_vm_pointer(), "virtual memory pointer only");
+    cur = (MemPointerRecord*)vm_itr.next();
+  }
 }
 #endif // ASSERT