src/share/vm/services/memSnapshot.cpp

author:      zgu
date:        Tue, 11 Sep 2012 20:53:17 -0400
changeset:   4053:33143ee07800
parent:      3994:e5bf1c79ed5b
child:       4193:716c64bda5ba
permissions: -rw-r--r--

7181995: NMT ON: NMT assertion failure assert(cur_vm->is_uncommit_record() || cur_vm->is_deallocation_record())
Summary: Fixed virtual memory record merge and promotion logic; records should be ordered by sequence number rather than by base address
Reviewed-by: coleenp, acorn
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

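// Note (per the 7181995 summary above): virtual memory records must be
// merged and promoted in sequence number order, i.e. the order in which the
// operations actually happened, rather than in base address order. This
// comparator provides that ordering for the staged virtual memory records.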
static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}


MemPointerArrayIteratorImpl StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return MemPointerArrayIteratorImpl(arr);
}

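// A MemSnapshot holds the promoted, 'live' view of tracked memory:
// _alloc_ptrs keeps malloc'd blocks sorted by address, and _vm_ptrs keeps
// the live virtual memory regions. Per-thread recorders are first merged
// into the staging area, then promoted into these arrays.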
MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  MEMFLAGS flags = dest->flags();

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}

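// The malloc side of the staging area is kept sorted by address and, for
// records with the same address, by operation tag (see check_staging_data()),
// so that the multiple tagging records a memory block can have stay adjacent.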
// merge a per-thread memory recorder into the staging area
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*) itr.current();
  while (p1 != NULL) {
    if (p1->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(p1)) {
        return false;
      }
    } else {
      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
      // we have not seen this memory block, so just add to staging area
      if (p2 == NULL) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else if (p1->addr() == p2->addr()) {
        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
        // a memory block can have many tagging records; find the right one to
        // replace, or the right position to insert at
        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
              (p1->flags() & MemPointerRecord::tag_masks)) {
            p2 = (MemPointerRecord*)malloc_staging_itr.next();
            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
          } else {
            break;
          }
        }
        int df = (p1->flags() & MemPointerRecord::tag_masks) -
                 (p2->flags() & MemPointerRecord::tag_masks);
        if (df == 0) {
          assert(p1->seq() > 0, "not sequenced");
          assert(p2->seq() > 0, "not sequenced");
          // same tag: keep the record with the newer sequence number
          if (p1->seq() > p2->seq()) {
            copy_pointer(p2, p1);
          }
        } else if (df < 0) {
          if (!malloc_staging_itr.insert(p1)) {
            return false;
          }
        } else {
          if (!malloc_staging_itr.insert_after(p1)) {
            return false;
          }
        }
      } else if (p1->addr() < p2->addr()) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!malloc_staging_itr.insert_after(p1)) {
          return false;
        }
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(check_staging_data();)
  return true;
}

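// Promotion moves staged records into the snapshot in two passes: malloc
// records are applied against the address-sorted snapshot, while virtual
// memory records are first re-sorted into sequence number order (see
// virtual_memory_record_walker()) and then replayed.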
// promote data to next generation
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    MemPointerArrayIteratorImpl vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  return promoted;
}

bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matching memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
             "Sanity check");
      // update block states
      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
        copy_pointer(matched_rec, new_rec);
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_size_record()) {
            // it has to match the arena record
            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // it is a new record, insert into snapshot
      if (new_rec->is_arena_size_record()) {
        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
          // no matching arena record, ignore the size record
          new_rec = NULL;
        }
      }
      // only 'live' records can go into the snapshot
      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is on
          _untracked_count ++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

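// Replay staged virtual memory records against the snapshot. The iterator
// walks records in sequence number order, so reserve/commit/uncommit/release
// operations on the same region are applied in the order they occurred.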
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegionEx new_vm_rec;
  VMMemRegion*  matched_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");
    if (MemTracker::track_callsite()) {
      new_vm_rec.init((MemPointerRecordEx*)new_rec);
    } else {
      new_vm_rec.init(new_rec);
    }
    matched_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (matched_rec != NULL &&
        (matched_rec->contains(&new_vm_rec) || matched_rec->base() == new_vm_rec.base())) {
      // snapshot can only have 'live' records
      assert(matched_rec->is_reserve_record(), "Sanity check");
      if (new_vm_rec.is_reserve_record() && matched_rec->base() == new_vm_rec.base()) {
        // resize the reserved virtual memory range; the resized range still
        // has to cover the committed area
        assert(new_vm_rec.size() >= matched_rec->committed_size(), "Sanity check");
        matched_rec->set_reserved_size(new_vm_rec.size());
      } else if (new_vm_rec.is_commit_record()) {
        // commit memory inside the reserved memory range
        assert(new_vm_rec.committed_size() <= matched_rec->reserved_size(), "Sanity check");
        // thread stacks are marked committed, so we ignore the 'commit' records
        // generated by creating stack guard pages
        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) != mtThreadStack) {
          matched_rec->commit(new_vm_rec.committed_size());
        }
      } else if (new_vm_rec.is_uncommit_record()) {
        if (FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtThreadStack) {
          // ignore the 'uncommit' records generated by removing stack guard
          // pages; a thread stack is uncommitted as a whole
          if (matched_rec->committed_size() == new_vm_rec.committed_size()) {
            matched_rec->uncommit(new_vm_rec.committed_size());
          }
        } else {
          // uncommit memory inside the reserved memory range
          assert(new_vm_rec.committed_size() <= matched_rec->committed_size(),
                 "Sanity check");
          matched_rec->uncommit(new_vm_rec.committed_size());
        }
      } else if (new_vm_rec.is_type_tagging_record()) {
        // tag this virtual memory range with a memory type; a memory range
        // cannot be re-tagged with a different type
        assert(FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(matched_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_vm_rec.flags()),
               "Sanity check");
        matched_rec->tag(new_vm_rec.flags());
      } else if (new_vm_rec.is_release_record()) {
        // release part of, or the whole, memory range
        if (new_vm_rec.base() == matched_rec->base() &&
            new_vm_rec.size() == matched_rec->size()) {
          // release the whole virtual memory range
          assert(matched_rec->committed_size() == 0, "Sanity check");
          vm_snapshot_itr.remove();
        } else {
          // partial release
          matched_rec->partial_release(new_vm_rec.base(), new_vm_rec.size());
        }
      } else {
        // multiple reserve/commit on the same virtual memory range
        assert((new_vm_rec.is_reserve_record() || new_vm_rec.is_commit_record()) &&
               (new_vm_rec.base() == matched_rec->base() && new_vm_rec.size() == matched_rec->size()),
               "Sanity check");
        matched_rec->tag(new_vm_rec.flags());
      }
    } else {
      // no matching record
      if (new_vm_rec.is_reserve_record()) {
        if (matched_rec == NULL || matched_rec->base() > new_vm_rec.base()) {
          if (!vm_snapshot_itr.insert(&new_vm_rec)) {
            return false;
          }
        } else {
          if (!vm_snapshot_itr.insert_after(&new_vm_rec)) {
            return false;
          }
        }
      } else {
        // throw out obsolete records, i.e. commit/uncommit/release/tag records
        // on memory regions that have already been released
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
           ((next->flags() & MemPointerRecord::tag_masks) >
            (cur->flags() & MemPointerRecord::tag_masks)),
           "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}
#endif // ASSERT
