Mon, 16 Jul 2012 14:10:34 -0400
7181986: NMT ON: Assertion failure when running jdi ExpiredRequestDeletionTest
Summary: Changed _query_lock from a static object to a heap object, and fixed the ranks of _query_lock and the snapshot lock so they can participate in deadlock detection.
Reviewed-by: coleenp, dholmes, kvn
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"


// Staging data groups the records of a VM memory range, so we can consolidate
// them into one record during the walk.
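// A live range usually produces a sequence of records (reserve, commit,
// uncommit, tag, release); every record that falls inside the current
// allocation range is folded into the single VMMemRegionEx passed in by
// the caller.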
bool StagingWalker::consolidate_vm_records(VMMemRegionEx* vm_rec) {
  MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
  assert(cur != NULL && cur->is_vm_pointer(), "not a virtual memory pointer");

  jint cur_seq;
  jint next_seq;

  bool trackCallsite = MemTracker::track_callsite();

  if (trackCallsite) {
    vm_rec->init((MemPointerRecordEx*)cur);
    cur_seq = ((SeqMemPointerRecordEx*)cur)->seq();
  } else {
    vm_rec->init((MemPointerRecord*)cur);
    cur_seq = ((SeqMemPointerRecord*)cur)->seq();
  }

  // we can only consolidate when we have an allocation record,
  // which contains the virtual memory range
  if (!cur->is_allocation_record()) {
    _itr.next();
    return true;
  }

  // allocation range
  address base = cur->addr();
  address end = base + cur->size();

  MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
  // whether the memory range is still alive
  bool live_vm_rec = true;
  while (next != NULL && next->is_vm_pointer()) {
    if (next->is_allocation_record()) {
      assert(next->addr() >= base, "sorting order or overlapping");
      break;
    }

    if (trackCallsite) {
      next_seq = ((SeqMemPointerRecordEx*)next)->seq();
    } else {
      next_seq = ((SeqMemPointerRecord*)next)->seq();
    }

    if (next_seq < cur_seq) {
      _itr.next();
      next = (MemPointerRecord*)_itr.peek_next();
      continue;
    }

    if (next->is_deallocation_record()) {
      if (next->addr() == base && next->size() == cur->size()) {
        // the virtual memory range has been released
        _itr.next();
        live_vm_rec = false;
        break;
      } else if (next->addr() < end) { // partial release
        vm_rec->partial_release(next->addr(), next->size());
        _itr.next();
      } else {
        break;
      }
    } else if (next->is_commit_record()) {
      if (next->addr() >= base && next->addr() + next->size() <= end) {
        vm_rec->commit(next->size());
        _itr.next();
      } else {
        assert(next->addr() >= base, "sorting order or overlapping");
        break;
      }
    } else if (next->is_uncommit_record()) {
      if (next->addr() >= base && next->addr() + next->size() <= end) {
        vm_rec->uncommit(next->size());
        _itr.next();
      } else {
        assert(next->addr() >= end, "sorting order or overlapping");
        break;
      }
    } else if (next->is_type_tagging_record()) {
      if (next->addr() >= base && next->addr() < end) {
        vm_rec->tag(next->flags());
        _itr.next();
      } else {
        break;
      }
    } else {
      assert(false, "unknown record type");
    }
    next = (MemPointerRecord*)_itr.peek_next();
  }
  _itr.next();
  return live_vm_rec;
}

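// Advance the walker: returns the next consolidated record, or NULL at the
// end of the staging array. VM records are consolidated by the routine above
// (dead ranges are skipped); for a malloc'ed pointer, an allocation record
// immediately followed by another record of the same address collapses to
// the newer one by sequence number.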
MemPointer* StagingWalker::next() {
  MemPointerRecord* cur_p = (MemPointerRecord*)_itr.current();
  if (cur_p == NULL) {
    _end_of_array = true;
    return NULL;
  }

  MemPointerRecord* next_p;
  if (cur_p->is_vm_pointer()) {
    _is_vm_record = true;
    if (!consolidate_vm_records(&_vm_record)) {
      return next();
    }
  } else { // malloc'ed pointer
    _is_vm_record = false;
    next_p = (MemPointerRecord*)_itr.peek_next();
    if (next_p != NULL && next_p->addr() == cur_p->addr()) {
      assert(cur_p->is_allocation_record(), "sorting order");
      assert(!next_p->is_allocation_record(), "sorting order");
      _itr.next();
      if (cur_p->seq() < next_p->seq()) {
        cur_p = next_p;
      }
    }
    if (MemTracker::track_callsite()) {
      _malloc_record.init((MemPointerRecordEx*)cur_p);
    } else {
      _malloc_record.init((MemPointerRecord*)cur_p);
    }

    _itr.next();
  }
  return current();
}

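// The snapshot stores different record types depending on whether callsites
// are tracked: the Ex variants also carry the allocation callsite.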
MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
    _staging_area = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
    _staging_area = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

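  // The lock lives on the heap (see the changeset summary) and its rank sits
  // just below max_nonleaf, so it participates in the VM's rank-based
  // deadlock detection, which asserts when locks are acquired out of rank
  // order.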
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_staging_area != NULL) {
      delete _staging_area;
      _staging_area = NULL;
    }

    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

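// Overwrite 'dest' with 'src' in place. Both records describe the same
// address; the cast to the Ex type makes the copy wide enough to include
// the callsite when callsites are tracked.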
void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  MEMFLAGS flags = dest->flags();

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}


// merge a per-thread memory recorder into the staging area
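// Both sequences are sorted by address; records with the same address are
// ordered by their tag bits, and for an identical address and tag the record
// with the higher sequence number wins.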
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  // out of memory
  if (_staging_area == NULL || _staging_area->out_of_memory()) {
    return false;
  }

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator staging_itr(_staging_area);
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*)itr.current();
  while (p1 != NULL) {
    p2 = (MemPointerRecord*)staging_itr.locate(p1->addr());
    // we have not seen this memory block, so just add it to the staging area
    if (p2 == NULL) {
      if (!staging_itr.insert(p1)) {
        return false;
      }
    } else if (p1->addr() == p2->addr()) {
      MemPointerRecord* staging_next = (MemPointerRecord*)staging_itr.peek_next();
      // a memory block can have many tagging records; find the right one to
      // replace, or the right position to insert
      while (staging_next != NULL && staging_next->addr() == p1->addr()) {
        if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
            (p1->flags() & MemPointerRecord::tag_masks)) {
          p2 = (MemPointerRecord*)staging_itr.next();
          staging_next = (MemPointerRecord*)staging_itr.peek_next();
        } else {
          break;
        }
      }
      int df = (p1->flags() & MemPointerRecord::tag_masks) -
               (p2->flags() & MemPointerRecord::tag_masks);
      if (df == 0) {
        assert(p1->seq() > 0, "not sequenced");
        assert(p2->seq() > 0, "not sequenced");
        if (p1->seq() > p2->seq()) {
          copy_pointer(p2, p1);
        }
      } else if (df < 0) {
        if (!staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!staging_itr.insert_after(p1)) {
          return false;
        }
      }
    } else if (p1->addr() < p2->addr()) {
      if (!staging_itr.insert(p1)) {
        return false;
      }
    } else {
      if (!staging_itr.insert_after(p1)) {
        return false;
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(check_staging_data();)
  return true;
}


// promote data to the next generation
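// Walk the consolidated staging records and apply each one to the long-lived
// arrays: malloc records update _alloc_ptrs, VM records update _vm_ptrs.
// The staging area is shrunk and cleared afterwards.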
void MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _staging_area != NULL && _vm_ptrs != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);
  StagingWalker walker(_staging_area);
  MemPointerIterator malloc_itr(_alloc_ptrs);
  VMMemPointerIterator vm_itr(_vm_ptrs);
  MemPointer* cur = walker.current();
  while (cur != NULL) {
    if (walker.is_vm_record()) {
      VMMemRegion* cur_vm = (VMMemRegion*)cur;
      VMMemRegion* p = (VMMemRegion*)vm_itr.locate(cur_vm->addr());
      if (p != NULL && (p->contains(cur_vm) || p->base() == cur_vm->base())) {
        assert(p->is_reserve_record() ||
               p->is_commit_record(), "wrong vm record type");
        // resize existing reserved range
        if (cur_vm->is_reserve_record() && p->base() == cur_vm->base()) {
          assert(cur_vm->size() >= p->committed_size(), "incorrect resizing");
          p->set_reserved_size(cur_vm->size());
        } else if (cur_vm->is_commit_record()) {
          p->commit(cur_vm->committed_size());
        } else if (cur_vm->is_uncommit_record()) {
          p->uncommit(cur_vm->committed_size());
          if (!p->is_reserve_record() && p->committed_size() == 0) {
            vm_itr.remove();
          }
        } else if (cur_vm->is_type_tagging_record()) {
          p->tag(cur_vm->flags());
        } else if (cur_vm->is_release_record()) {
          if (cur_vm->base() == p->base() && cur_vm->size() == p->size()) {
            // release the whole range
            vm_itr.remove();
          } else {
            // partial release
            p->partial_release(cur_vm->base(), cur_vm->size());
          }
        } else {
          // we do see multiple reserve records on the same vm range
          assert((cur_vm->is_commit_record() || cur_vm->is_reserve_record()) &&
                 cur_vm->base() == p->base() && cur_vm->size() == p->size(), "bad record");
          p->tag(cur_vm->flags());
        }
      } else {
        if (cur_vm->is_reserve_record()) {
          if (p == NULL || p->base() > cur_vm->base()) {
            vm_itr.insert(cur_vm);
          } else {
            vm_itr.insert_after(cur_vm);
          }
        } else {
#ifdef ASSERT
          // In theory, we should assert unconditionally. However, NMT
          // explicitly releases the native thread stack in Thread's
          // destructor, due to platform-dependent behavior: on some
          // platforms we see uncommit/release records for the native
          // thread stack, and on others we don't.
          if (!cur_vm->is_uncommit_record() && !cur_vm->is_deallocation_record()) {
            ShouldNotReachHere();
          }
#endif
        }
      }
    } else {
      MemPointerRecord* cur_p = (MemPointerRecord*)cur;
      MemPointerRecord* p = (MemPointerRecord*)malloc_itr.locate(cur->addr());
      if (p != NULL && cur_p->addr() == p->addr()) {
        assert(p->is_allocation_record() || p->is_arena_size_record(), "untracked");
        if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
          copy_pointer(p, cur_p);
        } else { // deallocation record
          assert(cur_p->is_deallocation_record(), "wrong record type");

          // we are removing an arena record, so we also need to remove the
          // 'size' record behind it
          if (p->is_arena_record()) {
            MemPointerRecord* next_p = (MemPointerRecord*)malloc_itr.peek_next();
            if (next_p != NULL && next_p->is_arena_size_record()) {
              assert(next_p->is_size_record_of_arena(p), "arena records don't match");
              malloc_itr.remove();
            }
          }
          malloc_itr.remove();
        }
      } else {
        if (cur_p->is_arena_size_record()) {
          MemPointerRecord* prev_p = (MemPointerRecord*)malloc_itr.peek_prev();
          if (prev_p != NULL &&
              (!prev_p->is_arena_record() || !cur_p->is_size_record_of_arena(prev_p))) {
            // arena already deallocated
            cur_p = NULL;
          }
        }
        if (cur_p != NULL) {
          if (cur_p->is_allocation_record() || cur_p->is_arena_size_record()) {
            if (p != NULL && cur_p->addr() > p->addr()) {
              malloc_itr.insert_after(cur);
            } else {
              malloc_itr.insert(cur);
            }
          }
#ifndef PRODUCT
          else if (!has_allocation_record(cur_p->addr())) {
            // NMT cannot track some startup memory, which is allocated
            // before NMT is enabled
            _untracked_count++;
          }
#endif
        }
      }
    }

    cur = walker.next();
  }
  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area->shrink();
  _staging_area->clear();
}
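
#if 0
// Illustrative sketch only (not part of this changeset): how a caller might
// drive the merge/promote cycle. The recorder array and this driver function
// are hypothetical; the real driver lives in MemTracker.
static void sync_snapshot(MemSnapshot* snapshot, MemRecorder** recorders, int count) {
  for (int i = 0; i < count; i++) {
    if (!snapshot->merge(recorders[i])) {
      return;  // staging area out of memory; give up this cycle
    }
  }
  // fold the sorted staging data into the live malloc/VM pointer arrays
  snapshot->promote();
}
#endif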


#ifdef ASSERT
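// Debug-only statistics and consistency checks for the snapshot.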
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
               (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
               (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tStaging: %d/%d [%5.2f%%] %dKB", _staging_area->length(), _staging_area->capacity(),
               (100.0 * (float)_staging_area->length()) / (float)_staging_area->capacity(), _staging_area->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

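// Verify that the malloc pointer array remains sorted by address.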
void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

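// Verify the staging order: ascending address, and ascending tag bits
// within runs of records that share an address.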
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area);
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
           ((next->flags() & MemPointerRecord::tag_masks) >
            (cur->flags() & MemPointerRecord::tag_masks)),
           "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }
}

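// Linear scan of the staging area for an allocation record at 'addr';
// promote() uses this to distinguish untracked startup memory from
// bookkeeping errors.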
bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area);
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}

#endif