src/share/vm/services/memSnapshot.cpp

author:      zgu
date:        Fri, 09 Nov 2012 11:04:06 -0500
changeset:   4272:ed8b1e39ff4f
parent:      4248:69ad7823b1ca
child:       4276:8c413497f434
permissions: -rw-r--r--

8002273: NMT to report JNI memory leaks when -Xcheck:jni is on
Summary: Allows NMT to report that a JNI thread failed to detach from the JVM before exiting, which leaks the JavaThread object, when the -Xcheck:jni option is on.
Reviewed-by: acorn, dholmes, coleenp, ctornqvi
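
The failure mode this change reports, sketched below for illustration (this snippet is not part of the changeset; the JNI_CreateJavaVM setup is omitted, and g_vm and leaky_thread_body are hypothetical names):

    #include <jni.h>

    extern JavaVM* g_vm;  // assumed to be created via JNI_CreateJavaVM elsewhere

    void* leaky_thread_body(void* ignored) {
      JNIEnv* env = NULL;
      // Attaching creates a JavaThread object inside the VM and registers the
      // thread's stack region with NMT (type mtThreadStack).
      if (g_vm->AttachCurrentThread((void**)&env, NULL) != JNI_OK) {
        return NULL;
      }
      // ... call into Java through env ...

      // BUG: exiting without g_vm->DetachCurrentThread() leaks the JavaThread.
      // When a later thread's stack is reserved over the stale, still-tracked
      // stack region, add_reserved_region() below sees overlapping
      // mtThreadStack regions; with -Xcheck:jni its guarantee() fires.
      return NULL;
    }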

/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

#ifdef ASSERT

void decode_pointer_record(MemPointerRecord* rec) {
  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
    rec->addr() + rec->size(), (int)rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_vm_pointer()) {
    if (rec->is_allocation_record()) {
      tty->print_cr(" (reserve)");
    } else if (rec->is_commit_record()) {
      tty->print_cr(" (commit)");
    } else if (rec->is_uncommit_record()) {
      tty->print_cr(" (uncommit)");
    } else if (rec->is_deallocation_record()) {
      tty->print_cr(" (release)");
    } else {
      tty->print_cr(" (tag)");
    }
  } else {
    if (rec->is_arena_size_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
    } else {
      tty->print_cr(" (free)");
    }
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((MemPointerRecordEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT, pc);
    }
  }
}

void decode_vm_region_record(VMMemRegion* rec) {
  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
    rec->addr() + rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_allocation_record()) {
    tty->print_cr(" (reserved)");
  } else if (rec->is_commit_record()) {
    tty->print_cr(" (committed)");
  } else {
    ShouldNotReachHere();
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((VMMemRegionEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT, pc);
    }
  }
}

#endif

bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// We don't consolidate reserved regions, because they may be tagged
// with different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* reserved_region = (VMMemRegion*)current();

  // we don't have anything yet
  if (reserved_region == NULL) {
    return insert_record(rec);
  }

  assert(reserved_region->is_reserved_region(), "Sanity check");
  // duplicated records
  if (reserved_region->is_same_region(rec)) {
    return true;
  }
  // Overlapping stack regions indicate that a JNI thread failed to
  // detach from the VM before exiting. This leaks the JavaThread object.
  if (CheckJNICalls) {
    guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
      !reserved_region->overlaps_region(rec),
      "Attached JNI thread exited without being detached");
  }
  // otherwise, we should not have overlapping reserved regions
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    reserved_region->base() > rec->addr(), "Just check: locate()");
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    !reserved_region->overlaps_region(rec), "overlapping reserved regions");

  return insert_record(rec);
}

// We do consolidate committed regions.
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
    "Sanity check");

  // thread's native stack is always marked as "committed", ignore
  // the "commit" operation for creating stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  // walk the committed regions that already exist within this reserved region
  VMMemRegion* committed_rgn = (VMMemRegion*)next();
  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
    // duplicated commit records
    if (committed_rgn->contains_region(rec)) {
      return true;
    } else if (committed_rgn->overlaps_region(rec)) {
      // overlaps front part
      if (rec->addr() < committed_rgn->addr()) {
        committed_rgn->expand_region(rec->addr(),
          committed_rgn->addr() - rec->addr());
      } else {
        // overlaps tail part
        address committed_rgn_end = committed_rgn->addr() +
          committed_rgn->size();
        assert(committed_rgn_end < rec->addr() + rec->size(),
          "overlap tail part");
        committed_rgn->expand_region(committed_rgn_end,
          (rec->addr() + rec->size()) - committed_rgn_end);
      }
    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
      // the regions are adjacent to each other, merge them
      committed_rgn->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can consolidate the next committed region as well
      if (next_reg != NULL && next_reg->is_committed_region() &&
          next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
        committed_rgn->expand_region(next_reg->base(), next_reg->size());
        // delete merged region
        remove();
      }
      return true;
    } else if (committed_rgn->base() > rec->addr()) {
      // found the location, insert this committed region
      return insert_record(rec);
    }
    committed_rgn = (VMMemRegion*)next();
  }
  return insert_record(rec);
}
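
// To illustrate the consolidation above: with an existing committed region
// [C1, C2) inside the current reserved region, an incoming commit record R is
//   - dropped if [C1, C2) already contains it (duplicate),
//   - merged by expanding toward lower addresses if R overlaps the front,
//   - merged by expanding toward higher addresses if R overlaps the tail or
//     starts exactly at C2 (in which case the next committed region may be
//     absorbed too),
//   - inserted as its own record if it lies entirely before C1.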

bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  // thread's native stack is always marked as "committed", ignore
  // the "uncommit" operation for stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // region already uncommitted, must be due to duplicated record
    if (cur->addr() >= rec->addr() + rec->size()) {
      break;
    } else if (cur->contains_region(rec)) {
      // uncommit whole region
      if (cur->is_same_region(rec)) {
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
        rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of current memory region.
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
            ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find committed record due to duplicated records
  return true;
}
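
// To illustrate the split case above: uncommitting the middle of a committed
// region [C1, C2) with record [U1, U2), where C1 < U1 and U2 < C2, trims the
// existing record to [C1, U1) and inserts a new committed record [U2, C2)
// right after it, preserving the callsite pc when tracking is enabled.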

bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
#ifdef ASSERT
  VMMemRegion* next_reg = (VMMemRegion*)peek_next();
  // should not have any committed memory in this reserved region
  assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
#endif
  if (rec->is_same_region(cur)) {
    remove();
  } else if (rec->addr() == cur->addr() ||
    rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      return insert_reserved_region(&tmp);
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      return insert_reserved_region(&tmp);
    }
  }
  return true;
}

bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with the previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
    p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes the 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // the remainder becomes the next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}
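
// The three split cases handled above, for a reserved region [B, E) and a new
// region [N1, N2) it fully contains:
//   - N1 == B:  trim the original to [N1, N2) and insert [N2, E) after it;
//   - N2 == E:  trim the original to [B, N1) and insert [N1, N2) after it;
//   - interior: trim the original to [B, N1), then insert [N1, N2) and
//               [N2, E) as two new reserved regions.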

static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}


VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}


MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  MEMFLAGS flags = dest->flags();

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}


// merge a per-thread memory recorder into the staging area
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*) itr.current();
  while (p1 != NULL) {
    if (p1->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(p1)) {
        return false;
      }
    } else {
      // locate a matching record, or position the iterator at the proper
      // insertion point for this incoming record
      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
      // we have not seen this memory block, so just add to staging area
      if (p2 == NULL) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else if (p1->addr() == p2->addr()) {
        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
        // a memory block can have many tagging records; find the right one to
        // replace, or the right position to insert
        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
            (p1->flags() & MemPointerRecord::tag_masks)) {
            p2 = (MemPointerRecord*)malloc_staging_itr.next();
            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
          } else {
            break;
          }
        }
        int df = (p1->flags() & MemPointerRecord::tag_masks) -
          (p2->flags() & MemPointerRecord::tag_masks);
        if (df == 0) {
          assert(p1->seq() > 0, "not sequenced");
          assert(p2->seq() > 0, "not sequenced");
          if (p1->seq() > p2->seq()) {
            copy_pointer(p2, p1);
          }
        } else if (df < 0) {
          if (!malloc_staging_itr.insert(p1)) {
            return false;
          }
        } else {
          if (!malloc_staging_itr.insert_after(p1)) {
            return false;
          }
        }
      } else if (p1->addr() < p2->addr()) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!malloc_staging_itr.insert_after(p1)) {
          return false;
        }
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  DEBUG_ONLY(check_staging_data();)
  return true;
}
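
// The ordering merge() maintains in the malloc staging area (and that
// check_staging_data() verifies below): records are sorted by address, and
// records sharing an address are sorted by their tag bits; when both address
// and tag match, the record with the newer sequence number wins via
// copy_pointer().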


// promote data to the next generation
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
    "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  return promoted;
}

bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
        "Sanity check");
      // update block states
      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
        copy_pointer(matched_rec, new_rec);
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_size_record()) {
            // it has to match the arena record
            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // it is a new record, insert into snapshot
      if (new_rec->is_arena_size_record()) {
        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
          // no matching arena record, ignore the size record
          new_rec = NULL;
        }
      }
      // only 'live' records can go into the snapshot
      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is on
          _untracked_count++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate a reserved region that contains the specified address, or the
    // nearest reserved region whose base address is just above the specified
    // address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // we only deal with splitting a bigger reserved region into smaller
          // regions. So far, CDS is the only use case.
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert a new committed region, or expand an existing one to cover
        // this newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type; a memory
        // range can not be re-tagged with a different type
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
          FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
          "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * An assertion failure here indicates mismatched virtual memory records:
       * the likely scenario is that some virtual memory operations did not go
       * through the os::xxxx_memory() API and have to be tracked manually
       * (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
      "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->print_cr("");
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT
