src/share/vm/services/memSnapshot.cpp

changeset:   6876:710a3c8b516e (merge)
parents:     6680:78bbf4d43a14, 0:f90c822e73f8
author:      aoqi
date:        Tue, 08 Aug 2017 15:57:29 +0800
permissions: -rw-r--r--
/*
 * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

#ifdef ASSERT

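// Debug-only helper: print a malloc/arena pointer record with its address
// range, size, memory type, the operation it represents and, when call site
// tracking is enabled, the function that performed the allocation.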
void decode_pointer_record(MemPointerRecord* rec) {
  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
    rec->addr() + rec->size(), (int)rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_vm_pointer()) {
    if (rec->is_allocation_record()) {
      tty->print_cr(" (reserve)");
    } else if (rec->is_commit_record()) {
      tty->print_cr(" (commit)");
    } else if (rec->is_uncommit_record()) {
      tty->print_cr(" (uncommit)");
    } else if (rec->is_deallocation_record()) {
      tty->print_cr(" (release)");
    } else {
      tty->print_cr(" (tag)");
    }
  } else {
    if (rec->is_arena_memory_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
    } else {
      tty->print_cr(" (free)");
    }
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((MemPointerRecordEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
    }
  }
}

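// Debug-only helper: print a reserved or committed virtual memory region and,
// when call site tracking is enabled, the call site that created it.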
void decode_vm_region_record(VMMemRegion* rec) {
  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
    rec->addr() + rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_allocation_record()) {
    tty->print_cr(" (reserved)");
  } else if (rec->is_commit_record()) {
    tty->print_cr(" (committed)");
  } else {
    ShouldNotReachHere();
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((VMMemRegionEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
    }
  }
}

#endif

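// Convert a virtual memory record into a VMMemRegion (or VMMemRegionEx when
// call sites are tracked) and insert it at the iterator's current position.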
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// We don't consolidate reserved regions, since they may be categorized
// into different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* reserved_region = (VMMemRegion*)current();

  // we don't have anything yet
  if (reserved_region == NULL) {
    return insert_record(rec);
  }

  assert(reserved_region->is_reserved_region(), "Sanity check");
  // duplicated records
  if (reserved_region->is_same_region(rec)) {
    return true;
  }
  // Overlapping stack regions indicate that a JNI thread failed to
  // detach from the VM before exiting. This leaks the JavaThread object.
  if (CheckJNICalls) {
    guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
              !reserved_region->overlaps_region(rec),
              "Attached JNI thread exited without being detached");
  }
  // otherwise, we should not have overlapping reserved regions
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    reserved_region->base() > rec->addr(), "Just check: locate()");
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    !reserved_region->overlaps_region(rec), "overlapping reserved regions");

  return insert_record(rec);
}

// we do consolidate committed regions
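// A new commit record is merged into an existing committed region when the two
// overlap or are adjacent; otherwise it is inserted, in base-address order, as
// a new committed region under the enclosing reserved region.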
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
    "Sanity check");

  // thread's native stack is always marked as "committed", ignore
  // the "commit" operation for creating stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  // if the reserved region has any committed regions
  VMMemRegion* committed_rgn = (VMMemRegion*)next();
  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
    // duplicated commit records
    if (committed_rgn->contains_region(rec)) {
      return true;
    } else if (committed_rgn->overlaps_region(rec)) {
      // overlaps front part
      if (rec->addr() < committed_rgn->addr()) {
        committed_rgn->expand_region(rec->addr(),
          committed_rgn->addr() - rec->addr());
      } else {
        // overlaps tail part
        address committed_rgn_end = committed_rgn->addr() +
          committed_rgn->size();
        assert(committed_rgn_end < rec->addr() + rec->size(),
          "overlap tail part");
        committed_rgn->expand_region(committed_rgn_end,
          (rec->addr() + rec->size()) - committed_rgn_end);
      }
    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
      // the regions are adjacent; merge them
      committed_rgn->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can consolidate next committed region
      if (next_reg != NULL && next_reg->is_committed_region() &&
          next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
        committed_rgn->expand_region(next_reg->base(), next_reg->size());
        // delete merged region
        remove();
      }
      return true;
    } else if (committed_rgn->base() > rec->addr()) {
      // found the location, insert this committed region
      return insert_record(rec);
    }
    committed_rgn = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

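// Remove or shrink committed regions covered by an uncommit record. The
// iterator is positioned at the reserved region that contains the record.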
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  // a thread's native stack is always marked as "committed"; ignore
  // the uncommit operation used for the stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // region already uncommitted, must be due to duplicated record
    if (cur->addr() >= rec->addr() + rec->size()) {
      break;
    } else if (cur->contains_region(rec)) {
      // uncommit whole region
      if (cur->is_same_region(rec)) {
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
                 rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of current memory region.
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
            ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find committed record due to duplicated records
  return true;
}

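// Handle a virtual memory release record. Releasing the whole reserved region
// also removes all of its committed regions; a partial release shrinks the
// reserved region or splits it into two reserved regions.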
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  if (rec->is_same_region(cur)) {

    // In the snapshot, virtual memory records are sorted in the following order:
    // 1. by the virtual memory's base address;
    // 2. a virtual memory reservation record, followed by the commit records within
    //    this reservation. The commit records are also in base address order.
    // When a reserved region is released, we want to remove the reservation record and all
    // commit records following it.
#ifdef ASSERT
    address low_addr = cur->addr();
    address high_addr = low_addr + cur->size();
#endif
    // remove virtual memory reservation record
    remove();
    // remove committed regions within above reservation
    VMMemRegion* next_region = (VMMemRegion*)current();
    while (next_region != NULL && next_region->is_committed_region()) {
      assert(next_region->addr() >= low_addr &&
             next_region->addr() + next_region->size() <= high_addr,
             "Range check");
      remove();
      next_region = (VMMemRegion*)current();
    }
  } else if (rec->addr() == cur->addr() ||
             rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
    assert(check_reserved_region(), "Integrity check");
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    }
  }
  return true;
}

bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
         p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

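// Split a reserved region so that [new_rgn_addr, new_rgn_addr + new_rgn_size)
// becomes a reserved region of its own. Depending on where the new range falls
// within the original region, the split produces two or three reserved regions.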
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // remaining becomes next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}

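// qsort-style comparator: order records by the sequence number assigned when
// the operation was recorded, so that records can be replayed in program order.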
static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

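// Allocate the staging arrays for malloc and virtual memory records. Returns
// false, and releases any partially created array, if allocation fails.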
bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}


VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}


MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
  _number_of_classes = 0;
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

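// Copy a staged record over an existing record for the same address, keeping
// the sequence number; used during merge when the newer record wins.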
void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");
  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");

  if (MemTracker::track_callsite()) {
    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
  } else {
    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
  }
}

void MemSnapshot::assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(src != NULL && dest != NULL, "Just check");
  assert(dest->seq() == 0 && src->seq() > 0, "cast away sequence");

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
  }
}

// merge a recorder into the staging area
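// Virtual memory records are simply appended and are replayed in sequence
// order at promotion time. Malloc records are kept sorted by address, and when
// two records refer to the same address the one with the higher sequence
// number wins.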
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
  MemPointerRecord* matched_rec;

  while (incoming_rec != NULL) {
    if (incoming_rec->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(incoming_rec)) {
        return false;
      }
    } else {
      // locate the matched record and/or position the iterator at the proper
      // location for this incoming record
      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
      // we have not seen this memory block in this generation,
      // so just add to staging area
      if (matched_rec == NULL) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else if (incoming_rec->addr() == matched_rec->addr()) {
        // whoever has higher sequence number wins
        if (incoming_rec->seq() > matched_rec->seq()) {
          copy_seq_pointer(matched_rec, incoming_rec);
        }
      } else if (incoming_rec->addr() < matched_rec->addr()) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else {
        ShouldNotReachHere();
      }
    }
    incoming_rec = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(void check_staging_data();)
  return true;
}


// promote data to next generation
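// Malloc records are folded into the live malloc snapshot first, then virtual
// memory records are replayed, in sequence order, against the live region list.
// The staging area is cleared whether or not promotion succeeded.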
bool MemSnapshot::promote(int number_of_classes) {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
    "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  _number_of_classes = number_of_classes;
  return promoted;
}

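// Apply staged malloc records to the live snapshot: allocation and arena-size
// records update or insert entries; a deallocation record removes the entry
// and, for an arena, its trailing arena-size record as well.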
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
        "Sanity check");
      // update block states
      if (new_rec->is_allocation_record()) {
        assign_pointer(matched_rec, new_rec);
      } else if (new_rec->is_arena_memory_record()) {
        if (new_rec->size() == 0) {
          // remove size record once size drops to 0
          malloc_snapshot_itr.remove();
        } else {
          assign_pointer(matched_rec, new_rec);
        }
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_memory_record() &&
              next->is_memory_record_of_arena(matched_rec)) {
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // don't insert a size-0 record
      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
        new_rec = NULL;
      }

      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is turned on
          _untracked_count++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

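// Replay staged virtual memory records, already sorted in sequence order,
// against the live region list: reserve, commit, uncommit, release and
// type-tagging operations are applied to the reserved region that contains
// each record.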
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate a reserved region that contains the specified address, or the
    // nearest reserved region whose base address is just above the specified
    // address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // only deal with splitting a bigger reserved region into smaller regions.
          // So far, CDS is the only use case.
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert or expand existing committed region to cover this
        // newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type. A memory range
        // cannot be re-tagged with a different type.
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
               "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * An assertion failure here indicates mismatched virtual memory records: the
       * likely scenario is that some virtual memory operations did not go through the
       * os::xxxx_memory() API and therefore have to be tracked manually
       * (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
           ((next->flags() & MemPointerRecord::tag_masks) >
            (cur->flags() & MemPointerRecord::tag_masks)),
           "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->cr();
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT
