src/share/vm/services/memSnapshot.cpp

author:      minqi
date:        Mon, 31 Mar 2014 13:09:35 -0700
changeset:   6535 f42c10a3d4b1
parent:      5053 c18152e0554e
child:       6680 78bbf4d43a14
permissions: -rw-r--r--

7090324: gclog rotation via external tool
Summary: GC log rotation can be set via the java command line, but customers sometimes need to sync it with OS-level rotation settings.
Reviewed-by: sla, minqi, ehelin
Contributed-by: suenaga.yasumasa@lab.ntt.co.jp

/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

#ifdef ASSERT

void decode_pointer_record(MemPointerRecord* rec) {
  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
    rec->addr() + rec->size(), (int)rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_vm_pointer()) {
    if (rec->is_allocation_record()) {
      tty->print_cr(" (reserve)");
    } else if (rec->is_commit_record()) {
      tty->print_cr(" (commit)");
    } else if (rec->is_uncommit_record()) {
      tty->print_cr(" (uncommit)");
    } else if (rec->is_deallocation_record()) {
      tty->print_cr(" (release)");
    } else {
      tty->print_cr(" (tag)");
    }
  } else {
    if (rec->is_arena_memory_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
    } else {
      tty->print_cr(" (free)");
    }
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((MemPointerRecordEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
    }
  }
}

void decode_vm_region_record(VMMemRegion* rec) {
  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
    rec->addr() + rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_allocation_record()) {
    tty->print_cr(" (reserved)");
  } else if (rec->is_commit_record()) {
    tty->print_cr(" (committed)");
  } else {
    ShouldNotReachHere();
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((VMMemRegionEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
    }

  }
}

#endif

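// Convert a raw memory record into a VMMemRegion(Ex) entry and insert it at,
// or after, the iterator's current position. The extended form is used when
// callsite (pc) tracking is enabled, so both tracking levels share one path.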
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// We don't consolidate reserved regions, since they may be categorized
// into different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* reserved_region = (VMMemRegion*)current();

  // we don't have anything yet
  if (reserved_region == NULL) {
    return insert_record(rec);
  }

  assert(reserved_region->is_reserved_region(), "Sanity check");
  // duplicated records
  if (reserved_region->is_same_region(rec)) {
    return true;
  }
  // Overlapping stack regions indicate that a JNI thread failed to
  // detach from the VM before exiting. This leaks the JavaThread object.
  if (CheckJNICalls) {
    guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
      !reserved_region->overlaps_region(rec),
      "Attached JNI thread exited without being detached");
  }
  // otherwise, we should not have overlapping reserved regions
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    reserved_region->base() > rec->addr(), "Just check: locate()");
  assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
    !reserved_region->overlaps_region(rec), "overlapping reserved regions");

  return insert_record(rec);
}

// we do consolidate committed regions
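// A committed region that overlaps or adjoins an existing committed region
// within the same reservation is merged into it; otherwise a new record is
// inserted in base-address order.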
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
    "Sanity check");

  // thread's native stack is always marked as "committed", ignore
  // the "commit" operation for creating stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  // if the reserved region has any committed regions
  VMMemRegion* committed_rgn = (VMMemRegion*)next();
  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
    // duplicated commit records
    if (committed_rgn->contains_region(rec)) {
      return true;
    } else if (committed_rgn->overlaps_region(rec)) {
      // overlaps front part
      if (rec->addr() < committed_rgn->addr()) {
        committed_rgn->expand_region(rec->addr(),
          committed_rgn->addr() - rec->addr());
      } else {
        // overlaps tail part
        address committed_rgn_end = committed_rgn->addr() +
          committed_rgn->size();
        assert(committed_rgn_end < rec->addr() + rec->size(),
          "overlap tail part");
        committed_rgn->expand_region(committed_rgn_end,
          (rec->addr() + rec->size()) - committed_rgn_end);
      }
    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
      // the regions are adjacent to each other
      committed_rgn->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can consolidate next committed region
      if (next_reg != NULL && next_reg->is_committed_region() &&
          next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
        committed_rgn->expand_region(next_reg->base(), next_reg->size());
        // delete merged region
        remove();
      }
      return true;
    } else if (committed_rgn->base() > rec->addr()) {
      // found the location, insert this committed region
      return insert_record(rec);
    }
    committed_rgn = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

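// Remove the committed range covered by an uncommit record. The range may
// match an existing committed region exactly, trim it at either end, or
// split it in the middle, which requires inserting a record for the tail.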
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  // thread's native stack is always marked as "committed", ignore
  // the "uncommit" operation for removing stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // region already uncommitted, must be due to duplicated record
    if (cur->addr() >= rec->addr() + rec->size()) {
      break;
    } else if (cur->contains_region(rec)) {
      // uncommit whole region
      if (cur->is_same_region(rec)) {
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
        rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of current memory region.
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
            ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find committed record due to duplicated records
  return true;
}

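// Remove a released reservation. A full release drops the reservation record
// and every committed region inside it; a partial release trims the
// reservation at either end or splits it around the released middle part.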
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  if (rec->is_same_region(cur)) {

    // In the snapshot, virtual memory records are sorted in the following order:
    // 1. the virtual memory's base address
    // 2. the virtual memory reservation record, followed by the commit records within
    //    this reservation. The commit records are also in base address order.
    // When a reserved region is released, we want to remove the reservation record and
    // all commit records following it.
#ifdef ASSERT
    address low_addr = cur->addr();
    address high_addr = low_addr + cur->size();
#endif
    // remove virtual memory reservation record
    remove();
    // remove committed regions within above reservation
    VMMemRegion* next_region = (VMMemRegion*)current();
    while (next_region != NULL && next_region->is_committed_region()) {
      assert(next_region->addr() >= low_addr &&
             next_region->addr() + next_region->size() <= high_addr,
             "Range check");
      remove();
      next_region = (VMMemRegion*)current();
    }
  } else if (rec->addr() == cur->addr() ||
    rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
    assert(check_reserved_region(), "Integrity check");
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      bool ret = insert_reserved_region(&tmp);
      assert(!ret || check_reserved_region(), "Integrity check");
      return ret;
    }
  }
  return true;
}

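// Insert a new reserved region record. Commit records belonging to the
// previous reservation (those lying entirely below the new region's base)
// are skipped first, so the new record lands after them.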
bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
    p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

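// Split a reserved region so that [new_rgn_addr, new_rgn_addr + new_rgn_size)
// becomes a record of its own. Depending on where the new range falls, the
// original region is split into two pieces (range at either end) or three.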
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // remaining becomes next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}

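// qsort comparator: order records by sequence number, so that virtual memory
// operations can be replayed in the order in which they actually occurred.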
static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}


VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}


MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
  _number_of_classes = 0;
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

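// Copy a sequenced record over an existing one at the same address, keeping
// the sequence number. Used while merging recorder data into the staging area.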
void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");
  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");

  if (MemTracker::track_callsite()) {
    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
  } else {
    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
  }
}

void MemSnapshot::assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(src != NULL && dest != NULL, "Just check");
  assert(dest->seq() == 0 && src->seq() > 0, "cast away sequence");

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
  }
}

// merge a recorder to the staging area
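// Malloc records are kept sorted by address as they are merged; when two
// records refer to the same address, the one with the higher sequence number
// wins. Virtual memory records are simply appended here and sorted by
// sequence number later, when they are promoted.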
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
  MemPointerRecord* matched_rec;

  while (incoming_rec != NULL) {
    if (incoming_rec->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(incoming_rec)) {
        return false;
      }
    } else {
      // locate matched record and/or also position the iterator to proper
      // location for this incoming record.
      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
      // we have not seen this memory block in this generation,
      // so just add to staging area
      if (matched_rec == NULL) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else if (incoming_rec->addr() == matched_rec->addr()) {
        // whoever has higher sequence number wins
        if (incoming_rec->seq() > matched_rec->seq()) {
          copy_seq_pointer(matched_rec, incoming_rec);
        }
      } else if (incoming_rec->addr() < matched_rec->addr()) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else {
        ShouldNotReachHere();
      }
    }
    incoming_rec = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(void check_staging_data();)
  return true;
}


// promote data to next generation
bool MemSnapshot::promote(int number_of_classes) {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
    "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  _number_of_classes = number_of_classes;
  return promoted;
}

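// Promote staged malloc records into the snapshot. A newer record for an
// address already in the snapshot replaces the old one; a deallocation
// record removes the entry (and a trailing arena size record, if any).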
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
        "Sanity check");
      // update block states
      if (new_rec->is_allocation_record()) {
        assign_pointer(matched_rec, new_rec);
      } else if (new_rec->is_arena_memory_record()) {
        if (new_rec->size() == 0) {
          // remove size record once size drops to 0
          malloc_snapshot_itr.remove();
        } else {
          assign_pointer(matched_rec, new_rec);
        }
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record, we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_memory_record() &&
              next->is_memory_record_of_arena(matched_rec)) {
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // don't insert size 0 record
      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
        new_rec = NULL;
      }

      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is on
          _untracked_count ++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

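// Walk the sequenced virtual memory records and fold each one into the
// snapshot: reserve, commit, uncommit, release and type-tag operations are
// dispatched to the matching VMMemPointerIterator helper above.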
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate a reserved region that contains the specified address, or
    // the nearest reserved region whose base address is just above the
    // specified address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // only deal with splitting a bigger reserved region into smaller regions.
          // So far, CDS is the only use case.
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert or expand existing committed region to cover this
        // newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type. A memory
        // range cannot be re-tagged with a different type.
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
               "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * An assertion failure here indicates mismatched virtual memory records. The
       * likely scenario is that some virtual memory operations do not go through the
       * os::xxxx_memory() APIs and have to be tracked manually (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}


#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
           ((next->flags() & MemPointerRecord::tag_masks) >
            (cur->flags() & MemPointerRecord::tag_masks)),
           "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->print_cr("");
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT