src/share/vm/services/memSnapshot.hpp

author:      zgu
date:        Fri, 09 Nov 2012 19:24:31 -0500
changeset:   4274:fb3190e77d3c
parent:      4193:716c64bda5ba
child:       4285:49cbd3e25ba9
permissions: -rw-r--r--

8001592: NMT: assertion failed: assert(_amount >= amt) failed: Just check: memBaseline.hpp:180
Summary: Fixed NMT miscounting of arena memory when an arena is used as a value or stack object.
Reviewed-by: acorn, coleenp
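
For context, the pattern the fix addresses looks roughly like the sketch
below (illustrative only; the allocation size and usage are hypothetical).
An Arena that lives on the stack, or is embedded in another object as a
value, is never individually malloc-ed, so NMT must attribute its memory
through the arena record and its paired size record rather than through
ordinary malloc records:

    {
      Arena arena;                  // the arena itself is a stack/value object,
                                    // not an individually malloc-ed block
      void* p = arena.Amalloc(64);  // arena memory; NMT tracks it via a "size
                                    // record" at a pseudo address derived from
                                    // the arena's address
    }                               // Arena's destructor resets the size to 0
                                    // before the arena goes away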

/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"
// Snapshot pointer array iterator

// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
 public:
  MemPointerIterator(MemPointerArray* arr):
    MemPointerArrayIteratorImpl(arr) {
    assert(arr != NULL, "null array");
  }

#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // We do see multiple commit/uncommit requests on the same memory; that is OK.
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }

  virtual bool insert(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    return _array->insert_at(ptr, _pos);
  }

  virtual bool insert_after(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);

      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_array->insert_at(ptr, _pos + 1)) {
      _pos++;
      return true;
    }
    return false;
  }
#endif

  // Locate the first record whose address is not below 'addr'.
  virtual MemPointer* locate(address addr) {
    MemPointer* cur = current();
    while (cur != NULL && cur->addr() < addr) {
      cur = next();
    }
    return cur;
  }
};
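
// A minimal usage sketch (hypothetical helper, not part of this header's
// API): walk a sorted malloc pointer array and position at the first record
// at or above a given address.
inline MemPointerRecord* example_find_record(MemPointerArray* arr, address addr) {
  MemPointerIterator itr(arr);
  // locate() advances from the current position until it reaches a record
  // whose address is >= addr, or returns NULL when the array is exhausted.
  return (MemPointerRecord*)itr.locate(addr);
}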

class VMMemPointerIterator : public MemPointerIterator {
 public:
  VMMemPointerIterator(MemPointerArray* arr):
    MemPointerIterator(arr) {
  }

  // Locate an existing reserved memory region that contains the specified
  // address, or the reserved region just above this address, where the
  // incoming reserved region should be inserted.
  virtual MemPointer* locate(address addr) {
    reset();
    VMMemRegion* reg = (VMMemRegion*)current();
    while (reg != NULL) {
      if (reg->is_reserved_region()) {
        if (reg->contains_address(addr) || addr < reg->base()) {
          return reg;
        }
      }
      reg = (VMMemRegion*)next();
    }
    return NULL;
  }

  // The following methods update virtual memory in the context of the
  // 'current' position, which callers must position properly via the
  // locate() method above.
  bool add_reserved_region(MemPointerRecord* rec);
  bool add_committed_region(MemPointerRecord* rec);
  bool remove_uncommitted_region(MemPointerRecord* rec);
  bool remove_released_region(MemPointerRecord* rec);

  // Split a reserved region to create a new memory region with the
  // specified base and size.
  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
 private:
  bool insert_record(MemPointerRecord* rec);
  bool insert_record_after(MemPointerRecord* rec);

  bool insert_reserved_region(MemPointerRecord* rec);

  // reset current position to the first record
  inline void reset() { _pos = 0; }
#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    VMMemRegion* p1 = (VMMemRegion*)ptr1;
    VMMemRegion* p2 = (VMMemRegion*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // We do see multiple commit/uncommit requests on the same memory; that is OK.
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
#endif
};
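
// A hedged sketch (hypothetical helper) of the calling convention described
// above: callers position the iterator on the enclosing reserved region via
// locate() before applying an update at the 'current' position.
inline bool example_commit_region(VMMemPointerIterator* itr, MemPointerRecord* rec) {
  // find the reserved region that contains (or would follow) rec's address
  if (itr->locate(rec->addr()) == NULL) return false;
  // with 'current' positioned, record the committed range inside the region
  return itr->add_committed_region(rec);
}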

class MallocRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;

 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur_rec = _itr.current();
    if (cur_rec != NULL) {
      MemPointer* prev_rec = _itr.peek_prev();
      MemPointer* next_rec = _itr.peek_next();
      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
#endif
    return _itr.current();
  }
  virtual MemPointer* next() {
    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
    // An arena memory record is a special case: its sequence number has to
    // be compared against that of its associated arena record.
    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // If there is an associated arena record, it has to be the previous
      // record because of the sorting order (by address) - NMT generates a
      // pseudo address for an arena's size record by offsetting the arena's
      // address, which guarantees the ordering of the arena record and its
      // size record.
      if (prev_rec != NULL && prev_rec->is_arena_record() &&
          next_rec->is_memory_record_of_arena(prev_rec)) {
        if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena memory record.
          // Two scenarios:
          //   - if the arena record is an allocation record, this early
          //     size record must be left over from a previous arena,
          //     and the last size record should have size = 0.
          //   - if the arena record is a deallocation record, this
          //     size record should be its cleanup record, which should
          //     also have size = 0. In other words, an arena always resets
          //     its size before it goes away (see Arena's destructor).
          assert(next_rec->size() == 0, "size not reset");
          return _itr.next();
        } else {
          assert(prev_rec->is_allocation_record(),
            "Arena size record ahead of allocation record");
        }
      }
    }
    return next_rec;
  }

  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
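
// A hedged usage sketch: MallocRecordIterator is a forward-only walker (the
// peek/insert/remove operations above deliberately ShouldNotReachHere), so
// staged malloc data is consumed in a single pass like this hypothetical loop:
inline void example_walk_malloc_records(MemPointerArray* staged_malloc_data) {
  MallocRecordIterator itr(staged_malloc_data);
  for (MemPointerRecord* rec = (MemPointerRecord*)itr.current();
       rec != NULL;
       rec = (MemPointerRecord*)itr.next()) {
    // process 'rec'; next() transparently skips stale arena size records
  }
}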

// Collapse duplicated records. Eliminating duplicated records here is much
// cheaper than during the promotion phase. However, this has a limitation:
// it can only eliminate duplicated records within a generation, so there is
// still a chance of seeing duplicated records during promotion.
// We want to keep the record with the higher sequence number, because it
// has the more accurate callsite pc.
class VMRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;

 public:
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  virtual MemPointer* current() const {
    return _itr.current();
  }

  // get the next record, skipping duplicated records
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }

 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
    return ret;
  }
};
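
// A hedged sketch: VMRecordIterator yields each virtual memory record once,
// returning the duplicate with the highest sequence number (the most
// accurate callsite pc) and skipping the rest. Hypothetical helper:
inline void example_walk_vm_records(MemPointerArray* staged_vm_data) {
  VMRecordIterator itr(staged_vm_data);  // constructor skips leading duplicates
  for (MemPointerRecord* rec = (MemPointerRecord*)itr.current();
       rec != NULL;
       rec = (MemPointerRecord*)itr.next()) {
    // 'rec' is the surviving record for its address/size/flags triple
  }
}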

class StagingArea : public _ValueObj {
 private:
  MemPointerArray* _malloc_data;
  MemPointerArray* _vm_data;

 public:
  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
    init();
  }

  ~StagingArea() {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
  }

  MallocRecordIterator malloc_record_walker() {
    return MallocRecordIterator(malloc_data());
  }

  VMRecordIterator virtual_memory_record_walker();

  bool init();
  void clear() {
    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
    _malloc_data->shrink();
    _malloc_data->clear();
    _vm_data->clear();
  }

  inline MemPointerArray* malloc_data() { return _malloc_data; }
  inline MemPointerArray* vm_data() { return _vm_data; }
};

class MemBaseline;
class MemSnapshot : public CHeapObj<mtNMT> {
 private:
  // the following two arrays contain records of all known live memory blocks
  // live malloc-ed memory pointers
  MemPointerArray* _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray* _vm_ptrs;

  StagingArea _staging_area;

  // the lock to protect this snapshot
  Monitor* _lock;

  NOT_PRODUCT(size_t _untracked_count;)
  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // whether we are running out of native memory
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
            _staging_area.malloc_data() == NULL ||
            _staging_area.vm_data() == NULL ||
            _vm_ptrs == NULL || _lock == NULL ||
            _alloc_ptrs->out_of_memory() ||
            _vm_ptrs->out_of_memory());
  }

  // merge a per-thread memory recorder into the staging area
  bool merge(MemRecorder* rec);
  // promote staged data to the snapshot
  bool promote();


  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)
  // dump all virtual memory pointers in the snapshot
  DEBUG_ONLY(void dump_all_vm_pointers();)

 private:
  // copy a sequenced pointer from src to dest
  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  // assign a sequenced pointer to a non-sequenced pointer
  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);

  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};
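
// A hedged lifecycle sketch (hypothetical call sequence): per-thread
// recorders are merged into the staging area as they fill up; staged data
// is then promoted into the live snapshot arrays in a single step.
inline bool example_sync_snapshot(MemSnapshot* snapshot, MemRecorder* recorder) {
  if (snapshot->out_of_memory()) return false;   // native memory exhausted
  if (!snapshot->merge(recorder)) return false;  // stage this recorder's data
  return snapshot->promote();                    // fold staging into snapshot
}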

#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
