src/share/vm/services/memSnapshot.hpp

/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"

// Snapshot pointer array iterator

// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
 public:
  MemPointerIterator(MemPointerArray* arr):
    MemPointerArrayIteratorImpl(arr) {
    assert(arr != NULL, "null array");
  }

#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
                              const MemPointer* ptr2) const {
    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // We do see multiple commits/uncommits of the same memory; that is OK.
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }

  virtual bool insert(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
             err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
      assert(!is_dup_pointer(p1, p2),
             err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    return _array->insert_at(ptr, _pos);
  }

  virtual bool insert_after(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
             err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);

      assert(!is_dup_pointer(p1, p2),
             err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_array->insert_at(ptr, _pos + 1)) {
      _pos++;
      return true;
    }
    return false;
  }
#endif

  virtual MemPointer* locate(address addr) {
    MemPointer* cur = current();
    while (cur != NULL && cur->addr() < addr) {
      cur = next();
    }
    return cur;
  }
};
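
// A hypothetical usage sketch (not part of the original header): locate()
// relies on the array being sorted by address, so a caller scanning for a
// particular malloc record would typically do something like the following.
// The array setup and the 'target_addr' name are illustrative only.
//
//   MemPointerArrayImpl<MemPointerRecord> records;  // assumed concrete array type
//   MemPointerIterator itr(&records);
//   MemPointerRecord* rec = (MemPointerRecord*)itr.locate(target_addr);
//   if (rec != NULL && rec->addr() == target_addr) {
//     // exact match: 'rec' describes the malloc-ed block at target_addr
//   }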

class VMMemPointerIterator : public MemPointerIterator {
 public:
  VMMemPointerIterator(MemPointerArray* arr):
    MemPointerIterator(arr) {
  }

  // Locate an existing reserved memory region that contains the specified
  // address, or the reserved region just above this address, where the
  // incoming reserved region should be inserted.
  virtual MemPointer* locate(address addr) {
    reset();
    VMMemRegion* reg = (VMMemRegion*)current();
    while (reg != NULL) {
      if (reg->is_reserved_region()) {
        if (reg->contains_address(addr) || addr < reg->base()) {
          return reg;
        }
      }
      reg = (VMMemRegion*)next();
    }
    return NULL;
  }

  // The following methods update virtual memory in the context of the
  // 'current' position, which callers must position properly via the
  // locate() method.
  bool add_reserved_region(MemPointerRecord* rec);
  bool add_committed_region(MemPointerRecord* rec);
  bool remove_uncommitted_region(MemPointerRecord* rec);
  bool remove_released_region(MemPointerRecord* rec);

  // Split a reserved region to create a new memory region with the
  // specified base and size.
  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);
 private:
  bool insert_record(MemPointerRecord* rec);
  bool insert_record_after(MemPointerRecord* rec);

  bool insert_reserved_region(MemPointerRecord* rec);

  // reset the current position
  inline void reset() { _pos = 0; }
#ifdef ASSERT
  // Check the integrity of the records in the current reserved memory region.
  bool check_reserved_region() {
    VMMemRegion* reserved_region = (VMMemRegion*)current();
    assert(reserved_region != NULL && reserved_region->is_reserved_region(),
           "Sanity check");
    // All committed regions that follow the current reserved region should
    // belong to that reserved region.
    VMMemRegion* next_region = (VMMemRegion*)next();
    for (; next_region != NULL && next_region->is_committed_region();
         next_region = (VMMemRegion*)next()) {
      if (!reserved_region->contains_region(next_region)) {
        return false;
      }
    }
    return true;
  }

  virtual bool is_dup_pointer(const MemPointer* ptr1,
                              const MemPointer* ptr2) const {
    VMMemRegion* p1 = (VMMemRegion*)ptr1;
    VMMemRegion* p2 = (VMMemRegion*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // We do see multiple commits/uncommits of the same memory; that is OK.
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
#endif
};
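
// A hypothetical usage sketch (not part of the original header): callers are
// expected to position the iterator with locate() before mutating regions,
// e.g. when a new reserve record arrives from a recorder. 'vm_array' and
// 'reserve_rec' are illustrative names only.
//
//   VMMemPointerIterator vm_itr(vm_array);
//   vm_itr.locate(reserve_rec->addr());   // current() is now the containing
//                                         // region, or the one just above
//   vm_itr.add_reserved_region(reserve_rec);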

class MallocRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;

 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  virtual MemPointer* current() const {
#ifdef ASSERT
    MemPointer* cur_rec = _itr.current();
    if (cur_rec != NULL) {
      MemPointer* prev_rec = _itr.peek_prev();
      MemPointer* next_rec = _itr.peek_next();
      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
#endif
    return _itr.current();
  }
  virtual MemPointer* next() {
    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
    // An arena memory record is a special case: its sequence number has to
    // be compared against that of its associated arena record.
    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
      // If there is an associated arena record, it has to be the previous
      // record because of the sorting order (by address): NMT generates a
      // pseudo address for an arena's size record by offsetting the arena's
      // address, which guarantees the ordering of the arena record and its
      // size record.
      if (prev_rec != NULL && prev_rec->is_arena_record() &&
          next_rec->is_memory_record_of_arena(prev_rec)) {
        if (prev_rec->seq() > next_rec->seq()) {
          // Skip this arena memory record.
          // Two scenarios:
          // - If the arena record is an allocation record, this early
          //   size record must be left over from a previous arena,
          //   and the last size record should have size = 0.
          // - If the arena record is a deallocation record, this
          //   size record should be its cleanup record, which should
          //   also have size = 0. In other words, an arena always resets
          //   its size before it goes away (see Arena's destructor).
          assert(next_rec->size() == 0, "size not reset");
          return _itr.next();
        } else {
          assert(prev_rec->is_allocation_record(),
                 "Arena size record ahead of allocation record");
        }
      }
    }
    return next_rec;
  }

  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
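
// Illustration (not part of the original header) of the arena pairing that
// next() relies on. Per the comment above, NMT tags an arena's size record
// with a pseudo address derived from the arena's own address, so after
// sorting by address the two records are adjacent, e.g.:
//
//   [arena record]        addr = A,          seq = 12
//   [arena memory record] addr = A + offset, seq = 10   // older than its
//                                                       // arena record
//
// Because seq 10 < 12, this size record is left over from a destroyed arena
// (or is a cleanup record) and next() skips it; its size must already be 0.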

// Collapse duplicated records. Eliminating duplicated records here is much
// cheaper than doing so during the promotion phase. However, it has a
// limitation: it can only eliminate duplicated records within a generation,
// so there is still a chance of seeing duplicated records during promotion.
// We want to use the record with the higher sequence number, because it has
// the more accurate callsite pc.
class VMRecordIterator : public MemPointerArrayIterator {
 private:
  MemPointerArrayIteratorImpl _itr;

 public:
  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
             "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  virtual MemPointer* current() const {
    return _itr.current();
  }

  // get the next record, skipping duplicated records
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
             "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
  void remove() { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }

 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
    return ret;
  }
};
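
// Illustration (not part of the original header): given pre-sorted input in
// which the same virtual memory operation was recorded twice, next() keeps
// only the later record, whose sequence number (and therefore callsite pc)
// is more accurate:
//
//   [reserve addr = A, size = S, seq = 3]   // skipped as a duplicate
//   [reserve addr = A, size = S, seq = 7]   // returned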

class StagingArea VALUE_OBJ_CLASS_SPEC {
 private:
  MemPointerArray* _malloc_data;
  MemPointerArray* _vm_data;

 public:
  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
    init();
  }

  ~StagingArea() {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
  }

  MallocRecordIterator malloc_record_walker() {
    return MallocRecordIterator(malloc_data());
  }

  VMRecordIterator virtual_memory_record_walker();

  bool init();
  void clear() {
    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
    _malloc_data->shrink();
    _malloc_data->clear();
    _vm_data->clear();
  }

  inline MemPointerArray* malloc_data() { return _malloc_data; }
  inline MemPointerArray* vm_data()     { return _vm_data; }
};
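
// A hypothetical lifecycle sketch (not part of the original header): a
// staging area is initialized once, filled during merges, and cleared after
// each promotion pass. The walk step shown here is illustrative only.
//
//   StagingArea staging;                  // constructor calls init()
//   MallocRecordIterator m_itr = staging.malloc_record_walker();
//   // ... walk malloc records and promote them into the snapshot ...
//   staging.clear();                      // shrink and reset for the next pass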

class MemBaseline;
class MemSnapshot : public CHeapObj<mtNMT> {
 private:
  // The following two arrays contain records of all known live memory blocks:
  // live malloc-ed memory pointers
  MemPointerArray* _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray* _vm_ptrs;

  StagingArea _staging_area;

  // the lock that protects this snapshot
  Monitor* _lock;

  // the number of instance classes
  int _number_of_classes;

  NOT_PRODUCT(size_t _untracked_count;)
  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // whether we are running out of native memory
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
            _staging_area.malloc_data() == NULL ||
            _staging_area.vm_data() == NULL ||
            _vm_ptrs == NULL || _lock == NULL ||
            _alloc_ptrs->out_of_memory() ||
            _vm_ptrs->out_of_memory());
  }

  // merge a per-thread memory recorder into the staging area
  bool merge(MemRecorder* rec);
  // promote staged data into the snapshot
  bool promote(int number_of_classes);

  int number_of_classes() const { return _number_of_classes; }

  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)
  // dump all virtual memory pointers in the snapshot
  DEBUG_ONLY(void dump_all_vm_pointers();)

 private:
  // copy a sequenced pointer from src to dest
  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  // assign a sequenced pointer to a non-sequenced pointer
  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);

  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};
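
// A hypothetical usage sketch (not part of the original header): a worker
// thread merges per-thread recorders into the staging area, then promotes
// the staged data into the snapshot in one step. 'recorder' and 'num_classes'
// are illustrative names only.
//
//   MemSnapshot* snapshot = new MemSnapshot();
//   if (!snapshot->out_of_memory()) {
//     snapshot->merge(recorder);          // stage one thread's records
//     snapshot->promote(num_classes);     // fold staged data into the snapshot
//   }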

#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
