Tue, 11 Sep 2012 20:53:17 -0400
7181995: NMT ON: NMT assertion failure assert(cur_vm->is_uncommit_record() || cur_vm->is_deallocation_record
Summary: Fixed virtual memory record merge and promotion logic; ordering should be based on sequence number rather than base address order
Reviewed-by: coleenp, acorn
zgu@3900 | 1 | /* |
zgu@3900 | 2 | * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. |
zgu@3900 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
zgu@3900 | 4 | * |
zgu@3900 | 5 | * This code is free software; you can redistribute it and/or modify it |
zgu@3900 | 6 | * under the terms of the GNU General Public License version 2 only, as |
zgu@3900 | 7 | * published by the Free Software Foundation. |
zgu@3900 | 8 | * |
zgu@3900 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
zgu@3900 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
zgu@3900 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
zgu@3900 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
zgu@3900 | 13 | * accompanied this code). |
zgu@3900 | 14 | * |
zgu@3900 | 15 | * You should have received a copy of the GNU General Public License version |
zgu@3900 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
zgu@3900 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
zgu@3900 | 18 | * |
zgu@3900 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
zgu@3900 | 20 | * or visit www.oracle.com if you need additional information or have any |
zgu@3900 | 21 | * questions. |
zgu@3900 | 22 | * |
zgu@3900 | 23 | */ |
zgu@3900 | 24 | |
zgu@3900 | 25 | #ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP |
zgu@3900 | 26 | #define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP |
zgu@3900 | 27 | |
zgu@3900 | 28 | #include "memory/allocation.hpp" |
zgu@3900 | 29 | #include "runtime/mutex.hpp" |
zgu@3900 | 30 | #include "runtime/mutexLocker.hpp" |
zgu@3900 | 31 | #include "services/memBaseline.hpp" |
zgu@3900 | 32 | #include "services/memPtrArray.hpp" |
zgu@3900 | 33 | |
zgu@3900 | 34 | |
zgu@3900 | 35 | // Snapshot pointer array iterator |
zgu@3900 | 36 | |
zgu@3900 | 37 | // The pointer array contains malloc-ed pointers |
zgu@3900 | 38 | class MemPointerIterator : public MemPointerArrayIteratorImpl { |
zgu@3900 | 39 | public: |
zgu@3900 | 40 | MemPointerIterator(MemPointerArray* arr): |
zgu@3900 | 41 | MemPointerArrayIteratorImpl(arr) { |
zgu@3900 | 42 | assert(arr != NULL, "null array"); |
zgu@3900 | 43 | } |
zgu@3900 | 44 | |
zgu@3900 | 45 | #ifdef ASSERT |
zgu@3900 | 46 | virtual bool is_dup_pointer(const MemPointer* ptr1, |
zgu@3900 | 47 | const MemPointer* ptr2) const { |
zgu@3900 | 48 | MemPointerRecord* p1 = (MemPointerRecord*)ptr1; |
zgu@3900 | 49 | MemPointerRecord* p2 = (MemPointerRecord*)ptr2; |
zgu@3900 | 50 | |
zgu@3900 | 51 | if (p1->addr() != p2->addr()) return false; |
zgu@3900 | 52 | if ((p1->flags() & MemPointerRecord::tag_masks) != |
zgu@3900 | 53 | (p2->flags() & MemPointerRecord::tag_masks)) { |
zgu@3900 | 54 | return false; |
zgu@3900 | 55 | } |
zgu@3900 | 56 | // we do see multiple commit/uncommit on the same memory, it is ok |
zgu@3900 | 57 | return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc || |
zgu@3900 | 58 | (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release; |
zgu@3900 | 59 | } |
zgu@3900 | 60 | |
zgu@3900 | 61 | virtual bool insert(MemPointer* ptr) { |
zgu@3900 | 62 | if (_pos > 0) { |
zgu@3900 | 63 | MemPointer* p1 = (MemPointer*)ptr; |
zgu@3900 | 64 | MemPointer* p2 = (MemPointer*)_array->at(_pos - 1); |
zgu@3900 | 65 | assert(!is_dup_pointer(p1, p2), |
zgu@3986 | 66 | err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); |
zgu@3900 | 67 | } |
zgu@3900 | 68 | if (_pos < _array->length() -1) { |
zgu@3900 | 69 | MemPointer* p1 = (MemPointer*)ptr; |
zgu@3900 | 70 | MemPointer* p2 = (MemPointer*)_array->at(_pos + 1); |
zgu@3900 | 71 | assert(!is_dup_pointer(p1, p2), |
zgu@3986 | 72 | err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); |
zgu@3900 | 73 | } |
zgu@3900 | 74 | return _array->insert_at(ptr, _pos); |
zgu@3900 | 75 | } |
zgu@3900 | 76 | |
zgu@3900 | 77 | virtual bool insert_after(MemPointer* ptr) { |
zgu@3900 | 78 | if (_pos > 0) { |
zgu@3900 | 79 | MemPointer* p1 = (MemPointer*)ptr; |
zgu@3900 | 80 | MemPointer* p2 = (MemPointer*)_array->at(_pos - 1); |
zgu@3900 | 81 | assert(!is_dup_pointer(p1, p2), |
zgu@3986 | 82 | err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); |
zgu@3900 | 83 | } |
zgu@3900 | 84 | if (_pos < _array->length() - 1) { |
zgu@3900 | 85 | MemPointer* p1 = (MemPointer*)ptr; |
zgu@3900 | 86 | MemPointer* p2 = (MemPointer*)_array->at(_pos + 1); |
zgu@3900 | 87 | |
zgu@3900 | 88 | assert(!is_dup_pointer(p1, p2), |
zgu@3986 | 89 | err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags())); |
zgu@3900 | 90 | } |
zgu@3900 | 91 | if (_array->insert_at(ptr, _pos + 1)) { |
zgu@3900 | 92 | _pos ++; |
zgu@3900 | 93 | return true; |
zgu@3900 | 94 | } |
zgu@3900 | 95 | return false; |
zgu@3900 | 96 | } |
zgu@3900 | 97 | #endif |
zgu@3900 | 98 | |
zgu@3900 | 99 | virtual MemPointer* locate(address addr) { |
zgu@3900 | 100 | MemPointer* cur = current(); |
zgu@3900 | 101 | while (cur != NULL && cur->addr() < addr) { |
zgu@3900 | 102 | cur = next(); |
zgu@3900 | 103 | } |
zgu@3900 | 104 | return cur; |
zgu@3900 | 105 | } |
zgu@3900 | 106 | }; |
zgu@3900 | 107 | |
zgu@3900 | 108 | class VMMemPointerIterator : public MemPointerIterator { |
zgu@3900 | 109 | public: |
zgu@3900 | 110 | VMMemPointerIterator(MemPointerArray* arr): |
zgu@3900 | 111 | MemPointerIterator(arr) { |
zgu@3900 | 112 | } |
zgu@3900 | 113 | |
zgu@4053 | 114 | // locate an existing record that contains specified address, or |
zgu@3900 | 115 | // the record, where the record with specified address, should |
zgu@4053 | 116 | // be inserted. |
zgu@4053 | 117 | // virtual memory record array is sorted in address order, so |
zgu@4053 | 118 | // binary search is performed |
zgu@3900 | 119 | virtual MemPointer* locate(address addr) { |
zgu@4053 | 120 | int index_low = 0; |
zgu@4053 | 121 | int index_high = _array->length(); |
zgu@4053 | 122 | int index_mid = (index_high + index_low) / 2; |
zgu@4053 | 123 | int r = 1; |
zgu@4053 | 124 | while (index_low < index_high && (r = compare(index_mid, addr)) != 0) { |
zgu@4053 | 125 | if (r > 0) { |
zgu@4053 | 126 | index_high = index_mid; |
zgu@3900 | 127 | } else { |
zgu@4053 | 128 | index_low = index_mid; |
zgu@3900 | 129 | } |
zgu@4053 | 130 | index_mid = (index_high + index_low) / 2; |
zgu@3900 | 131 | } |
zgu@4053 | 132 | if (r == 0) { |
zgu@4053 | 133 | // update current location |
zgu@4053 | 134 | _pos = index_mid; |
zgu@4053 | 135 | return _array->at(index_mid); |
zgu@4053 | 136 | } else { |
zgu@4053 | 137 | return NULL; |
zgu@4053 | 138 | } |
zgu@3900 | 139 | } |
zgu@3900 | 140 | |
zgu@3900 | 141 | #ifdef ASSERT |
zgu@3900 | 142 | virtual bool is_dup_pointer(const MemPointer* ptr1, |
zgu@3900 | 143 | const MemPointer* ptr2) const { |
zgu@3900 | 144 | VMMemRegion* p1 = (VMMemRegion*)ptr1; |
zgu@3900 | 145 | VMMemRegion* p2 = (VMMemRegion*)ptr2; |
zgu@3900 | 146 | |
zgu@3900 | 147 | if (p1->addr() != p2->addr()) return false; |
zgu@3900 | 148 | if ((p1->flags() & MemPointerRecord::tag_masks) != |
zgu@3900 | 149 | (p2->flags() & MemPointerRecord::tag_masks)) { |
zgu@3900 | 150 | return false; |
zgu@3900 | 151 | } |
zgu@3900 | 152 | // we do see multiple commit/uncommit on the same memory, it is ok |
zgu@3900 | 153 | return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc || |
zgu@3900 | 154 | (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release; |
zgu@3900 | 155 | } |
zgu@3900 | 156 | #endif |
zgu@4053 | 157 | // compare if an address falls into a memory region, |
zgu@4053 | 158 | // return 0, if the address falls into a memory region at specified index |
zgu@4053 | 159 | // return 1, if memory region pointed by specified index is higher than the address |
zgu@4053 | 160 | // return -1, if memory region pointed by specified index is lower than the address |
zgu@4053 | 161 | int compare(int index, address addr) const { |
zgu@4053 | 162 | VMMemRegion* r = (VMMemRegion*)_array->at(index); |
zgu@4053 | 163 | assert(r->is_reserve_record(), "Sanity check"); |
zgu@4053 | 164 | if (r->addr() > addr) { |
zgu@4053 | 165 | return 1; |
zgu@4053 | 166 | } else if (r->addr() + r->reserved_size() <= addr) { |
zgu@4053 | 167 | return -1; |
zgu@4053 | 168 | } else { |
zgu@4053 | 169 | return 0; |
zgu@4053 | 170 | } |
zgu@4053 | 171 | } |
zgu@3900 | 172 | }; |
zgu@3900 | 173 | |
zgu@4053 | 174 | class MallocRecordIterator : public MemPointerArrayIterator { |
zgu@3900 | 175 | private: |
zgu@3900 | 176 | MemPointerArrayIteratorImpl _itr; |
zgu@3900 | 177 | |
zgu@3900 | 178 | public: |
zgu@4053 | 179 | MallocRecordIterator(MemPointerArray* arr) : _itr(arr) { |
zgu@3900 | 180 | } |
zgu@3900 | 181 | |
zgu@3900 | 182 | MemPointer* current() const { |
zgu@4053 | 183 | MemPointerRecord* cur = (MemPointerRecord*)_itr.current(); |
zgu@4053 | 184 | assert(cur == NULL || !cur->is_vm_pointer(), "seek error"); |
zgu@4053 | 185 | MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next(); |
zgu@4053 | 186 | if (next == NULL || next->addr() != cur->addr()) { |
zgu@4053 | 187 | return cur; |
zgu@3900 | 188 | } else { |
zgu@4053 | 189 | assert(!cur->is_vm_pointer(), "Sanity check"); |
zgu@4053 | 190 | assert(cur->is_allocation_record() && next->is_deallocation_record(), |
zgu@4053 | 191 | "sorting order"); |
zgu@4053 | 192 | assert(cur->seq() != next->seq(), "Sanity check"); |
zgu@4053 | 193 | return cur->seq() > next->seq() ? cur : next; |
zgu@3900 | 194 | } |
zgu@3900 | 195 | } |
zgu@3900 | 196 | |
zgu@4053 | 197 | MemPointer* next() { |
zgu@4053 | 198 | MemPointerRecord* cur = (MemPointerRecord*)_itr.current(); |
zgu@4053 | 199 | assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check"); |
zgu@4053 | 200 | MemPointerRecord* next = (MemPointerRecord*)_itr.next(); |
zgu@4053 | 201 | if (next == NULL) { |
zgu@4053 | 202 | return NULL; |
zgu@4053 | 203 | } |
zgu@4053 | 204 | if (cur->addr() == next->addr()) { |
zgu@4053 | 205 | next = (MemPointerRecord*)_itr.next(); |
zgu@4053 | 206 | } |
zgu@4053 | 207 | return current(); |
zgu@3900 | 208 | } |
zgu@3900 | 209 | |
zgu@4053 | 210 | MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; } |
zgu@4053 | 211 | MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; } |
zgu@4053 | 212 | void remove() { ShouldNotReachHere(); } |
zgu@4053 | 213 | bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; } |
zgu@4053 | 214 | bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; } |
zgu@4053 | 215 | }; |
zgu@4053 | 216 | |
zgu@4053 | 217 | class StagingArea : public _ValueObj { |
zgu@4053 | 218 | private: |
zgu@4053 | 219 | MemPointerArray* _malloc_data; |
zgu@4053 | 220 | MemPointerArray* _vm_data; |
zgu@4053 | 221 | |
zgu@4053 | 222 | public: |
zgu@4053 | 223 | StagingArea() : _malloc_data(NULL), _vm_data(NULL) { |
zgu@4053 | 224 | init(); |
zgu@3900 | 225 | } |
zgu@3900 | 226 | |
zgu@4053 | 227 | ~StagingArea() { |
zgu@4053 | 228 | if (_malloc_data != NULL) delete _malloc_data; |
zgu@4053 | 229 | if (_vm_data != NULL) delete _vm_data; |
zgu@3900 | 230 | } |
zgu@3900 | 231 | |
zgu@4053 | 232 | MallocRecordIterator malloc_record_walker() { |
zgu@4053 | 233 | return MallocRecordIterator(malloc_data()); |
zgu@3900 | 234 | } |
zgu@3900 | 235 | |
zgu@4053 | 236 | MemPointerArrayIteratorImpl virtual_memory_record_walker(); |
zgu@4053 | 237 | bool init(); |
zgu@4053 | 238 | void clear() { |
zgu@4053 | 239 | assert(_malloc_data != NULL && _vm_data != NULL, "Just check"); |
zgu@4053 | 240 | _malloc_data->shrink(); |
zgu@4053 | 241 | _malloc_data->clear(); |
zgu@4053 | 242 | _vm_data->clear(); |
zgu@3900 | 243 | } |
zgu@3900 | 244 | |
zgu@4053 | 245 | inline MemPointerArray* malloc_data() { return _malloc_data; } |
zgu@4053 | 246 | inline MemPointerArray* vm_data() { return _vm_data; } |
zgu@3900 | 247 | }; |
zgu@3900 | 248 | |
class MemBaseline;

// Snapshot of native memory usage. Maintains the accumulated live
// malloc and virtual memory record arrays, plus a staging area where
// per-thread recorder data is merged before being promoted into them.
class MemSnapshot : public CHeapObj<mtNMT> {
 private:
  // the following two arrays contain records of all known lived memory blocks
  // live malloc-ed memory pointers
  MemPointerArray*  _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray*  _vm_ptrs;

  // intermediate holding area for merged, not-yet-promoted records
  StagingArea       _staging_area;

  // the lock to protect this snapshot
  Monitor*          _lock;

  // count of records that could not be tracked (debug builds only)
  NOT_PRODUCT(size_t  _untracked_count;)
  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // if we are running out of native memory
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
      _staging_area.malloc_data() == NULL ||
      _staging_area.vm_data() == NULL ||
      _vm_ptrs == NULL || _lock == NULL ||
      _alloc_ptrs->out_of_memory() ||
      _vm_ptrs->out_of_memory());
  }

  // merge a per-thread memory recorder into staging area
  bool merge(MemRecorder* rec);
  // promote staged data to snapshot
  bool promote();


  // block the calling thread on the snapshot lock for up to
  // timeout milliseconds
  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)

 private:
  // copy pointer data from src to dest
  void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);

  // promotion helpers for malloc and virtual memory records respectively
  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};
zgu@3900 | 304 | |
zgu@3900 | 305 | |
zgu@3900 | 306 | #endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP |