Fri, 19 Oct 2012 21:40:07 -0400
7199092: NMT: NMT needs to deal with overlapped virtual memory ranges
Summary: Enhanced virtual memory tracking to track committed regions as well as reserved regions, so that NMT can now generate a virtual memory map.
Reviewed-by: acorn, coleenp
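Roughly speaking, the snapshot's virtual memory array now keeps a reserved-region record together with the committed sub-regions it contains, which is what lets NMT print a virtual memory map. The sketch below is illustrative only; the Range type and names are not part of this changeset. It shows the containment invariant the new bookkeeping relies on: a committed range must fall entirely within its owning reserved range.

    #include <stdint.h>
    #include <stddef.h>

    // Illustrative only: a reserved or committed address range.
    struct Range {
      uintptr_t base;
      size_t    size;
    };

    // True if 'committed' lies entirely inside 'reserved'.
    static bool contains(const Range& reserved, const Range& committed) {
      return committed.base >= reserved.base &&
             committed.base + committed.size <= reserved.base + reserved.size;
    }
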
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP
#define SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"
// Snapshot pointer array iterator

// The pointer array contains malloc-ed pointers
class MemPointerIterator : public MemPointerArrayIteratorImpl {
 public:
  MemPointerIterator(MemPointerArray* arr):
    MemPointerArrayIteratorImpl(arr) {
    assert(arr != NULL, "null array");
  }

#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    MemPointerRecord* p1 = (MemPointerRecord*)ptr1;
    MemPointerRecord* p2 = (MemPointerRecord*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // We do see multiple commit/uncommit records on the same memory; that is ok.
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
  virtual bool insert(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    return _array->insert_at(ptr, _pos);
  }

  virtual bool insert_after(MemPointer* ptr) {
    if (_pos > 0) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos - 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_pos < _array->length() - 1) {
      MemPointer* p1 = (MemPointer*)ptr;
      MemPointer* p2 = (MemPointer*)_array->at(_pos + 1);
      assert(!is_dup_pointer(p1, p2),
        err_msg("duplicated pointer, flag = [%x]", (unsigned int)((MemPointerRecord*)p1)->flags()));
    }
    if (_array->insert_at(ptr, _pos + 1)) {
      _pos++;
      return true;
    }
    return false;
  }
#endif
  virtual MemPointer* locate(address addr) {
    MemPointer* cur = current();
    while (cur != NULL && cur->addr() < addr) {
      cur = next();
    }
    return cur;
  }
};
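
// Example (descriptive comment, hypothetical addresses): starting from the current
// position on an address-sorted array with records at 0x1000, 0x2000 and 0x3000,
// locate(0x1800) skips 0x1000 and returns the record at 0x2000, i.e. the first
// record at or above the requested address; locate(0x4000) runs off the end and
// returns NULL.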
class VMMemPointerIterator : public MemPointerIterator {
 public:
  VMMemPointerIterator(MemPointerArray* arr):
    MemPointerIterator(arr) {
  }

  // Locate an existing reserved memory region that contains the specified
  // address, or the reserved region just above this address, where the
  // incoming reserved region should be inserted.
  virtual MemPointer* locate(address addr) {
    reset();
    VMMemRegion* reg = (VMMemRegion*)current();
    while (reg != NULL) {
      if (reg->is_reserved_region()) {
        if (reg->contains_address(addr) || addr < reg->base()) {
          return reg;
        }
      }
      reg = (VMMemRegion*)next();
    }
    return NULL;
  }
  // The following methods update virtual memory in the context of the
  // 'current' position, which callers must first set via the locate()
  // method (see the usage sketch following this class).
  bool add_reserved_region(MemPointerRecord* rec);
  bool add_committed_region(MemPointerRecord* rec);
  bool remove_uncommitted_region(MemPointerRecord* rec);
  bool remove_released_region(MemPointerRecord* rec);

  // Split a reserved region to create a new memory region with the specified
  // base and size.
  bool split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size);

 private:
  bool insert_record(MemPointerRecord* rec);
  bool insert_record_after(MemPointerRecord* rec);

  bool insert_reserved_region(MemPointerRecord* rec);

  // reset current position
  inline void reset() { _pos = 0; }
#ifdef ASSERT
  virtual bool is_dup_pointer(const MemPointer* ptr1,
    const MemPointer* ptr2) const {
    VMMemRegion* p1 = (VMMemRegion*)ptr1;
    VMMemRegion* p2 = (VMMemRegion*)ptr2;

    if (p1->addr() != p2->addr()) return false;
    if ((p1->flags() & MemPointerRecord::tag_masks) !=
        (p2->flags() & MemPointerRecord::tag_masks)) {
      return false;
    }
    // We do see multiple commit/uncommit records on the same memory; that is ok.
    return (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_alloc ||
           (p1->flags() & MemPointerRecord::tag_masks) == MemPointerRecord::tag_release;
  }
#endif
};
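
// Usage sketch (descriptive comment; 'vm_ptrs' and 'rec' are hypothetical names):
// callers position the iterator first, then apply the matching update.
//
//   VMMemPointerIterator itr(vm_ptrs);   // vm_ptrs: address-sorted VM record array
//   itr.locate(rec->addr());             // 'current' is now the reserved region that
//                                        // contains rec, or the region just above it
//   itr.add_committed_region(rec);       // e.g. fold a commit record into that region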
class MallocRecordIterator : public MemPointerArrayIterator {
 protected:
  MemPointerArrayIteratorImpl  _itr;

 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }

  virtual MemPointer* current() const {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    if (next == NULL || next->addr() != cur->addr()) {
      return cur;
    } else {
      assert(!cur->is_vm_pointer(), "Sanity check");
      assert(cur->is_allocation_record() && next->is_deallocation_record(),
             "sorting order");
      assert(cur->seq() != next->seq(), "Sanity check");
      return cur->seq() > next->seq() ? cur : next;
    }
  }

  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
    MemPointerRecord* next = (MemPointerRecord*)_itr.next();
    if (next == NULL) {
      return NULL;
    }
    if (cur->addr() == next->addr()) {
      next = (MemPointerRecord*)_itr.next();
    }
    return current();
  }

  MemPointer* peek_next() const      { ShouldNotReachHere(); return NULL; }
  MemPointer* peek_prev() const      { ShouldNotReachHere(); return NULL; }
  void remove()                      { ShouldNotReachHere(); }
  bool insert(MemPointer* ptr)       { ShouldNotReachHere(); return false; }
  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
};
// Collapses duplicated records. Eliminating duplicated records here is much
// cheaper than doing so during the promotion phase. It does have a limitation,
// though: it can only eliminate duplicated records within one generation, so
// duplicated records may still be seen during promotion.
// We keep the record with the higher sequence number, because it carries the
// more accurate callsite pc (see the example sketch following this class).
class VMRecordIterator : public MallocRecordIterator {
 public:
  VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
  }

  virtual MemPointer* current() const {
    return _itr.current();
  }

  // get next record, but skip the duplicated records
  virtual MemPointer* next() {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.next();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
      assert(cur != NULL, "Sanity check");
      assert(((SeqMemPointerRecord*)next)->seq() > ((SeqMemPointerRecord*)cur)->seq(),
        "pre-sort order");

      if (is_duplicated_record(cur, next)) {
        _itr.next();
        cur = next;
        next = (MemPointerRecord*)_itr.peek_next();
      } else {
        break;
      }
    }
    return cur;
  }

 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
    assert(!(ret && FLAGS_TO_MEMORY_TYPE(p1->flags()) == mtThreadStack), "dup on stack record");
    return ret;
  }
};
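
// Example sketch (descriptive comment, hypothetical values): records with the same
// address, size and flags are duplicates; the iterator skips the earlier ones, so
// the last duplicate (highest sequence number, hence the most accurate callsite pc)
// is the record that gets returned.
//
//   [addr=0x1000, size=64K, reserve, seq=10]  <- skipped as duplicate
//   [addr=0x1000, size=64K, reserve, seq=12]  <- returned by the iterator
//   [addr=0x1000, size=4K,  commit,  seq=13]  <- different size/flags, not a duplicate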
class StagingArea : public _ValueObj {
 private:
  MemPointerArray*  _malloc_data;
  MemPointerArray*  _vm_data;

 public:
  StagingArea() : _malloc_data(NULL), _vm_data(NULL) {
    init();
  }

  ~StagingArea() {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
  }

  MallocRecordIterator malloc_record_walker() {
    return MallocRecordIterator(malloc_data());
  }

  VMRecordIterator virtual_memory_record_walker();

  bool init();
  void clear() {
    assert(_malloc_data != NULL && _vm_data != NULL, "Just check");
    _malloc_data->shrink();
    _malloc_data->clear();
    _vm_data->clear();
  }

  inline MemPointerArray* malloc_data() { return _malloc_data; }
  inline MemPointerArray* vm_data()     { return _vm_data; }
};
class MemBaseline;

class MemSnapshot : public CHeapObj<mtNMT> {
 private:
  // The following two arrays contain records of all known live memory blocks:
  // live malloc-ed memory pointers
  MemPointerArray*  _alloc_ptrs;
  // live virtual memory pointers
  MemPointerArray*  _vm_ptrs;

  StagingArea       _staging_area;

  // the lock to protect this snapshot
  Monitor*          _lock;

  NOT_PRODUCT(size_t _untracked_count;)
  friend class MemBaseline;

 public:
  MemSnapshot();
  virtual ~MemSnapshot();

  // Returns true if we are running out of native memory.
  bool out_of_memory() {
    return (_alloc_ptrs == NULL ||
            _staging_area.malloc_data() == NULL ||
            _staging_area.vm_data() == NULL ||
            _vm_ptrs == NULL || _lock == NULL ||
            _alloc_ptrs->out_of_memory() ||
            _vm_ptrs->out_of_memory());
  }
  // Merge a per-thread memory recorder into the staging area.
  bool merge(MemRecorder* rec);
  // Promote staged data into the snapshot
  // (see the usage sketch following this class).
  bool promote();

  void wait(long timeout) {
    assert(_lock != NULL, "Just check");
    MonitorLockerEx locker(_lock);
    locker.wait(true, timeout);
  }

  NOT_PRODUCT(void print_snapshot_stats(outputStream* st);)
  NOT_PRODUCT(void check_staging_data();)
  NOT_PRODUCT(void check_malloc_pointers();)
  NOT_PRODUCT(bool has_allocation_record(address addr);)
  // dump all virtual memory pointers in snapshot
  DEBUG_ONLY(void dump_all_vm_pointers();)

 private:
  // copy pointer data from src to dest
  void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);

  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
};
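
// Usage sketch of the two-phase workflow (descriptive comment; 'recorder' and the
// surrounding error handling are hypothetical):
//
//   MemSnapshot* snapshot = new MemSnapshot();
//   if (!snapshot->out_of_memory()) {
//     snapshot->merge(recorder);   // stage a per-thread recorder's records
//     snapshot->promote();         // fold staged data into _alloc_ptrs/_vm_ptrs
//   }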

#endif // SHARE_VM_SERVICES_MEM_SNAPSHOT_HPP