Fri, 19 Oct 2012 21:40:07 -0400
7199092: NMT: NMT needs to deal with overlapped virtual memory ranges
Summary: Enhanced virtual memory tracking to track committed regions as well as reserved regions, so NMT can now generate a virtual memory map.
Reviewed-by: acorn, coleenp
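
As background for this change, here is a minimal, standalone sketch (not the HotSpot implementation; the Range and ReservedRegion names are invented purely for illustration) of what "tracking committed regions as well as reserved regions" means: each reserved range keeps a sorted, merged list of its committed sub-ranges, which is enough to cope with commits that overlap earlier ones and to print a simple virtual memory map.

// Illustrative sketch only -- not HotSpot code.
#include <cstddef>
#include <cstdio>
#include <vector>
#include <algorithm>

struct Range {
  size_t base;   // start "address" (plain size_t for illustration)
  size_t size;   // length in bytes
  size_t end() const { return base + size; }
};

struct ReservedRegion {
  Range reserved;                 // the reserved range
  std::vector<Range> committed;   // committed sub-ranges, kept sorted and disjoint

  // Record a commit; overlapping or adjacent committed ranges are merged.
  void commit(size_t base, size_t size) {
    committed.push_back(Range{base, size});
    std::sort(committed.begin(), committed.end(),
              [](const Range& a, const Range& b) { return a.base < b.base; });
    std::vector<Range> merged;
    for (const Range& c : committed) {
      if (!merged.empty() && c.base <= merged.back().end()) {
        merged.back().size = std::max(merged.back().end(), c.end()) - merged.back().base;
      } else {
        merged.push_back(c);
      }
    }
    committed.swap(merged);
  }

  // Print a tiny "virtual memory map": the reserved range and its committed parts.
  void print() const {
    std::printf("reserved  [0x%zx, 0x%zx)\n", reserved.base, reserved.end());
    for (const Range& c : committed) {
      std::printf("  committed [0x%zx, 0x%zx)\n", c.base, c.end());
    }
  }
};

int main() {
  ReservedRegion region{{0x1000, 0x10000}, {}};
  region.commit(0x1000, 0x2000);
  region.commit(0x2000, 0x1000);   // overlaps the previous commit: merged
  region.commit(0x8000, 0x1000);   // disjoint: kept as a separate entry
  region.print();
  return 0;
}
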
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/atomic.hpp"
#include "services/memBaseline.hpp"
#include "services/memRecorder.hpp"
#include "services/memPtr.hpp"
#include "services/memTracker.hpp"
// Return the current record and skip over consecutive records of the same
// kind (see same_kind()), so only the last record of each run is returned.
// Virtual memory records are never collapsed.
MemPointer* SequencedRecordIterator::next_record() {
  MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
  if (itr_cur == NULL) {
    return itr_cur;
  }

  MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();

  // don't collapse virtual memory records
  while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
         !itr_next->is_vm_pointer() &&
         same_kind(itr_cur, itr_next)) {
    itr_cur = itr_next;
    itr_next = (MemPointerRecord*)_itr.next();
  }

  return itr_cur;
}
volatile jint MemRecorder::_instance_count = 0;
MemRecorder::MemRecorder() {
  assert(MemTracker::is_on(), "Native memory tracking is off");
  Atomic::inc(&_instance_count);
  debug_only(set_generation();)

  // use records with callsite information only when callsite tracking is enabled
  if (MemTracker::track_callsite()) {
    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
  } else {
    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecord,
        DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
  }
  _next = NULL;

  if (_pointer_records != NULL) {
    // record the recorder itself and its pointer array
    record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
        sizeof(MemRecorder), CALLER_PC);
    record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
        _pointer_records->instance_size(), CURRENT_PC);
  }
}
MemRecorder::~MemRecorder() {
  if (_pointer_records != NULL) {
    if (MemTracker::is_on()) {
      MemTracker::record_free((address)_pointer_records, mtNMT);
      MemTracker::record_free((address)this, mtNMT);
    }
    delete _pointer_records;
  }
  if (_next != NULL) {
    delete _next;
  }

  Atomic::dec(&_instance_count);
}
// Sorting order:
//   1. memory block address
//   2. mem pointer record tags
//   3. sequence number
int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
  const MemPointerRecord* p1 = (const MemPointerRecord*)e1;
  const MemPointerRecord* p2 = (const MemPointerRecord*)e2;
  int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr());
  if (delta == 0) {
    int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks),
                              (p2->flags() & MemPointerRecord::tag_masks));
    if (df == 0) {
      assert(p1->seq() != p2->seq(), "dup seq");
      return p1->seq() - p2->seq();
    } else {
      return df;
    }
  } else {
    return delta;
  }
}
bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) {
#ifdef ASSERT
  if (MemPointerRecord::is_virtual_memory_record(flags)) {
    assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
  } else {
    assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() ||
           (flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() ||
           IS_ARENA_OBJ(flags),
           "bad malloc record");
  }
  // a recorder should only hold records within the same generation
  unsigned long cur_generation = SequenceGenerator::current_generation();
  assert(cur_generation == _generation,
         "this thread did not enter sync point");
#endif

  if (MemTracker::track_callsite()) {
    SeqMemPointerRecordEx ap(p, flags, size, pc);
    debug_only(check_dup_seq(ap.seq());)
    return _pointer_records->append(&ap);
  } else {
    SeqMemPointerRecord ap(p, flags, size);
    debug_only(check_dup_seq(ap.seq());)
    return _pointer_records->append(&ap);
  }
}
// iterator for alloc pointers
SequencedRecordIterator MemRecorder::pointer_itr() {
  assert(_pointer_records != NULL, "just check");
  _pointer_records->sort((FN_SORT)sort_record_fn);
  return SequencedRecordIterator(_pointer_records);
}
#ifdef ASSERT
void MemRecorder::set_generation() {
  _generation = SequenceGenerator::current_generation();
}

// verify that no record already held by this recorder carries the given sequence number
void MemRecorder::check_dup_seq(jint seq) const {
  MemPointerArrayIteratorImpl itr(_pointer_records);
  MemPointerRecord* rc = (MemPointerRecord*)itr.current();
  while (rc != NULL) {
    assert(rc->seq() != seq, "dup seq");
    rc = (MemPointerRecord*)itr.next();
  }
}

#endif
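
For readers following the record bookkeeping above, the following standalone sketch (not HotSpot code; it assumes same_kind() compares a record's address and tag) illustrates what sort_record_fn() and SequencedRecordIterator::next_record() achieve together: records are sorted by (address, tag, sequence number), and for malloc-type records only the last record of each (address, tag) run survives.

// Illustrative sketch only -- not HotSpot code.
#include <cstdio>
#include <vector>
#include <algorithm>

struct Rec {
  unsigned long addr;  // memory block address
  int tag;             // e.g. malloc/free tag (illustrative)
  int seq;             // sequence number within the tracking generation
};

int main() {
  std::vector<Rec> recs = {
    {0x1000, 1, 5}, {0x1000, 1, 2}, {0x1000, 2, 3}, {0x2000, 1, 4}
  };

  // Sort by address, then tag, then sequence number (mirrors sort_record_fn).
  std::sort(recs.begin(), recs.end(), [](const Rec& a, const Rec& b) {
    if (a.addr != b.addr) return a.addr < b.addr;
    if (a.tag  != b.tag)  return a.tag  < b.tag;
    return a.seq < b.seq;
  });

  // Keep only the last record of each (address, tag) run (mirrors next_record()).
  std::vector<Rec> collapsed;
  for (size_t i = 0; i < recs.size(); i++) {
    if (i + 1 == recs.size() ||
        recs[i].addr != recs[i + 1].addr || recs[i].tag != recs[i + 1].tag) {
      collapsed.push_back(recs[i]);
    }
  }

  for (const Rec& r : collapsed) {
    std::printf("addr=0x%lx tag=%d seq=%d\n", r.addr, r.tag, r.seq);
  }
  return 0;
}
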