Mon, 05 Nov 2012 15:30:22 -0500
8001591: NMT: assertion failed: assert(rec->addr() + rec->size() <= cur->base()) failed: Can not overlap in memSnapshot.cpp
Summary: NMT should allow overlapping committed regions as long as they belong to the same reserved region
Reviewed-by: dholmes, coleenp
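For example (an illustrative scenario, not taken from this changeset): within a
single reserved region [0x1000, 0x5000), one committed record [0x1000, 0x3000)
may be followed by an overlapping one [0x2000, 0x4000) before the two are
merged. The snapshot code should tolerate such overlap, since both records fall
inside the same reserved region, rather than fail the assertion above.
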
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtNone,       "Unknown"}  // can happen when type tagging records lag behind
};
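
// This table drives both the per-type accounting slots (looked up via
// flag2index()) and the display names (looked up via type2name()); the
// constructor below tags each counter with its memory type from this table.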

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}

void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}
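
// Note the division of labor: clear() frees the callsite and map arrays and
// then calls reset(); reset() only zeroes the statistics and empties any
// arrays that are still allocated, so they can be reused by the next baseline.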
void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;
  // also reset the thread count, which baseline_vm_summary() recounts;
  // otherwise repeated baselines would accumulate a stale count
  _number_of_threads = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// Baseline malloc'd memory records: generate an overall summary and
// per-memory-type summaries.
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    _total_malloced += size;
    _malloc_data[index].inc(size);
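    // An arena shows up as a pair of adjacent records in address order: the
    // arena record itself, immediately followed by an arena size record that
    // carries the size of the arena's chunk space. When the pair is found,
    // both records are consumed in a single loop iteration.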
    if (MemPointerRecord::is_arena_record(malloc_ptr->flags())) {
      // see if the matching arena size record is present
      MemPointerRecord* next_malloc_ptr = (MemPointerRecord*)malloc_itr.peek_next();
      // guard against the arena record being the last record in the array
      if (next_malloc_ptr != NULL &&
          MemPointerRecord::is_arena_size_record(next_malloc_ptr->flags())) {
        assert(next_malloc_ptr->is_size_record_of_arena(malloc_ptr), "arena records do not match");
        size = next_malloc_ptr->size();
        _arena_data[index].inc(size);
        used_arena_size += size;
        malloc_itr.next();
      }
    }
    malloc_ptr = (MemPointerRecord*)malloc_itr.next();
  }

  // subtract the used arena size to get the size of arena chunks on the
  // free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we really don't know how many chunks are on the free list, so just set
  // the counter to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// Baseline mmap'd memory records: generate an overall summary and
// per-memory-type summaries.
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
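      // Records arrive in address order, and a committed region always lies
      // within a reserved region that precedes it, so `index` still holds
      // the memory type of the enclosing reserved region here.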
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// Baseline malloc'd memory by callsite, keeping only the callsites whose
// total allocation amounts to at least 1KB.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize the malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order; details are aggregated by callsite.
  // The sort is performed in place on the same array the iterator above is
  // positioned at, so the loop below visits the records in pc order.
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // baseline callsites whose accumulated allocations reach at least 1KB
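  // Aggregation scheme: with the records sorted by callsite pc, records from
  // the same callsite are adjacent. Sizes are accumulated into
  // malloc_callsite while the pc stays the same; when the pc changes, the
  // accumulated callsite is appended to _malloc_cs (if it reached 1KB) and a
  // new accumulation starts. The final callsite is handled after the loop.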
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_size_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with the last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize the virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize the virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx* reserved_rec = NULL;
  VMMemRegionEx* committed_rec = NULL;

  // vm_ptr arrives in increasing base address order
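  //
  // Consolidation example (illustrative addresses): two reserved regions
  //   [0x1000, 0x3000) and [0x3000, 0x5000)
  // that share a memory type and callsite pc are merged into one map entry
  //   [0x1000, 0x5000)
  // and likewise for adjacent committed regions from the same callsite.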
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for the virtual memory map.
      // The criteria for consolidation:
      //   1. the two reserved memory regions are adjacent
      //   2. they belong to the same memory type
      //   3. they were reserved from the same callsite
      if (reserved_rec == NULL ||
          reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
          FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
          reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // we just inserted a reserved region; keep a pointer to the element
        // inside the virtual memory map array for later consolidation
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for the virtual memory map.
      // The criteria for consolidation:
      //   1. the two committed memory regions are adjacent
      //   2. they were committed from the same callsite
      if (committed_rec == NULL ||
          committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
          committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }

  // deal with the last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort into callsite pc order; details are aggregated by callsite
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records with the same pc, which are now
  // adjacent after the sort
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// Baseline a snapshot. If summary_only is false, memory usages aggregated by
// callsite are baselined as well.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  MutexLockerEx snapshot_locker(snapshot._lock, true);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
               baseline_vm_summary(snapshot._vm_ptrs);
  _number_of_classes = SystemDictionary::number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
                 baseline_vm_details(snapshot._vm_ptrs);
  }
  return _baselined;
}

int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}

MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  // note: _vm_map and _number_of_threads are not copied by this operator
  return *this;
}

/* compare functions for sorting */
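
// All comparators below follow the qsort convention (negative / zero /
// positive). UNSIGNED_COMPARE(a, b) sorts ascending; the size comparators
// pass their operands reversed, which yields a descending sort by amount.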

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in descending size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  // operands reversed: larger amounts sort first
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}

// sort baselined mmap'd records in descending reserved-size order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  // operands reversed: larger reserved amounts sort first
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}

// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}