Tue, 08 Jan 2013 14:04:25 -0500
8005048: NMT: #loaded classes needs to just show the # defined classes
Summary: Count number of instance classes so that it matches class metadata size
Reviewed-by: coleenp, acorn
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"
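
// Maps each memory type (MEMFLAGS) to a human-readable name. flag2index()
// and type2name() below search this table linearly, so its entries must
// cover all NUMBER_OF_MEMORY_TYPE types.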
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtNone,       "Unknown"}  // this can happen when type-tagging records
                             // lag behind the allocation records
};

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}

void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}

void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;
  // reset the thread count too; baseline_vm_summary() recounts threads on
  // each baseline, so a stale value would otherwise accumulate
  _number_of_threads = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
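// Note: snapshot malloc records are kept in memory address order, and an
// arena's memory record, when present, immediately follows the arena object
// record itself; the peek_next() pairing below relies on that layout.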
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // Anonymous arenas exist; they are used either as value objects,
      // embedded inside other objects, or as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // check whether the next record is this arena's memory record;
        // peek_next() may return NULL at the end of the array
        MemPointerRecord* next_malloc_ptr = (MemPointerRecord*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
             "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // subtract the used arena size to get the size of arena chunks still on
  // the free list. For example, if 10MB of mtChunk memory has been malloc'd
  // and arenas currently use 8MB of it, the remaining 2MB sits in the free
  // list.
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we really don't know how many chunks are in the free list, so just set
  // the counter to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
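      // a committed region always follows its enclosing reserved region in
      // the snapshot, so 'index' still holds that region's memory type here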
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsites, but only the callsites with memory
// allocation over 1KB are stored
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // baseline callsites whose total allocation is over 1KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx* reserved_rec = NULL;
  VMMemRegionEx* committed_rec = NULL;

  // vm_ptr records arrive in increasing base address order
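  // For illustration (addresses made up): a reserved region
  // [0x1000, +4K, mtThread, pc=A] followed by [0x2000, +4K, mtThread, pc=A]
  // merges into a single [0x1000, +8K] entry, because the two regions are
  // adjacent, have the same type, and were reserved from the same callsite.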
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for the virtual memory map.
      // The criteria for consolidation are:
      //   1. the two reserved regions are adjacent
      //   2. they belong to the same memory type
      //   3. they were reserved from the same callsite
      if (reserved_rec == NULL ||
          reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
          FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
          reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // we just inserted a new reserved region; keep a pointer to the
        // element inside the virtual memory map array, not to vm_ptr itself
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for the virtual memory map.
      // The criteria are:
      //   1. the two committed regions are adjacent
      //   2. they were committed from the same callsite
      if (committed_rec == NULL ||
          committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
          committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort it into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
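  // (the same pc can appear once per region after the loop above; merging
  // duplicates leaves one entry per callsite with summed reserved and
  // committed amounts)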
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only is false, memory usage aggregated by
// callsites is also baselined.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  MutexLockerEx snapshot_locker(snapshot._lock, true);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
               baseline_vm_summary(snapshot._vm_ptrs);
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
                 baseline_vm_details(snapshot._vm_ptrs);
  }
  return _baselined;
}

int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}

MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;
  // also carry over the thread count so the copied baseline is complete
  _number_of_threads = other._number_of_threads;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
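    // deep-copy the callsite arrays element by element; the two baselines
    // must not share the underlying arrays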
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */
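// Each comparator below follows the usual qsort-style convention, returning
// a negative, zero, or positive value when the first argument sorts before,
// equal to, or after the second; FN_SORT is the function pointer type that
// MemPointerArray::sort() expects.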

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in descending size order (note the
// reversed arguments to UNSIGNED_COMPARE)
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}

// sort baselined mmap'd records in descending reserved size order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}

// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}