 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

/*
 * Sizes are sorted in descending order for reporting
 */
int compare_malloc_size(const MallocSite& s1, const MallocSite& s2) {
  if (s1.size() == s2.size()) {
    return 0;
  } else if (s1.size() > s2.size()) {
    return -1;
  } else {
    return 1;
  }
}

int compare_virtual_memory_size(const VirtualMemoryAllocationSite& s1,
                                const VirtualMemoryAllocationSite& s2) {
  if (s1.reserved() == s2.reserved()) {
    return 0;
  } else if (s1.reserved() > s2.reserved()) {
    return -1;
  } else {
    return 1;
  }
}

// Sort into allocation site addresses order for baseline comparison
int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
  return s1.call_stack()->compare(*s2.call_stack());
}

int compare_virtual_memory_site(const VirtualMemoryAllocationSite& s1,
                                const VirtualMemoryAllocationSite& s2) {
  return s1.call_stack()->compare(*s2.call_stack());
}
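
// Note: the SortedLinkedList instantiations below take one of the comparators
// above as a template argument and keep their elements ordered on insert.
// Illustrative sketch of the assumed contract (names a/b are placeholders,
// not part of this file):
//
//   SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
//     sites(arena);
//   sites.add(a);   // list stays in descending-size order,
//   sites.add(b);   // so head() is always the largest allocation site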

/*
 * Walker to walk malloc allocation site table
 */
class MallocAllocationSiteWalker : public MallocSiteWalker {
 private:
  SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
                 _malloc_sites;
  size_t         _count;

  // Entries in MallocSiteTable with size = 0 and count = 0
  // appear when the malloc site is no longer there.
 public:
  MallocAllocationSiteWalker(Arena* arena) : _count(0), _malloc_sites(arena) {
  }

  inline size_t count() const { return _count; }

  LinkedList<MallocSite>* malloc_sites() {
    return &_malloc_sites;
  }

  bool do_malloc_site(const MallocSite* site) {
    if (site->size() >= MemBaseline::SIZE_THRESHOLD) {
      if (_malloc_sites.add(*site) != NULL) {
        _count++;
        return true;
      } else {
        return false;  // OOM
      }
    } else {
      // malloc site does not meet threshold, ignore and continue
      return true;
    }
  }
};
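
// Usage note: MallocSiteTable::walk_malloc_site() is expected to invoke
// do_malloc_site() once per live table entry and to abort the walk as soon
// as a callback returns false, so an OOM while copying a site stops the walk.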

// Compare virtual memory region's base address
int compare_virtual_memory_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

// Walk all virtual memory regions for baselining
class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
 private:
  SortedLinkedList<ReservedMemoryRegion, compare_virtual_memory_base, ResourceObj::ARENA>
                 _virtual_memory_regions;
  size_t         _count;

 public:
  VirtualMemoryAllocationWalker(Arena* a) : _count(0), _virtual_memory_regions(a) {
  }

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->size() >= MemBaseline::SIZE_THRESHOLD) {
      if (_virtual_memory_regions.add(*rgn) != NULL) {
        _count++;
        return true;
      } else {
        return false;
      }
    }
    return true;
  }

  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
    return &_virtual_memory_regions;
  }
};


bool MemBaseline::baseline_summary() {
  assert(_malloc_memory_snapshot == NULL, "Malloc baseline not yet reset");
  assert(_virtual_memory_snapshot == NULL, "Virtual baseline not yet reset");

  _malloc_memory_snapshot = new (arena()) MallocMemorySnapshot();
  _virtual_memory_snapshot = new (arena()) VirtualMemorySnapshot();
  if (_malloc_memory_snapshot == NULL || _virtual_memory_snapshot == NULL) {
    return false;
  }
  MallocMemorySummary::snapshot(_malloc_memory_snapshot);
  VirtualMemorySummary::snapshot(_virtual_memory_snapshot);
  return true;
}
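
// A summary baseline only snapshots the aggregate malloc and virtual memory
// counters; per-call-site data is gathered separately by
// baseline_allocation_sites() when detail tracking is enabled.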

bool MemBaseline::baseline_allocation_sites() {
  assert(arena() != NULL, "Just check");
  // Malloc allocation sites
  MallocAllocationSiteWalker malloc_walker(arena());
  if (!MallocSiteTable::walk_malloc_site(&malloc_walker)) {
    return false;
  }

  _malloc_sites.set_head(malloc_walker.malloc_sites()->head());
  // The malloc sites are collected in size order
  _malloc_sites_order = by_size;

  // Virtual memory allocation sites
  VirtualMemoryAllocationWalker virtual_memory_walker(arena());
  if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
    return false;
  }

  // Virtual memory allocations are collected in call stack order
  _virtual_memory_allocations.set_head(virtual_memory_walker.virtual_memory_allocations()->head());

  if (!aggregate_virtual_memory_allocation_sites()) {
    return false;
  }
  // Virtual memory allocation sites are aggregated in call stack order
  _virtual_memory_sites_order = by_address;

  return true;
}
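
// Both site lists carry a SortingOrder tag (_malloc_sites_order,
// _virtual_memory_sites_order) describing their current order, so the
// accessors below can re-sort lazily and skip the work when the list is
// already in the requested order.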

bool MemBaseline::baseline(bool summaryOnly) {
  if (arena() == NULL) {
    _arena = new (std::nothrow, mtNMT) Arena(mtNMT);
    if (arena() == NULL) return false;
  }

  reset();

  _class_count = InstanceKlass::number_of_instance_classes();

  if (!baseline_summary()) {
    return false;
  }

  _baseline_type = Summary_baselined;

  // baseline details
  if (!summaryOnly &&
      MemTracker::tracking_level() == NMT_detail) {
    baseline_allocation_sites();
    _baseline_type = Detail_baselined;
  }

  return true;
}
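
// Typical driver (illustrative sketch; the jcmd handler NMTDCmd is the usual
// caller and may differ in detail):
//
//   MemBaseline baseline;
//   if (baseline.baseline(false)) {   // summaryOnly = false
//     MallocSiteIterator it = baseline.malloc_sites(MemBaseline::by_size);
//     // walk the sites, largest first ...
//   }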

int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
                            const VirtualMemoryAllocationSite& s2) {
  return s1.call_stack()->compare(*s2.call_stack());
}

bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
  SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site, ResourceObj::ARENA>
    allocation_sites(arena());

  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
  const ReservedMemoryRegion* rgn;
  VirtualMemoryAllocationSite* site;
  while ((rgn = itr.next()) != NULL) {
    VirtualMemoryAllocationSite tmp(*rgn->call_stack());
    site = allocation_sites.find(tmp);
    if (site == NULL) {
      LinkedListNode<VirtualMemoryAllocationSite>* node =
        allocation_sites.add(tmp);
      if (node == NULL) return false;
      site = node->data();
    }
    site->reserve_memory(rgn->size());
    site->commit_memory(rgn->committed_size());
  }

  _virtual_memory_sites.set_head(allocation_sites.head());
  return true;
}
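
// Example of the aggregation above (hypothetical numbers): two regions
// reserved from the same call stack, one 4M reserved / 1M committed and one
// 2M reserved / 2M committed, collapse into a single
// VirtualMemoryAllocationSite reporting 6M reserved and 3M committed.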

MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
  assert(!_malloc_sites.is_empty(), "Detail baseline?");
  switch(order) {
    case by_size:
      malloc_sites_to_size_order();
      break;
    case by_site:
      malloc_sites_to_allocation_site_order();
      break;
    case by_address:
    default:
      ShouldNotReachHere();
  }
  return MallocSiteIterator(_malloc_sites.head());
}

VirtualMemorySiteIterator MemBaseline::virtual_memory_sites(SortingOrder order) {
  assert(!_virtual_memory_sites.is_empty(), "Detail baseline?");
  switch(order) {
    case by_size:
      virtual_memory_sites_to_size_order();
      break;
    case by_site:
      virtual_memory_sites_to_reservation_site_order();
      break;
    case by_address:
    default:
      ShouldNotReachHere();
  }
  return VirtualMemorySiteIterator(_virtual_memory_sites.head());
}
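
// Both accessors assert that a detail baseline has been taken: the site
// lists are only populated by baseline_allocation_sites(), so calling them
// after a summary-only baseline is a programming error.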

// Sorting allocation sites in different orders
void MemBaseline::malloc_sites_to_size_order() {
  if (_malloc_sites_order != by_size) {
    SortedLinkedList<MallocSite, compare_malloc_size, ResourceObj::ARENA>
      tmp(arena());

    // Add malloc sites to sorted linked list to sort into size order
    tmp.move(&_malloc_sites);
    _malloc_sites.set_head(tmp.head());
    tmp.set_head(NULL);
    _malloc_sites_order = by_size;
  }
}

void MemBaseline::malloc_sites_to_allocation_site_order() {
  if (_malloc_sites_order != by_site) {
    SortedLinkedList<MallocSite, compare_malloc_site, ResourceObj::ARENA>
      tmp(arena());
    // Add malloc sites to sorted linked list to sort into site (address) order
    tmp.move(&_malloc_sites);
    _malloc_sites.set_head(tmp.head());
    tmp.set_head(NULL);
    _malloc_sites_order = by_site;
  }
}

void MemBaseline::virtual_memory_sites_to_size_order() {
  if (_virtual_memory_sites_order != by_size) {
    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_size, ResourceObj::ARENA>
      tmp(arena());

    tmp.move(&_virtual_memory_sites);

    _virtual_memory_sites.set_head(tmp.head());
    tmp.set_head(NULL);
    _virtual_memory_sites_order = by_size;
  }
}

void MemBaseline::virtual_memory_sites_to_reservation_site_order() {
  if (_virtual_memory_sites_order != by_site) {
    SortedLinkedList<VirtualMemoryAllocationSite, compare_virtual_memory_site, ResourceObj::ARENA>
      tmp(arena());

    tmp.add(&_virtual_memory_sites);

    _virtual_memory_sites.set_head(tmp.head());
    tmp.set_head(NULL);

    _virtual_memory_sites_order = by_site;
  }
}
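
// Design note: re-sorting moves the nodes into a temporary SortedLinkedList
// keyed by the target comparator, then steals its head back. With arena
// allocation the old list nodes need no individual freeing; detaching tmp's
// head (tmp.set_head(NULL)) avoids leaving two lists pointing at the same
// nodes.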
|