src/share/vm/services/virtualMemoryTracker.cpp

/*
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/threadCritical.hpp"
#include "services/virtualMemoryTracker.hpp"

size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity check");
  // Use placement new to initialize the static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

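// All reserved memory regions, kept sorted by base address so lookups and
// adjacency checks can rely on list order.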
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

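// Track a newly committed range inside this reserved region. Committed
// regions are kept sorted by base address. A commit adjacent to an existing
// region that shares its call stack is merged into it; otherwise it becomes
// a separate node so per-call-site attribution is preserved. Roughly:
//
//   before:  |---A---|            (A committed from stack S)
//   commit:          |---C---|    (C committed from stack S)
//   after:   |-------A'------|    (merged; a different stack would keep
//                                  A and C as separate nodes)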
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Must contain this region");

  if (all_committed()) return true;

  CommittedMemoryRegion committed_rgn(addr, size, stack);
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    if (rgn->same_region(addr, size)) {
      return true;
    }

    if (rgn->adjacent_to(addr, size)) {
      // Check if the next region covers this committed region; adjacent
      // regions may not be mergeable because they were committed from
      // different call stacks.
      LinkedListNode<CommittedMemoryRegion>* next = node->next();
      if (next != NULL && next->data()->contain_region(addr, size)) {
        if (next->data()->same_region(addr, size)) {
          next->data()->set_call_stack(stack);
        }
        return true;
      }
      if (rgn->call_stack()->equals(stack)) {
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        // The two adjacent regions have the same call stack, merge them.
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
      // Different call stacks: keep the regions separate and insert the new
      // one in sorted position.
      VirtualMemorySummary::record_committed_memory(size, flag());
      if (rgn->base() > addr) {
        return _committed_regions.insert_before(committed_rgn, node) != NULL;
      } else {
        return _committed_regions.insert_after(committed_rgn, node) != NULL;
      }
    }
    assert(rgn->contain_region(addr, size), "Must cover this region");
    return true;
  } else {
    // New committed region.
    VirtualMemorySummary::record_committed_memory(size, flag());
    return add_committed_region(committed_rgn);
  }
}

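// Toggle the all-committed shortcut. A fully committed region keeps no
// committed-region list; only the summary counter is updated here.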
void ReservedMemoryRegion::set_all_committed(bool b) {
  if (all_committed() != b) {
    _all_committed = b;
    if (b) {
      VirtualMemorySummary::record_committed_memory(size(), flag());
    }
  }
}

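// Uncommit [addr, addr + size) from the committed region held by 'node'.
// The caller guarantees the range is contained in, but not equal to, the
// region, so the region either shrinks from one end or is split in two:
//
//   before:  |------------ rgn ------------|
//   remove:          |-- addr, size --|
//   after:   |- rgn -|                |- high_rgn -|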
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
    address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    // The range touches the head or the tail; just shrink this region.
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // Split this region.
    address top = rgn->end();
    // Use this region for the lower part.
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // Create a new region for the higher part.
    address high_base = addr + size;
    size_t high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }
}

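// Uncommit [addr, addr + sz) from this reserved region. An all-committed
// region is first converted to an explicit committed-region list; otherwise
// the sorted list is walked and every committed region overlapping the
// range is removed, trimmed, or split via the node-based overload above.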
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  // Uncommitting part of a thread stack (e.g. stack guard pages) is ignored;
  // only whole-stack uncommits are tracked.
  if (flag() == mtThreadStack && !same_region(addr, sz)) {
    return true;
  }

  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  if (all_committed()) {
    assert(_committed_regions.is_empty(), "Sanity check");
    assert(contain_region(addr, sz), "Reserved region does not contain this region");
    set_all_committed(false);
    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
    if (same_region(addr, sz)) {
      return true;
    } else {
      CommittedMemoryRegion rgn(base(), size(), *call_stack());
      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
        rgn.exclude_region(addr, sz);
        return add_committed_region(rgn);
      } else {
        // Split this region.
        // Top of the whole region.
        address top = rgn.end();
        // Use this region for the lower part.
        size_t exclude_size = rgn.end() - addr;
        rgn.exclude_region(addr, exclude_size);
        if (add_committed_region(rgn)) {
          // Higher part.
          address high_base = addr + sz;
          size_t high_size = top - high_base;
          CommittedMemoryRegion high_rgn(high_base, high_size, emptyStack);
          return add_committed_region(high_rgn);
        } else {
          return false;
        }
      }
    }
  } else {
    // Walk the whole list to remove the committed regions that fall into the
    // specified range.
    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
    VirtualMemoryRegion uncommitted_rgn(addr, sz);

    while (head != NULL && !uncommitted_rgn.is_empty()) {
      CommittedMemoryRegion* crgn = head->data();
      // This committed region overlaps the region to uncommit.
      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // Exact match; removing the node is all that is needed.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          _committed_regions.remove_after(prev);
          return true;
        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // This committed region contains the whole uncommitted region.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
          // The uncommitted region covers this whole committed region; drop
          // the node and keep walking.
          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          head = head->next();
          _committed_regions.remove_after(prev);
          continue;
        } else if (crgn->contain_address(uncommitted_rgn.base())) {
          // Partial overlap: trim the tail of this committed region.
          size_t toUncommitted = crgn->end() - uncommitted_rgn.base();
          crgn->exclude_region(uncommitted_rgn.base(), toUncommitted);
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted);
          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
        } else if (uncommitted_rgn.contain_address(crgn->base())) {
          // Partial overlap: trim the head of this committed region.
          size_t toUncommitted = uncommitted_rgn.end() - crgn->base();
          crgn->exclude_region(crgn->base(), toUncommitted);
          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted,
              toUncommitted);
          VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag());
        }
      }
      prev = head;
      head = head->next();
    }
  }

  return true;
}

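// Move all committed regions that start at or above 'addr' from this
// reserved region into 'rgn'. Used when a reserved region is split by a
// partial release: committed regions above the split point belong to the
// new (higher) reserved region.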
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // Find the first committed region that starts at or above 'addr'; it and
  // everything after it move to 'rgn'.
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      // 'head' and everything after it move to 'rgn'; terminate the lower
      // list here so the two lists do not share a tail.
      prev->set_next(NULL);
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}

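// Committed bytes within this reserved region: the whole reservation when
// all-committed, otherwise the sum over the committed-region list.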
size_t ReservedMemoryRegion::committed_size() const {
  if (all_committed()) {
    return size();
  } else {
    size_t committed = 0;
    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    while (head != NULL) {
      committed += head->data()->size();
      head = head->next();
    }
    return committed;
  }
}

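// Retag this region with memory type 'f', moving its reserved and committed
// counters to the new type in the summary. Only an untyped (mtNone) region
// may be retagged.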
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

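// Register a reserved region. A reservation of exactly the same range
// refreshes the call stack and flag; an adjacent one extends the existing
// region; any other overlap is tolerated only for thread stacks (see the
// comment in the overlap branch below).
//
// A minimal sketch of how a reservation is expected to reach this point.
// This is a hypothetical call site for illustration, assuming the
// MemTracker wrapper API and the mtGC flag; it is not code from this file:
//
//   address base = (address)os::reserve_memory(size);
//   MemTracker::record_virtual_memory_reserve((void*)base, size,
//                                             CALLER_PC, mtGC);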
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  ReservedMemoryRegion rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions.add(rgn);
    if (node != NULL) {
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapping reservation. This can happen with thread stacks: a JNI
      // thread that exits without detaching from the VM leaks its JavaThread
      // object, and with it the tracked stack region.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with the new region.

        // Release the old region.
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add the new region.
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }
}

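// Tag the reserved region containing 'addr' with a memory type. A one-byte
// probe region is enough to locate the enclosing reservation in the sorted
// list.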
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");

  ReservedMemoryRegion rgn(addr, 1);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}

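// Record a commit inside an existing reservation. The range must have been
// reported through add_reserved_region() first; callers are expected to
// serialize access (the MemTracker wrappers take ThreadCritical).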
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
    const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->add_committed_region(addr, size, stack);
}

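// Record an uncommit. As with commits, the range must fall entirely inside
// a known reserved region.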
bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->remove_uncommitted_region(addr, size);
}

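// Record a release. Releasing an entire reservation removes it from the
// list; releasing the middle splits it, and the committed regions above the
// split point are handed to the new (higher) reserved region:
//
//   before:  |------------ reserved ------------|
//   release:          |-- addr, size --|
//   after:   |- rsvd -|                |- high_rgn -|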
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // Uncommit any committed regions within the released range.
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions.remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
          *reserved_rgn->call_stack(), reserved_rgn->flag());

      // Use the original region for the lower part.
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions.add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}

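// Walk all reserved regions under ThreadCritical, handing each region to
// the walker; stops early if the walker returns false.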
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  ThreadCritical tc;
  LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions.head();
  while (head != NULL) {
    const ReservedMemoryRegion* rgn = head->peek();
    if (!walker->do_allocation_site(rgn)) {
      return false;
    }
    head = head->next();
  }
  return true;
}

// Transition the virtual memory tracking level. Dropping to NMT_minimal
// releases all per-region tracking data.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  if (from == NMT_minimal) {
    assert(to == NMT_summary || to == NMT_detail, "Just check");
    VirtualMemorySummary::reset();
  } else if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    _reserved_regions.clear();
  }

  return true;
}
