src/share/vm/services/virtualMemoryTracker.hpp

changeset 7074:833b0f92429a
/*
 * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"


/*
 * Virtual memory counter
 */
class VirtualMemory VALUE_OBJ_CLASS_SPEC {
 private:
  size_t _reserved;
  size_t _committed;

 public:
  VirtualMemory() : _reserved(0), _committed(0) { }

  inline void reserve_memory(size_t sz) { _reserved += sz; }
  inline void commit_memory (size_t sz) {
    _committed += sz;
    assert(_committed <= _reserved, "Sanity check");
  }

  inline void release_memory (size_t sz) {
    assert(_reserved >= sz, "Negative amount");
    _reserved -= sz;
  }

  inline void uncommit_memory(size_t sz) {
    assert(_committed >= sz, "Negative amount");
    _committed -= sz;
  }

  void reset() {
    _reserved  = 0;
    _committed = 0;
  }

  inline size_t reserved()  const { return _reserved;  }
  inline size_t committed() const { return _committed; }
};
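
// Usage sketch (illustrative only, assuming HotSpot's M byte-size constant):
// the two counters move independently, so a typical mapping lifecycle is
// accounted for as follows.
//
//   VirtualMemory vm;
//   vm.reserve_memory(4 * M);    // claim 4MB of address space
//   vm.commit_memory(1 * M);     // back 1MB of it with storage
//   assert(vm.reserved() == 4 * M && vm.committed() == 1 * M, "bookkeeping");
//   vm.uncommit_memory(1 * M);   // return the storage, keep the range
//   vm.release_memory(4 * M);    // return the address range itself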

// Virtual memory allocation site, keeps track of where the virtual memory is reserved.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
 public:
  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
    AllocationSite<VirtualMemory>(stack) { }

  inline void reserve_memory (size_t sz) { data()->reserve_memory(sz);  }
  inline void commit_memory  (size_t sz) { data()->commit_memory(sz);   }
  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
  inline void release_memory (size_t sz) { data()->release_memory(sz);  }
  inline size_t reserved()  const { return peek()->reserved();  }
  inline size_t committed() const { return peek()->committed(); }
};
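
// Usage sketch (illustrative only; the NativeCallStack constructor arguments
// are an assumption based on nativeCallStack.hpp): an allocation site ties a
// VirtualMemory counter to the call stack it was reserved from, so repeated
// reservations from the same stack accumulate in one record.
//
//   NativeCallStack stack(0, true);           // capture the current stack
//   VirtualMemoryAllocationSite site(stack);
//   site.reserve_memory(2 * M);
//   site.commit_memory(1 * M);
//   assert(site.committed() <= site.reserved(), "per-site sanity");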

class VirtualMemorySummary;

// This class represents a snapshot of virtual memory at a given time.
// The latest snapshot is saved in a static area.
class VirtualMemorySnapshot : public ResourceObj {
  friend class VirtualMemorySummary;

 private:
  VirtualMemory _virtual_memory[mt_number_of_types];

 public:
  inline VirtualMemory* by_type(MEMFLAGS flag) {
    int index = NMTUtil::flag_to_index(flag);
    return &_virtual_memory[index];
  }

  inline VirtualMemory* by_index(int index) {
    assert(index >= 0, "Index out of bounds");
    assert(index < mt_number_of_types, "Index out of bounds");
    return &_virtual_memory[index];
  }

  inline size_t total_reserved() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].reserved();
    }
    return amount;
  }

  inline size_t total_committed() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].committed();
    }
    return amount;
  }

  inline void reset() {
    for (int index = 0; index < mt_number_of_types; index ++) {
      _virtual_memory[index].reset();
    }
  }

  void copy_to(VirtualMemorySnapshot* s) {
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_virtual_memory[index] = _virtual_memory[index];
    }
  }
};
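
// Usage sketch (illustrative only): snapshots are copied by value, so two
// copies can be compared to see how each memory type changed over time.
//
//   VirtualMemorySnapshot before;
//   VirtualMemorySummary::snapshot(&before);   // copy the live counters
//   size_t heap_reserved = before.by_type(mtJavaHeap)->reserved();
//   size_t all_reserved  = before.total_reserved();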

class VirtualMemorySummary : AllStatic {
 public:
  static void initialize();

  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->reserve_memory(size);
  }

  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->commit_memory(size);
  }

  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->uncommit_memory(size);
  }

  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->release_memory(size);
  }

  // Move virtual memory from one memory type to another.
  // Virtual memory can be reserved before it is associated with a memory type, and tagged
  // as 'unknown'. Once the memory is tagged, it is moved from the 'unknown' type to the
  // specified memory type.
  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->release_memory(size);
    as_snapshot()->by_type(to)->reserve_memory(size);
  }

  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->uncommit_memory(size);
    as_snapshot()->by_type(to)->commit_memory(size);
  }

  static inline void snapshot(VirtualMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
  }

  static inline void reset() {
    as_snapshot()->reset();
  }

  static VirtualMemorySnapshot* as_snapshot() {
    return (VirtualMemorySnapshot*)_snapshot;
  }

 private:
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};
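
// Sketch of the retagging flow described above (illustrative only): memory
// reserved before its type is known is booked under mtNone and moved once
// the caller tags the region.
//
//   VirtualMemorySummary::record_reserved_memory(size, mtNone);
//   // ... later, when the region is identified as a thread stack:
//   VirtualMemorySummary::move_reserved_memory(mtNone, mtThreadStack, size);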


/*
 * A virtual memory region
 */
class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
 private:
  address _base_address;
  size_t  _size;

 public:
  VirtualMemoryRegion(address addr, size_t size) :
    _base_address(addr), _size(size) {
    assert(addr != NULL, "Invalid address");
    assert(size > 0, "Invalid size");
  }

  inline address base() const { return _base_address;   }
  inline address end()  const { return base() + size(); }
  inline size_t  size() const { return _size;           }

  inline bool is_empty() const { return size() == 0; }

  inline bool contain_address(address addr) const {
    return (addr >= base() && addr < end());
  }

  inline bool contain_region(address addr, size_t size) const {
    return contain_address(addr) && contain_address(addr + size - 1);
  }

  inline bool same_region(address addr, size_t sz) const {
    return (addr == base() && sz == size());
  }

  inline bool overlap_region(address addr, size_t sz) const {
    VirtualMemoryRegion rgn(addr, sz);
    return contain_address(addr) ||
           contain_address(addr + sz - 1) ||
           rgn.contain_address(base()) ||
           rgn.contain_address(end() - 1);
  }

  inline bool adjacent_to(address addr, size_t sz) const {
    return (addr == end() || (addr + sz) == base());
  }

  void exclude_region(address addr, size_t sz) {
    assert(contain_region(addr, sz), "Region not contained");
    assert(addr == base() || addr + sz == end(), "Cannot exclude from the middle");
    size_t new_size = size() - sz;

    if (addr == base()) {
      set_base(addr + sz);
    }
    set_size(new_size);
  }

  void expand_region(address addr, size_t sz) {
    assert(adjacent_to(addr, sz), "Not adjacent regions");
    if (base() == addr + sz) {
      set_base(addr);
    }
    set_size(size() + sz);
  }

 protected:
  void set_base(address base) {
    assert(base != NULL, "Sanity check");
    _base_address = base;
  }

  void set_size(size_t size) {
    assert(size > 0, "Sanity check");
    _size = size;
  }
};
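
// Worked example (illustrative only): exclude_region() trims from either end
// and expand_region() grows into an adjacent range, so a sub-range can be
// split off and re-absorbed without losing track of the bounds.
//
//   VirtualMemoryRegion rgn(base, 8 * K);     // covers  [base, base + 8K)
//   rgn.exclude_region(base, 2 * K);          // now     [base + 2K, base + 8K)
//   rgn.expand_region(base, 2 * K);           // back to [base, base + 8K)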


class CommittedMemoryRegion : public VirtualMemoryRegion {
 private:
  NativeCallStack _stack;

 public:
  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
    VirtualMemoryRegion(addr, size), _stack(stack) { }

  inline int compare(const CommittedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size()) ||
        adjacent_to  (rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const CommittedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }
};
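
// Note for readers: compare() above intentionally reports equality for
// regions that overlap or are adjacent, not just for identical base
// addresses. When a new committed range is inserted into the sorted list,
// the lookup therefore lands on the neighbor it should be merged with; for
// example, [0x1000, 0x2000) and [0x2000, 0x3000) compare as equal here.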


typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;

int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
class ReservedMemoryRegion : public VirtualMemoryRegion {
 private:
  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
    _committed_regions;

  NativeCallStack _stack;
  MEMFLAGS        _flag;

  bool            _all_committed;

 public:
  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) :
    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
    _all_committed(false) { }


  ReservedMemoryRegion(address base, size_t size) :
    VirtualMemoryRegion(base, size), _stack(emptyStack), _flag(mtNone),
    _all_committed(false) { }

  // Copy constructor
  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
    VirtualMemoryRegion(rr.base(), rr.size()) {
    *this = rr;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const { return &_stack; }

  void set_flag(MEMFLAGS flag);
  inline MEMFLAGS flag() const { return _flag; }

  inline int compare(const ReservedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const ReservedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  bool remove_uncommitted_region(address addr, size_t size);

  size_t committed_size() const;

  // Move committed regions that are higher than the specified address to
  // the new region.
  void move_committed_regions(address addr, ReservedMemoryRegion& rgn);

  inline bool all_committed() const { return _all_committed; }
  void set_all_committed(bool b);

  CommittedRegionIterator iterate_committed_regions() const {
    return CommittedRegionIterator(_committed_regions.head());
  }

  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
    set_base(other.base());
    set_size(other.size());

    _stack = *other.call_stack();
    _flag  = other.flag();
    _all_committed = other.all_committed();
    if (other.all_committed()) {
      set_all_committed(true);
    } else {
      CommittedRegionIterator itr = other.iterate_committed_regions();
      const CommittedMemoryRegion* rgn = itr.next();
      while (rgn != NULL) {
        _committed_regions.add(*rgn);
        rgn = itr.next();
      }
    }
    return *this;
  }

 private:
  // The committed region fully contains the uncommitted region; subtract the
  // uncommitted region from this committed region.
  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
    address addr, size_t sz);

  bool add_committed_region(const CommittedMemoryRegion& rgn) {
    assert(rgn.base() != NULL, "Invalid base address");
    assert(rgn.size() > 0, "Invalid size");
    return _committed_regions.add(rgn) != NULL;
  }
};

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);

class VirtualMemoryWalker : public StackObj {
 public:
  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};
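
// Sketch of a walker (illustrative only; the printed format is hypothetical):
// subclasses override do_allocation_site() and return true to keep walking.
//
//   class ReservedRegionPrinter : public VirtualMemoryWalker {
//    public:
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       tty->print_cr("Reserved " SIZE_FORMAT " bytes", rgn->size());
//       return true;   // returning false stops the walk early
//     }
//   };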

// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
 public:
  static bool initialize(NMT_TrackingLevel level);

  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone, bool all_committed = false);

  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
  static bool remove_uncommitted_region (address base_addr, size_t size);
  static bool remove_released_region    (address base_addr, size_t size);
  static void set_reserved_region_type  (address addr, MEMFLAGS flag);

  // Walk virtual memory data structure for creating baseline, etc.
  static bool walk_virtual_memory(VirtualMemoryWalker* walker);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

 private:
  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> _reserved_regions;
};
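
// Usage sketch (illustrative only): MemTracker is the normal entry point, but
// the lifecycle of a tracked mapping maps onto this API roughly as follows.
//
//   VirtualMemoryTracker::add_reserved_region(base, size, stack);    // reserve
//   VirtualMemoryTracker::set_reserved_region_type(base, mtCode);    // tag once the user is known
//   VirtualMemoryTracker::add_committed_region(base, csize, stack);  // commit part of it
//   VirtualMemoryTracker::remove_uncommitted_region(base, csize);    // uncommit
//   VirtualMemoryTracker::remove_released_region(base, size);        // release the mapping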


#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
