Wed, 27 Aug 2014 08:19:12 -0400
8046598: Scalable Native memory tracking development
Summary: Enhance scalability of native memory tracking
Reviewed-by: coleenp, ctornqvi, gtriantafill
zgu@7074 | 1 | /* |
zgu@7074 | 2 | * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. |
zgu@7074 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
zgu@7074 | 4 | * |
zgu@7074 | 5 | * This code is free software; you can redistribute it and/or modify it |
zgu@7074 | 6 | * under the terms of the GNU General Public License version 2 only, as |
zgu@7074 | 7 | * published by the Free Software Foundation. |
zgu@7074 | 8 | * |
zgu@7074 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
zgu@7074 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
zgu@7074 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
zgu@7074 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
zgu@7074 | 13 | * accompanied this code). |
zgu@7074 | 14 | * |
zgu@7074 | 15 | * You should have received a copy of the GNU General Public License version |
zgu@7074 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
zgu@7074 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
zgu@7074 | 18 | * |
zgu@7074 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
zgu@7074 | 20 | * or visit www.oracle.com if you need additional information or have any |
zgu@7074 | 21 | * questions. |
zgu@7074 | 22 | * |
zgu@7074 | 23 | */ |
zgu@7074 | 24 | #include "precompiled.hpp" |
zgu@7074 | 25 | |
zgu@7074 | 26 | #include "runtime/threadCritical.hpp" |
zgu@7074 | 27 | #include "services/virtualMemoryTracker.hpp" |
zgu@7074 | 28 | |
// Storage backing the VirtualMemorySnapshot singleton. Declared as a raw
// size_t array (sized by CALC_OBJ_SIZE_IN_TYPE) so the snapshot lives in
// statically allocated, zero-initialized storage; initialize() below
// placement-news the object into this buffer.
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
zgu@7074 | 31 | void VirtualMemorySummary::initialize() { |
zgu@7074 | 32 | assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check"); |
zgu@7074 | 33 | // Use placement operator new to initialize static data area. |
zgu@7074 | 34 | ::new ((void*)_snapshot) VirtualMemorySnapshot(); |
zgu@7074 | 35 | } |
zgu@7074 | 36 | |
// All tracked reserved regions, kept sorted by base address
// (ordering supplied by compare_reserved_region_base).
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base> VirtualMemoryTracker::_reserved_regions;
zgu@7074 | 38 | |
// Ordering function for committed regions; delegates to
// CommittedMemoryRegion::compare().
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}
zgu@7074 | 42 | |
// Ordering function for reserved regions (used by _reserved_regions);
// delegates to ReservedMemoryRegion::compare().
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}
zgu@7074 | 46 | |
// Record that [addr, addr + size) inside this reserved region has been
// committed, attributed to 'stack'. Keeps the sorted committed-region list
// coalesced where call stacks match and updates the summary counters.
// Returns false only if a required list insertion fails.
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // Whole reservation already counted as committed: nothing to track.
  if (all_committed()) return true;

  CommittedMemoryRegion committed_rgn(addr, size, stack);
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    if (rgn->same_region(addr, size)) {
      // Exact duplicate of an already-tracked commit.
      return true;
    }

    if (rgn->adjacent_to(addr, size)) {
      // check if the next region covers this committed region,
      // the regions may not be merged due to different call stacks
      LinkedListNode<CommittedMemoryRegion>* next =
        node->next();
      if (next != NULL && next->data()->contain_region(addr, size)) {
        if (next->data()->same_region(addr, size)) {
          // Re-commit of exactly the next region: refresh its attribution.
          next->data()->set_call_stack(stack);
        }
        return true;
      }
      if (rgn->call_stack()->equals(stack)) {
        // Uncommit old size, then re-commit the expanded size: net +size.
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        // the two adjacent regions have the same call stack, merge them
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
      // Adjacent but different call stacks: keep a separate node, inserted
      // on whichever side preserves address ordering.
      VirtualMemorySummary::record_committed_memory(size, flag());
      if (rgn->base() > addr) {
        return _committed_regions.insert_before(committed_rgn, node) != NULL;
      } else {
        return _committed_regions.insert_after(committed_rgn, node) != NULL;
      }
    }
    // Neither equal nor adjacent: the found region must already cover it.
    assert(rgn->contain_region(addr, size), "Must cover this region");
    return true;
  } else {
    // New committed region
    VirtualMemorySummary::record_committed_memory(size, flag());
    return add_committed_region(committed_rgn);
  }
}
zgu@7074 | 95 | |
zgu@7074 | 96 | void ReservedMemoryRegion::set_all_committed(bool b) { |
zgu@7074 | 97 | if (all_committed() != b) { |
zgu@7074 | 98 | _all_committed = b; |
zgu@7074 | 99 | if (b) { |
zgu@7074 | 100 | VirtualMemorySummary::record_committed_memory(size(), flag()); |
zgu@7074 | 101 | } |
zgu@7074 | 102 | } |
zgu@7074 | 103 | } |
zgu@7074 | 104 | |
zgu@7074 | 105 | bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node, |
zgu@7074 | 106 | address addr, size_t size) { |
zgu@7074 | 107 | assert(addr != NULL, "Invalid address"); |
zgu@7074 | 108 | assert(size > 0, "Invalid size"); |
zgu@7074 | 109 | |
zgu@7074 | 110 | CommittedMemoryRegion* rgn = node->data(); |
zgu@7074 | 111 | assert(rgn->contain_region(addr, size), "Has to be contained"); |
zgu@7074 | 112 | assert(!rgn->same_region(addr, size), "Can not be the same region"); |
zgu@7074 | 113 | |
zgu@7074 | 114 | if (rgn->base() == addr || |
zgu@7074 | 115 | rgn->end() == addr + size) { |
zgu@7074 | 116 | rgn->exclude_region(addr, size); |
zgu@7074 | 117 | return true; |
zgu@7074 | 118 | } else { |
zgu@7074 | 119 | // split this region |
zgu@7074 | 120 | address top =rgn->end(); |
zgu@7074 | 121 | // use this region for lower part |
zgu@7074 | 122 | size_t exclude_size = rgn->end() - addr; |
zgu@7074 | 123 | rgn->exclude_region(addr, exclude_size); |
zgu@7074 | 124 | |
zgu@7074 | 125 | // higher part |
zgu@7074 | 126 | address high_base = addr + size; |
zgu@7074 | 127 | size_t high_size = top - high_base; |
zgu@7074 | 128 | |
zgu@7074 | 129 | CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack()); |
zgu@7074 | 130 | LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn); |
zgu@7074 | 131 | assert(high_node == NULL || node->next() == high_node, "Should be right after"); |
zgu@7074 | 132 | return (high_node != NULL); |
zgu@7074 | 133 | } |
zgu@7074 | 134 | |
zgu@7074 | 135 | return false; |
zgu@7074 | 136 | } |
zgu@7074 | 137 | |
zgu@7074 | 138 | bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) { |
zgu@7074 | 139 | // uncommit stack guard pages |
zgu@7074 | 140 | if (flag() == mtThreadStack && !same_region(addr, sz)) { |
zgu@7074 | 141 | return true; |
zgu@7074 | 142 | } |
zgu@7074 | 143 | |
zgu@7074 | 144 | assert(addr != NULL, "Invalid address"); |
zgu@7074 | 145 | assert(sz > 0, "Invalid size"); |
zgu@7074 | 146 | |
zgu@7074 | 147 | if (all_committed()) { |
zgu@7074 | 148 | assert(_committed_regions.is_empty(), "Sanity check"); |
zgu@7074 | 149 | assert(contain_region(addr, sz), "Reserved region does not contain this region"); |
zgu@7074 | 150 | set_all_committed(false); |
zgu@7074 | 151 | VirtualMemorySummary::record_uncommitted_memory(sz, flag()); |
zgu@7074 | 152 | if (same_region(addr, sz)) { |
zgu@7074 | 153 | return true; |
zgu@7074 | 154 | } else { |
zgu@7074 | 155 | CommittedMemoryRegion rgn(base(), size(), *call_stack()); |
zgu@7074 | 156 | if (rgn.base() == addr || rgn.end() == (addr + sz)) { |
zgu@7074 | 157 | rgn.exclude_region(addr, sz); |
zgu@7074 | 158 | return add_committed_region(rgn); |
zgu@7074 | 159 | } else { |
zgu@7074 | 160 | // split this region |
zgu@7074 | 161 | // top of the whole region |
zgu@7074 | 162 | address top =rgn.end(); |
zgu@7074 | 163 | // use this region for lower part |
zgu@7074 | 164 | size_t exclude_size = rgn.end() - addr; |
zgu@7074 | 165 | rgn.exclude_region(addr, exclude_size); |
zgu@7074 | 166 | if (add_committed_region(rgn)) { |
zgu@7074 | 167 | // higher part |
zgu@7074 | 168 | address high_base = addr + sz; |
zgu@7074 | 169 | size_t high_size = top - high_base; |
zgu@7074 | 170 | CommittedMemoryRegion high_rgn(high_base, high_size, emptyStack); |
zgu@7074 | 171 | return add_committed_region(high_rgn); |
zgu@7074 | 172 | } else { |
zgu@7074 | 173 | return false; |
zgu@7074 | 174 | } |
zgu@7074 | 175 | } |
zgu@7074 | 176 | } |
zgu@7074 | 177 | } else { |
zgu@7074 | 178 | // we have to walk whole list to remove the committed regions in |
zgu@7074 | 179 | // specified range |
zgu@7074 | 180 | LinkedListNode<CommittedMemoryRegion>* head = |
zgu@7074 | 181 | _committed_regions.head(); |
zgu@7074 | 182 | LinkedListNode<CommittedMemoryRegion>* prev = NULL; |
zgu@7074 | 183 | VirtualMemoryRegion uncommitted_rgn(addr, sz); |
zgu@7074 | 184 | |
zgu@7074 | 185 | while (head != NULL && !uncommitted_rgn.is_empty()) { |
zgu@7074 | 186 | CommittedMemoryRegion* crgn = head->data(); |
zgu@7074 | 187 | // this committed region overlaps to region to uncommit |
zgu@7074 | 188 | if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) { |
zgu@7074 | 189 | if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) { |
zgu@7074 | 190 | // find matched region, remove the node will do |
zgu@7074 | 191 | VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag()); |
zgu@7074 | 192 | _committed_regions.remove_after(prev); |
zgu@7074 | 193 | return true; |
zgu@7074 | 194 | } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) { |
zgu@7074 | 195 | // this committed region contains whole uncommitted region |
zgu@7074 | 196 | VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag()); |
zgu@7074 | 197 | return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size()); |
zgu@7074 | 198 | } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) { |
zgu@7074 | 199 | // this committed region has been uncommitted |
zgu@7074 | 200 | size_t exclude_size = crgn->end() - uncommitted_rgn.base(); |
zgu@7074 | 201 | uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size); |
zgu@7074 | 202 | VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag()); |
zgu@7074 | 203 | LinkedListNode<CommittedMemoryRegion>* tmp = head; |
zgu@7074 | 204 | head = head->next(); |
zgu@7074 | 205 | _committed_regions.remove_after(prev); |
zgu@7074 | 206 | continue; |
zgu@7074 | 207 | } else if (crgn->contain_address(uncommitted_rgn.base())) { |
zgu@7074 | 208 | size_t toUncommitted = crgn->end() - uncommitted_rgn.base(); |
zgu@7074 | 209 | crgn->exclude_region(uncommitted_rgn.base(), toUncommitted); |
zgu@7074 | 210 | uncommitted_rgn.exclude_region(uncommitted_rgn.base(), toUncommitted); |
zgu@7074 | 211 | VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag()); |
zgu@7074 | 212 | } else if (uncommitted_rgn.contain_address(crgn->base())) { |
zgu@7074 | 213 | size_t toUncommitted = uncommitted_rgn.end() - crgn->base(); |
zgu@7074 | 214 | crgn->exclude_region(crgn->base(), toUncommitted); |
zgu@7074 | 215 | uncommitted_rgn.exclude_region(uncommitted_rgn.end() - toUncommitted, |
zgu@7074 | 216 | toUncommitted); |
zgu@7074 | 217 | VirtualMemorySummary::record_uncommitted_memory(toUncommitted, flag()); |
zgu@7074 | 218 | } |
zgu@7074 | 219 | } |
zgu@7074 | 220 | prev = head; |
zgu@7074 | 221 | head = head->next(); |
zgu@7074 | 222 | } |
zgu@7074 | 223 | } |
zgu@7074 | 224 | |
zgu@7074 | 225 | return true; |
zgu@7074 | 226 | } |
zgu@7074 | 227 | |
zgu@7074 | 228 | void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) { |
zgu@7074 | 229 | assert(addr != NULL, "Invalid address"); |
zgu@7074 | 230 | |
zgu@7074 | 231 | // split committed regions |
zgu@7074 | 232 | LinkedListNode<CommittedMemoryRegion>* head = |
zgu@7074 | 233 | _committed_regions.head(); |
zgu@7074 | 234 | LinkedListNode<CommittedMemoryRegion>* prev = NULL; |
zgu@7074 | 235 | |
zgu@7074 | 236 | while (head != NULL) { |
zgu@7074 | 237 | if (head->data()->base() >= addr) { |
zgu@7074 | 238 | break; |
zgu@7074 | 239 | } |
zgu@7074 | 240 | prev = head; |
zgu@7074 | 241 | head = head->next(); |
zgu@7074 | 242 | } |
zgu@7074 | 243 | |
zgu@7074 | 244 | if (head != NULL) { |
zgu@7074 | 245 | if (prev != NULL) { |
zgu@7074 | 246 | prev->set_next(head->next()); |
zgu@7074 | 247 | } else { |
zgu@7074 | 248 | _committed_regions.set_head(NULL); |
zgu@7074 | 249 | } |
zgu@7074 | 250 | } |
zgu@7074 | 251 | |
zgu@7074 | 252 | rgn._committed_regions.set_head(head); |
zgu@7074 | 253 | } |
zgu@7074 | 254 | |
zgu@7074 | 255 | size_t ReservedMemoryRegion::committed_size() const { |
zgu@7074 | 256 | if (all_committed()) { |
zgu@7074 | 257 | return size(); |
zgu@7074 | 258 | } else { |
zgu@7074 | 259 | size_t committed = 0; |
zgu@7074 | 260 | LinkedListNode<CommittedMemoryRegion>* head = |
zgu@7074 | 261 | _committed_regions.head(); |
zgu@7074 | 262 | while (head != NULL) { |
zgu@7074 | 263 | committed += head->data()->size(); |
zgu@7074 | 264 | head = head->next(); |
zgu@7074 | 265 | } |
zgu@7074 | 266 | return committed; |
zgu@7074 | 267 | } |
zgu@7074 | 268 | } |
zgu@7074 | 269 | |
zgu@7074 | 270 | void ReservedMemoryRegion::set_flag(MEMFLAGS f) { |
zgu@7074 | 271 | assert((flag() == mtNone || flag() == f), "Overwrite memory type"); |
zgu@7074 | 272 | if (flag() != f) { |
zgu@7074 | 273 | VirtualMemorySummary::move_reserved_memory(flag(), f, size()); |
zgu@7074 | 274 | VirtualMemorySummary::move_committed_memory(flag(), f, committed_size()); |
zgu@7074 | 275 | _flag = f; |
zgu@7074 | 276 | } |
zgu@7074 | 277 | } |
zgu@7074 | 278 | |
zgu@7074 | 279 | bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) { |
zgu@7074 | 280 | if (level >= NMT_summary) { |
zgu@7074 | 281 | VirtualMemorySummary::initialize(); |
zgu@7074 | 282 | } |
zgu@7074 | 283 | return true; |
zgu@7074 | 284 | } |
zgu@7074 | 285 | |
// Track a newly reserved region [base_addr, base_addr + size) with the given
// call stack and memory type. Handles: a brand-new reservation, an exact
// re-reservation (refreshes stack and flag), expansion of an adjacent
// reservation, and the overlap left behind when a JNI thread exits without
// detaching. Returns false only if a list insertion fails.
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
  const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  ReservedMemoryRegion rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    // First time this region is seen: account it and add to the sorted list.
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions.add(rgn);
    if (node != NULL) {
      // set_all_committed(true) also records the committed size.
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      // Exact match: just refresh attribution.
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      // Grow the existing reservation to cover the new adjacent range.
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapped reservation.
      // It can happen when the regions are thread stacks, as JNI
      // thread does not detach from VM before exits, and leads to
      // leak JavaThread object
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      } else {
        // Any other overlap indicates a tracking inconsistency.
        ShouldNotReachHere();
        return false;
      }
    }
  }
}
zgu@7074 | 338 | |
zgu@7074 | 339 | void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) { |
zgu@7074 | 340 | assert(addr != NULL, "Invalid address"); |
zgu@7074 | 341 | |
zgu@7074 | 342 | ReservedMemoryRegion rgn(addr, 1); |
zgu@7074 | 343 | ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn); |
zgu@7074 | 344 | if (reserved_rgn != NULL) { |
zgu@7074 | 345 | assert(reserved_rgn->contain_address(addr), "Containment"); |
zgu@7074 | 346 | if (reserved_rgn->flag() != flag) { |
zgu@7074 | 347 | assert(reserved_rgn->flag() == mtNone, "Overwrite memory type"); |
zgu@7074 | 348 | reserved_rgn->set_flag(flag); |
zgu@7074 | 349 | } |
zgu@7074 | 350 | } |
zgu@7074 | 351 | } |
zgu@7074 | 352 | |
zgu@7074 | 353 | bool VirtualMemoryTracker::add_committed_region(address addr, size_t size, |
zgu@7074 | 354 | const NativeCallStack& stack) { |
zgu@7074 | 355 | assert(addr != NULL, "Invalid address"); |
zgu@7074 | 356 | assert(size > 0, "Invalid size"); |
zgu@7074 | 357 | ReservedMemoryRegion rgn(addr, size); |
zgu@7074 | 358 | ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn); |
zgu@7074 | 359 | |
zgu@7074 | 360 | assert(reserved_rgn != NULL, "No reserved region"); |
zgu@7074 | 361 | assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); |
zgu@7074 | 362 | return reserved_rgn->add_committed_region(addr, size, stack); |
zgu@7074 | 363 | } |
zgu@7074 | 364 | |
zgu@7074 | 365 | bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) { |
zgu@7074 | 366 | assert(addr != NULL, "Invalid address"); |
zgu@7074 | 367 | assert(size > 0, "Invalid size"); |
zgu@7074 | 368 | ReservedMemoryRegion rgn(addr, size); |
zgu@7074 | 369 | ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn); |
zgu@7074 | 370 | assert(reserved_rgn != NULL, "No reserved region"); |
zgu@7074 | 371 | assert(reserved_rgn->contain_region(addr, size), "Not completely contained"); |
zgu@7074 | 372 | return reserved_rgn->remove_uncommitted_region(addr, size); |
zgu@7074 | 373 | } |
zgu@7074 | 374 | |
// Record that [addr, addr + size) has been released. First uncommits any
// committed sub-ranges, then either removes the reserved region (exact
// match), shrinks it (release at an end), or splits it in two (release in
// the middle), moving committed regions above the split into the new upper
// region. Returns false only on uncommit failure or failed list insertion.
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions.find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }


  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    // Whole reservation released: drop it from the tracked list.
    return _reserved_regions.remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      // Release at either end: shrink the region in place.
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      // Release in the middle: split into [base, addr) and
      // [addr + size, top), the latter inheriting stack and flag.
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use original region for lower region
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions.add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        // Committed regions at or above the split move to the upper region.
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}
zgu@7074 | 418 | |
zgu@7074 | 419 | |
zgu@7074 | 420 | bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) { |
zgu@7074 | 421 | ThreadCritical tc; |
zgu@7074 | 422 | LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions.head(); |
zgu@7074 | 423 | while (head != NULL) { |
zgu@7074 | 424 | const ReservedMemoryRegion* rgn = head->peek(); |
zgu@7074 | 425 | if (!walker->do_allocation_site(rgn)) { |
zgu@7074 | 426 | return false; |
zgu@7074 | 427 | } |
zgu@7074 | 428 | head = head->next(); |
zgu@7074 | 429 | } |
zgu@7074 | 430 | return true; |
zgu@7074 | 431 | } |
zgu@7074 | 432 | |
zgu@7074 | 433 | // Transition virtual memory tracking level. |
zgu@7074 | 434 | bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { |
zgu@7074 | 435 | if (from == NMT_minimal) { |
zgu@7074 | 436 | assert(to == NMT_summary || to == NMT_detail, "Just check"); |
zgu@7074 | 437 | VirtualMemorySummary::reset(); |
zgu@7074 | 438 | } else if (to == NMT_minimal) { |
zgu@7074 | 439 | assert(from == NMT_summary || from == NMT_detail, "Just check"); |
zgu@7074 | 440 | // Clean up virtual memory tracking data structures. |
zgu@7074 | 441 | ThreadCritical tc; |
zgu@7074 | 442 | _reserved_regions.clear(); |
zgu@7074 | 443 | } |
zgu@7074 | 444 | |
zgu@7074 | 445 | return true; |
zgu@7074 | 446 | } |
zgu@7074 | 447 | |
zgu@7074 | 448 |