Tue, 15 Mar 2011 06:35:10 -0700
7024234: 2/3 jvmti tests fail assert(!_oops_are_stale) failed: oops are stale on Win-AMD64
Summary: Move initialization of the '_instance' field to avoid race with ServiceThread start.
Reviewed-by: dholmes, kamg, never, dsamersoff, ysr, coleenp, acorn
duke@435 | 1 | /* |
stefank@2314 | 2 | * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "oops/markOop.hpp" |
stefank@2314 | 27 | #include "oops/oop.inline.hpp" |
stefank@2314 | 28 | #include "runtime/virtualspace.hpp" |
stefank@2314 | 29 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 30 | # include "os_linux.inline.hpp" |
stefank@2314 | 31 | #endif |
stefank@2314 | 32 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 33 | # include "os_solaris.inline.hpp" |
stefank@2314 | 34 | #endif |
stefank@2314 | 35 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 36 | # include "os_windows.inline.hpp" |
stefank@2314 | 37 | #endif |
duke@435 | 38 | |
duke@435 | 39 | |
duke@435 | 40 | // ReservedSpace |
duke@435 | 41 | ReservedSpace::ReservedSpace(size_t size) { |
coleenp@1091 | 42 | initialize(size, 0, false, NULL, 0, false); |
duke@435 | 43 | } |
duke@435 | 44 | |
duke@435 | 45 | ReservedSpace::ReservedSpace(size_t size, size_t alignment, |
coleenp@672 | 46 | bool large, |
coleenp@672 | 47 | char* requested_address, |
coleenp@672 | 48 | const size_t noaccess_prefix) { |
coleenp@672 | 49 | initialize(size+noaccess_prefix, alignment, large, requested_address, |
coleenp@1091 | 50 | noaccess_prefix, false); |
coleenp@1091 | 51 | } |
coleenp@1091 | 52 | |
coleenp@1091 | 53 | ReservedSpace::ReservedSpace(size_t size, size_t alignment, |
coleenp@1091 | 54 | bool large, |
coleenp@1091 | 55 | bool executable) { |
coleenp@1091 | 56 | initialize(size, alignment, large, NULL, 0, executable); |
duke@435 | 57 | } |
duke@435 | 58 | |
duke@435 | 59 | char * |
duke@435 | 60 | ReservedSpace::align_reserved_region(char* addr, const size_t len, |
duke@435 | 61 | const size_t prefix_size, |
duke@435 | 62 | const size_t prefix_align, |
duke@435 | 63 | const size_t suffix_size, |
duke@435 | 64 | const size_t suffix_align) |
duke@435 | 65 | { |
duke@435 | 66 | assert(addr != NULL, "sanity"); |
duke@435 | 67 | const size_t required_size = prefix_size + suffix_size; |
duke@435 | 68 | assert(len >= required_size, "len too small"); |
duke@435 | 69 | |
duke@435 | 70 | const size_t s = size_t(addr); |
duke@435 | 71 | const size_t beg_ofs = s + prefix_size & suffix_align - 1; |
duke@435 | 72 | const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs; |
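// Note: '&' binds more loosely than '+', so beg_ofs above parses as
// (s + prefix_size) & (suffix_align - 1).  A worked example with made-up
// numbers: addr = 0x10000, prefix_size = 0x3000, suffix_align = 0x10000
// gives beg_ofs = 0x3000 and beg_delta = 0xD000, so the returned region
// starts at 0x1D000 and its suffix begins at the suffix-aligned address
// 0x20000.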
duke@435 | 73 | |
duke@435 | 74 | if (len < beg_delta + required_size) { |
duke@435 | 75 | return NULL; // Cannot do proper alignment. |
duke@435 | 76 | } |
duke@435 | 77 | const size_t end_delta = len - (beg_delta + required_size); |
duke@435 | 78 | |
duke@435 | 79 | if (beg_delta != 0) { |
duke@435 | 80 | os::release_memory(addr, beg_delta); |
duke@435 | 81 | } |
duke@435 | 82 | |
duke@435 | 83 | if (end_delta != 0) { |
duke@435 | 84 | char* release_addr = (char*) (s + beg_delta + required_size); |
duke@435 | 85 | os::release_memory(release_addr, end_delta); |
duke@435 | 86 | } |
duke@435 | 87 | |
duke@435 | 88 | return (char*) (s + beg_delta); |
duke@435 | 89 | } |
duke@435 | 90 | |
duke@435 | 91 | char* ReservedSpace::reserve_and_align(const size_t reserve_size, |
duke@435 | 92 | const size_t prefix_size, |
duke@435 | 93 | const size_t prefix_align, |
duke@435 | 94 | const size_t suffix_size, |
duke@435 | 95 | const size_t suffix_align) |
duke@435 | 96 | { |
duke@435 | 97 | assert(reserve_size > prefix_size + suffix_size, "should not be here"); |
duke@435 | 98 | |
duke@435 | 99 | char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align); |
duke@435 | 100 | if (raw_addr == NULL) return NULL; |
duke@435 | 101 | |
duke@435 | 102 | char* result = align_reserved_region(raw_addr, reserve_size, prefix_size, |
duke@435 | 103 | prefix_align, suffix_size, |
duke@435 | 104 | suffix_align); |
duke@435 | 105 | if (result == NULL && !os::release_memory(raw_addr, reserve_size)) { |
duke@435 | 106 | fatal("os::release_memory failed"); |
duke@435 | 107 | } |
duke@435 | 108 | |
duke@435 | 109 | #ifdef ASSERT |
duke@435 | 110 | if (result != NULL) { |
duke@435 | 111 | const size_t raw = size_t(raw_addr); |
duke@435 | 112 | const size_t res = size_t(result); |
duke@435 | 113 | assert(res >= raw, "alignment decreased start addr"); |
duke@435 | 114 | assert(res + prefix_size + suffix_size <= raw + reserve_size, |
duke@435 | 115 | "alignment increased end addr"); |
duke@435 | 116 | assert((res & prefix_align - 1) == 0, "bad alignment of prefix"); |
duke@435 | 117 | assert((res + prefix_size & suffix_align - 1) == 0, |
duke@435 | 118 | "bad alignment of suffix"); |
duke@435 | 119 | } |
duke@435 | 120 | #endif |
duke@435 | 121 | |
duke@435 | 122 | return result; |
duke@435 | 123 | } |
duke@435 | 124 | |
kvn@1973 | 125 | // Helper method. |
kvn@1973 | 126 | static bool failed_to_reserve_as_requested(char* base, char* requested_address, |
kvn@1973 | 127 | const size_t size, bool special) |
kvn@1973 | 128 | { |
kvn@1973 | 129 | if (base == requested_address || requested_address == NULL) |
kvn@1973 | 130 | return false; // did not fail |
kvn@1973 | 131 | |
kvn@1973 | 132 | if (base != NULL) { |
kvn@1973 | 133 | // A different reserve address may be acceptable in other cases, |
kvn@1973 | 134 | // but for compressed oops the heap should be at the requested address. |
kvn@1973 | 135 | assert(UseCompressedOops, "currently requested address used only for compressed oops"); |
kvn@1973 | 136 | if (PrintCompressedOopsMode) { |
kvn@1973 | 137 | tty->cr(); |
kvn@1973 | 138 | tty->print_cr("Reserved memory at not requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address); |
kvn@1973 | 139 | } |
kvn@1973 | 140 | // OS ignored requested address. Try different address. |
kvn@1973 | 141 | if (special) { |
kvn@1973 | 142 | if (!os::release_memory_special(base, size)) { |
kvn@1973 | 143 | fatal("os::release_memory_special failed"); |
kvn@1973 | 144 | } |
kvn@1973 | 145 | } else { |
kvn@1973 | 146 | if (!os::release_memory(base, size)) { |
kvn@1973 | 147 | fatal("os::release_memory failed"); |
kvn@1973 | 148 | } |
kvn@1973 | 149 | } |
kvn@1973 | 150 | } |
kvn@1973 | 151 | return true; |
kvn@1973 | 152 | } |
kvn@1973 | 153 | |
duke@435 | 154 | ReservedSpace::ReservedSpace(const size_t prefix_size, |
duke@435 | 155 | const size_t prefix_align, |
duke@435 | 156 | const size_t suffix_size, |
coleenp@672 | 157 | const size_t suffix_align, |
kvn@1077 | 158 | char* requested_address, |
coleenp@672 | 159 | const size_t noaccess_prefix) |
duke@435 | 160 | { |
duke@435 | 161 | assert(prefix_size != 0, "sanity"); |
duke@435 | 162 | assert(prefix_align != 0, "sanity"); |
duke@435 | 163 | assert(suffix_size != 0, "sanity"); |
duke@435 | 164 | assert(suffix_align != 0, "sanity"); |
duke@435 | 165 | assert((prefix_size & prefix_align - 1) == 0, |
duke@435 | 166 | "prefix_size not divisible by prefix_align"); |
duke@435 | 167 | assert((suffix_size & suffix_align - 1) == 0, |
duke@435 | 168 | "suffix_size not divisible by suffix_align"); |
duke@435 | 169 | assert((suffix_align & prefix_align - 1) == 0, |
duke@435 | 170 | "suffix_align not divisible by prefix_align"); |
duke@435 | 171 | |
kvn@1973 | 172 | // Assert that if noaccess_prefix is used, it is the same as prefix_align. |
kvn@1973 | 173 | assert(noaccess_prefix == 0 || |
kvn@1973 | 174 | noaccess_prefix == prefix_align, "noaccess prefix wrong"); |
kvn@1973 | 175 | |
coleenp@672 | 176 | // Add in noaccess_prefix to prefix_size. |
coleenp@672 | 177 | const size_t adjusted_prefix_size = prefix_size + noaccess_prefix; |
coleenp@672 | 178 | const size_t size = adjusted_prefix_size + suffix_size; |
coleenp@672 | 179 | |
duke@435 | 180 | // On systems where the entire region has to be reserved and committed up |
duke@435 | 181 | // front, the compound alignment normally done by this method is unnecessary. |
duke@435 | 182 | const bool try_reserve_special = UseLargePages && |
duke@435 | 183 | prefix_align == os::large_page_size(); |
duke@435 | 184 | if (!os::can_commit_large_page_memory() && try_reserve_special) { |
coleenp@1091 | 185 | initialize(size, prefix_align, true, requested_address, noaccess_prefix, |
coleenp@1091 | 186 | false); |
duke@435 | 187 | return; |
duke@435 | 188 | } |
duke@435 | 189 | |
duke@435 | 190 | _base = NULL; |
duke@435 | 191 | _size = 0; |
duke@435 | 192 | _alignment = 0; |
duke@435 | 193 | _special = false; |
coleenp@672 | 194 | _noaccess_prefix = 0; |
coleenp@1091 | 195 | _executable = false; |
coleenp@672 | 196 | |
duke@435 | 197 | // Optimistically try to reserve the exact size needed. |
kvn@1077 | 198 | char* addr; |
kvn@1077 | 199 | if (requested_address != 0) { |
kvn@1973 | 200 | requested_address -= noaccess_prefix; // adjust address |
kvn@1973 | 201 | assert(requested_address != NULL, "huge noaccess prefix?"); |
kvn@1973 | 202 | addr = os::attempt_reserve_memory_at(size, requested_address); |
kvn@1973 | 203 | if (failed_to_reserve_as_requested(addr, requested_address, size, false)) { |
kvn@1973 | 204 | // OS ignored requested address. Try different address. |
kvn@1973 | 205 | addr = NULL; |
kvn@1973 | 206 | } |
kvn@1077 | 207 | } else { |
kvn@1077 | 208 | addr = os::reserve_memory(size, NULL, prefix_align); |
kvn@1077 | 209 | } |
duke@435 | 210 | if (addr == NULL) return; |
duke@435 | 211 | |
duke@435 | 212 | // Check whether the result has the needed alignment (unlikely unless |
duke@435 | 213 | // prefix_align == suffix_align). |
coleenp@672 | 214 | const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1; |
duke@435 | 215 | if (ofs != 0) { |
duke@435 | 216 | // Wrong alignment. Release, allocate more space and do manual alignment. |
duke@435 | 217 | // |
duke@435 | 218 | // On most operating systems, another allocation with a somewhat larger size |
duke@435 | 219 | // will return an address "close to" that of the previous allocation. The |
duke@435 | 220 | // result is often the same address (if the kernel hands out virtual |
duke@435 | 221 | // addresses from low to high), or an address that is offset by the increase |
duke@435 | 222 | // in size. Exploit that to minimize the amount of extra space requested. |
duke@435 | 223 | if (!os::release_memory(addr, size)) { |
duke@435 | 224 | fatal("os::release_memory failed"); |
duke@435 | 225 | } |
duke@435 | 226 | |
duke@435 | 227 | const size_t extra = MAX2(ofs, suffix_align - ofs); |
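// A likely rationale for MAX2 here (following the comment above): if the new
// reservation comes back at the same base address, suffix_align - ofs extra
// bytes are needed to shift the start up to the required alignment; if it
// comes back shifted by the size increase instead, ofs extra bytes appear to
// cover the realignment.  Taking the larger of the two handles both outcomes.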
coleenp@672 | 228 | addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align, |
duke@435 | 229 | suffix_size, suffix_align); |
duke@435 | 230 | if (addr == NULL) { |
duke@435 | 231 | // Try an even larger region. If this fails, address space is exhausted. |
coleenp@672 | 232 | addr = reserve_and_align(size + suffix_align, adjusted_prefix_size, |
duke@435 | 233 | prefix_align, suffix_size, suffix_align); |
duke@435 | 234 | } |
duke@435 | 235 | } |
duke@435 | 236 | |
duke@435 | 237 | _base = addr; |
duke@435 | 238 | _size = size; |
duke@435 | 239 | _alignment = prefix_align; |
coleenp@672 | 240 | _noaccess_prefix = noaccess_prefix; |
duke@435 | 241 | } |
duke@435 | 242 | |
duke@435 | 243 | void ReservedSpace::initialize(size_t size, size_t alignment, bool large, |
coleenp@672 | 244 | char* requested_address, |
coleenp@1091 | 245 | const size_t noaccess_prefix, |
coleenp@1091 | 246 | bool executable) { |
duke@435 | 247 | const size_t granularity = os::vm_allocation_granularity(); |
duke@435 | 248 | assert((size & granularity - 1) == 0, |
duke@435 | 249 | "size not aligned to os::vm_allocation_granularity()"); |
duke@435 | 250 | assert((alignment & granularity - 1) == 0, |
duke@435 | 251 | "alignment not aligned to os::vm_allocation_granularity()"); |
duke@435 | 252 | assert(alignment == 0 || is_power_of_2((intptr_t)alignment), |
duke@435 | 253 | "not a power of 2"); |
duke@435 | 254 | |
duke@435 | 255 | _base = NULL; |
duke@435 | 256 | _size = 0; |
duke@435 | 257 | _special = false; |
coleenp@1091 | 258 | _executable = executable; |
duke@435 | 259 | _alignment = 0; |
coleenp@672 | 260 | _noaccess_prefix = 0; |
duke@435 | 261 | if (size == 0) { |
duke@435 | 262 | return; |
duke@435 | 263 | } |
duke@435 | 264 | |
duke@435 | 265 | // If OS doesn't support demand paging for large page memory, we need |
duke@435 | 266 | // to use reserve_memory_special() to reserve and pin the entire region. |
duke@435 | 267 | bool special = large && !os::can_commit_large_page_memory(); |
duke@435 | 268 | char* base = NULL; |
duke@435 | 269 | |
kvn@1973 | 270 | if (requested_address != 0) { |
kvn@1973 | 271 | requested_address -= noaccess_prefix; // adjust requested address |
kvn@1973 | 272 | assert(requested_address != NULL, "huge noaccess prefix?"); |
kvn@1973 | 273 | } |
kvn@1973 | 274 | |
duke@435 | 275 | if (special) { |
duke@435 | 276 | |
coleenp@1091 | 277 | base = os::reserve_memory_special(size, requested_address, executable); |
duke@435 | 278 | |
duke@435 | 279 | if (base != NULL) { |
kvn@1973 | 280 | if (failed_to_reserve_as_requested(base, requested_address, size, true)) { |
kvn@1973 | 281 | // OS ignored requested address. Try different address. |
kvn@1973 | 282 | return; |
kvn@1973 | 283 | } |
duke@435 | 284 | // Check alignment constraints |
duke@435 | 285 | if (alignment > 0) { |
duke@435 | 286 | assert((uintptr_t) base % alignment == 0, |
duke@435 | 287 | "Large pages returned a non-aligned address"); |
duke@435 | 288 | } |
duke@435 | 289 | _special = true; |
duke@435 | 290 | } else { |
duke@435 | 291 | // failed; try to reserve regular memory below |
kvn@1973 | 292 | if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || |
kvn@1973 | 293 | !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { |
kvn@1973 | 294 | if (PrintCompressedOopsMode) { |
kvn@1973 | 295 | tty->cr(); |
kvn@1973 | 296 | tty->print_cr("Reserve regular memory without large pages."); |
kvn@1973 | 297 | } |
kvn@1973 | 298 | } |
duke@435 | 299 | } |
duke@435 | 300 | } |
duke@435 | 301 | |
duke@435 | 302 | if (base == NULL) { |
duke@435 | 303 | // Optimistically assume that the OS returns an aligned base pointer. |
duke@435 | 304 | // When reserving a large address range, most OSes seem to align to at |
duke@435 | 305 | // least 64K. |
duke@435 | 306 | |
duke@435 | 307 | // If the memory was requested at a particular address, use |
duke@435 | 308 | // os::attempt_reserve_memory_at() to avoid over mapping something |
duke@435 | 309 | // important. If available space is not detected, return NULL. |
duke@435 | 310 | |
duke@435 | 311 | if (requested_address != 0) { |
kvn@1973 | 312 | base = os::attempt_reserve_memory_at(size, requested_address); |
kvn@1973 | 313 | if (failed_to_reserve_as_requested(base, requested_address, size, false)) { |
kvn@1973 | 314 | // OS ignored requested address. Try different address. |
kvn@1973 | 315 | base = NULL; |
kvn@1973 | 316 | } |
duke@435 | 317 | } else { |
duke@435 | 318 | base = os::reserve_memory(size, NULL, alignment); |
duke@435 | 319 | } |
duke@435 | 320 | |
duke@435 | 321 | if (base == NULL) return; |
duke@435 | 322 | |
duke@435 | 323 | // Check alignment constraints |
duke@435 | 324 | if (alignment > 0 && ((size_t)base & alignment - 1) != 0) { |
duke@435 | 325 | // Base not aligned, retry |
duke@435 | 326 | if (!os::release_memory(base, size)) fatal("os::release_memory failed"); |
duke@435 | 327 | // Reserve size large enough to do manual alignment and |
duke@435 | 328 | // increase size to a multiple of the desired alignment |
duke@435 | 329 | size = align_size_up(size, alignment); |
duke@435 | 330 | size_t extra_size = size + alignment; |
ysr@777 | 331 | do { |
ysr@777 | 332 | char* extra_base = os::reserve_memory(extra_size, NULL, alignment); |
ysr@777 | 333 | if (extra_base == NULL) return; |
ysr@777 | 334 | // Do manual alignment |
ysr@777 | 335 | base = (char*) align_size_up((uintptr_t) extra_base, alignment); |
ysr@777 | 336 | assert(base >= extra_base, "just checking"); |
ysr@777 | 337 | // Re-reserve the region at the aligned base address. |
ysr@777 | 338 | os::release_memory(extra_base, extra_size); |
ysr@777 | 339 | base = os::reserve_memory(size, base); |
ysr@777 | 340 | } while (base == NULL); |
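// The do/while retry is needed because the release of extra_base and the
// re-reserve at the aligned base are not atomic: another thread can map
// something into the freed range in between, making the targeted
// os::reserve_memory(size, base) return NULL and forcing another attempt.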
duke@435 | 341 | } |
duke@435 | 342 | } |
duke@435 | 343 | // Done |
duke@435 | 344 | _base = base; |
duke@435 | 345 | _size = size; |
duke@435 | 346 | _alignment = MAX2(alignment, (size_t) os::vm_page_size()); |
coleenp@672 | 347 | _noaccess_prefix = noaccess_prefix; |
coleenp@672 | 348 | |
coleenp@672 | 349 | // Assert that if noaccess_prefix is used, it is the same as alignment. |
coleenp@672 | 350 | assert(noaccess_prefix == 0 || |
coleenp@672 | 351 | noaccess_prefix == _alignment, "noaccess prefix wrong"); |
duke@435 | 352 | |
duke@435 | 353 | assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base, |
duke@435 | 354 | "area must be distinguisable from marks for mark-sweep"); |
duke@435 | 355 | assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size], |
duke@435 | 356 | "area must be distinguisable from marks for mark-sweep"); |
duke@435 | 357 | } |
duke@435 | 358 | |
duke@435 | 359 | |
duke@435 | 360 | ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, |
coleenp@1091 | 361 | bool special, bool executable) { |
duke@435 | 362 | assert((size % os::vm_allocation_granularity()) == 0, |
duke@435 | 363 | "size not allocation aligned"); |
duke@435 | 364 | _base = base; |
duke@435 | 365 | _size = size; |
duke@435 | 366 | _alignment = alignment; |
coleenp@672 | 367 | _noaccess_prefix = 0; |
duke@435 | 368 | _special = special; |
coleenp@1091 | 369 | _executable = executable; |
duke@435 | 370 | } |
duke@435 | 371 | |
duke@435 | 372 | |
duke@435 | 373 | ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment, |
duke@435 | 374 | bool split, bool realloc) { |
duke@435 | 375 | assert(partition_size <= size(), "partition failed"); |
duke@435 | 376 | if (split) { |
coleenp@1091 | 377 | os::split_reserved_memory(base(), size(), partition_size, realloc); |
duke@435 | 378 | } |
coleenp@1091 | 379 | ReservedSpace result(base(), partition_size, alignment, special(), |
coleenp@1091 | 380 | executable()); |
duke@435 | 381 | return result; |
duke@435 | 382 | } |
duke@435 | 383 | |
duke@435 | 384 | |
duke@435 | 385 | ReservedSpace |
duke@435 | 386 | ReservedSpace::last_part(size_t partition_size, size_t alignment) { |
duke@435 | 387 | assert(partition_size <= size(), "partition failed"); |
duke@435 | 388 | ReservedSpace result(base() + partition_size, size() - partition_size, |
coleenp@1091 | 389 | alignment, special(), executable()); |
duke@435 | 390 | return result; |
duke@435 | 391 | } |
duke@435 | 392 | |
duke@435 | 393 | |
duke@435 | 394 | size_t ReservedSpace::page_align_size_up(size_t size) { |
duke@435 | 395 | return align_size_up(size, os::vm_page_size()); |
duke@435 | 396 | } |
duke@435 | 397 | |
duke@435 | 398 | |
duke@435 | 399 | size_t ReservedSpace::page_align_size_down(size_t size) { |
duke@435 | 400 | return align_size_down(size, os::vm_page_size()); |
duke@435 | 401 | } |
duke@435 | 402 | |
duke@435 | 403 | |
duke@435 | 404 | size_t ReservedSpace::allocation_align_size_up(size_t size) { |
duke@435 | 405 | return align_size_up(size, os::vm_allocation_granularity()); |
duke@435 | 406 | } |
duke@435 | 407 | |
duke@435 | 408 | |
duke@435 | 409 | size_t ReservedSpace::allocation_align_size_down(size_t size) { |
duke@435 | 410 | return align_size_down(size, os::vm_allocation_granularity()); |
duke@435 | 411 | } |
duke@435 | 412 | |
duke@435 | 413 | |
duke@435 | 414 | void ReservedSpace::release() { |
duke@435 | 415 | if (is_reserved()) { |
coleenp@672 | 416 | char *real_base = _base - _noaccess_prefix; |
coleenp@672 | 417 | const size_t real_size = _size + _noaccess_prefix; |
duke@435 | 418 | if (special()) { |
coleenp@672 | 419 | os::release_memory_special(real_base, real_size); |
duke@435 | 420 | } else{ |
coleenp@672 | 421 | os::release_memory(real_base, real_size); |
duke@435 | 422 | } |
duke@435 | 423 | _base = NULL; |
duke@435 | 424 | _size = 0; |
coleenp@672 | 425 | _noaccess_prefix = 0; |
duke@435 | 426 | _special = false; |
coleenp@1091 | 427 | _executable = false; |
duke@435 | 428 | } |
duke@435 | 429 | } |
duke@435 | 430 | |
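// Background: with compressed oops and a non-zero heap base, a null narrow oop
// decodes to the heap base address.  Protecting a no-access prefix at the base
// of the reservation makes such an access fault, which is what lets the VM keep
// using implicit null checks (see the assert below).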
coleenp@672 | 431 | void ReservedSpace::protect_noaccess_prefix(const size_t size) { |
kvn@1973 | 432 | assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL && |
kvn@1973 | 433 | (size_t(_base + _size) > OopEncodingHeapMax) && |
kvn@1973 | 434 | Universe::narrow_oop_use_implicit_null_checks()), |
kvn@1973 | 435 | "noaccess_prefix should be used only with non zero based compressed oops"); |
kvn@1973 | 436 | |
kvn@1973 | 437 | // If there is no noaccess prefix, return. |
coleenp@672 | 438 | if (_noaccess_prefix == 0) return; |
coleenp@672 | 439 | |
coleenp@672 | 440 | assert(_noaccess_prefix >= (size_t)os::vm_page_size(), |
coleenp@672 | 441 | "must be at least page size big"); |
coleenp@672 | 442 | |
coleenp@672 | 443 | // Protect memory at the base of the allocated region. |
coleenp@672 | 444 | // If special, the page was committed (only matters on Windows). |
coleenp@672 | 445 | if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, |
coleenp@672 | 446 | _special)) { |
coleenp@672 | 447 | fatal("cannot protect protection page"); |
coleenp@672 | 448 | } |
kvn@1973 | 449 | if (PrintCompressedOopsMode) { |
kvn@1973 | 450 | tty->cr(); |
kvn@1973 | 451 | tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix); |
kvn@1973 | 452 | } |
coleenp@672 | 453 | |
coleenp@672 | 454 | _base += _noaccess_prefix; |
coleenp@672 | 455 | _size -= _noaccess_prefix; |
coleenp@672 | 456 | assert((size == _size) && ((uintptr_t)_base % _alignment == 0), |
coleenp@672 | 457 | "must be exactly of required size and alignment"); |
coleenp@672 | 458 | } |
coleenp@672 | 459 | |
coleenp@672 | 460 | ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, |
coleenp@672 | 461 | bool large, char* requested_address) : |
coleenp@672 | 462 | ReservedSpace(size, alignment, large, |
coleenp@672 | 463 | requested_address, |
kvn@1077 | 464 | (UseCompressedOops && (Universe::narrow_oop_base() != NULL) && |
kvn@1077 | 465 | Universe::narrow_oop_use_implicit_null_checks()) ? |
coleenp@760 | 466 | lcm(os::vm_page_size(), alignment) : 0) { |
coleenp@672 | 467 | // Only reserved space for the java heap should have a noaccess_prefix |
coleenp@672 | 468 | // if using compressed oops. |
coleenp@672 | 469 | protect_noaccess_prefix(size); |
coleenp@672 | 470 | } |
coleenp@672 | 471 | |
coleenp@672 | 472 | ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size, |
coleenp@672 | 473 | const size_t prefix_align, |
coleenp@672 | 474 | const size_t suffix_size, |
kvn@1077 | 475 | const size_t suffix_align, |
kvn@1077 | 476 | char* requested_address) : |
coleenp@672 | 477 | ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align, |
kvn@1077 | 478 | requested_address, |
kvn@1077 | 479 | (UseCompressedOops && (Universe::narrow_oop_base() != NULL) && |
kvn@1077 | 480 | Universe::narrow_oop_use_implicit_null_checks()) ? |
coleenp@760 | 481 | lcm(os::vm_page_size(), prefix_align) : 0) { |
coleenp@672 | 482 | protect_noaccess_prefix(prefix_size+suffix_size); |
coleenp@672 | 483 | } |
duke@435 | 484 | |
coleenp@1091 | 485 | // Reserve space for the code segment.  Same as the Java heap, except we |
coleenp@1091 | 486 | // mark this as executable. |
coleenp@1091 | 487 | ReservedCodeSpace::ReservedCodeSpace(size_t r_size, |
coleenp@1091 | 488 | size_t rs_align, |
coleenp@1091 | 489 | bool large) : |
coleenp@1091 | 490 | ReservedSpace(r_size, rs_align, large, /*executable*/ true) { |
coleenp@1091 | 491 | } |
coleenp@1091 | 492 | |
duke@435 | 493 | // VirtualSpace |
duke@435 | 494 | |
duke@435 | 495 | VirtualSpace::VirtualSpace() { |
duke@435 | 496 | _low_boundary = NULL; |
duke@435 | 497 | _high_boundary = NULL; |
duke@435 | 498 | _low = NULL; |
duke@435 | 499 | _high = NULL; |
duke@435 | 500 | _lower_high = NULL; |
duke@435 | 501 | _middle_high = NULL; |
duke@435 | 502 | _upper_high = NULL; |
duke@435 | 503 | _lower_high_boundary = NULL; |
duke@435 | 504 | _middle_high_boundary = NULL; |
duke@435 | 505 | _upper_high_boundary = NULL; |
duke@435 | 506 | _lower_alignment = 0; |
duke@435 | 507 | _middle_alignment = 0; |
duke@435 | 508 | _upper_alignment = 0; |
coleenp@672 | 509 | _special = false; |
coleenp@1091 | 510 | _executable = false; |
duke@435 | 511 | } |
duke@435 | 512 | |
duke@435 | 513 | |
duke@435 | 514 | bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) { |
duke@435 | 515 | if(!rs.is_reserved()) return false; // allocation failed. |
duke@435 | 516 | assert(_low_boundary == NULL, "VirtualSpace already initialized"); |
duke@435 | 517 | _low_boundary = rs.base(); |
duke@435 | 518 | _high_boundary = low_boundary() + rs.size(); |
duke@435 | 519 | |
duke@435 | 520 | _low = low_boundary(); |
duke@435 | 521 | _high = low(); |
duke@435 | 522 | |
duke@435 | 523 | _special = rs.special(); |
coleenp@1091 | 524 | _executable = rs.executable(); |
duke@435 | 525 | |
duke@435 | 526 | // When a VirtualSpace begins life at a large size, make all future expansion |
duke@435 | 527 | // and shrinking occur aligned to a granularity of large pages. This avoids |
duke@435 | 528 | // fragmentation of physical addresses that inhibits the use of large pages |
duke@435 | 529 | // by the OS virtual memory system. Empirically, we see that with a 4MB |
duke@435 | 530 | // page size, the only spaces that get handled this way are codecache and |
duke@435 | 531 | // the heap itself, both of which provide a substantial performance |
duke@435 | 532 | // boost in many benchmarks when covered by large pages. |
duke@435 | 533 | // |
duke@435 | 534 | // No attempt is made to force large page alignment at the very top and |
duke@435 | 535 | // bottom of the space if they are not aligned so already. |
duke@435 | 536 | _lower_alignment = os::vm_page_size(); |
duke@435 | 537 | _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1); |
duke@435 | 538 | _upper_alignment = os::vm_page_size(); |
duke@435 | 539 | |
duke@435 | 540 | // End of each region |
duke@435 | 541 | _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment()); |
duke@435 | 542 | _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment()); |
duke@435 | 543 | _upper_high_boundary = high_boundary(); |
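// Resulting region layout (when the space is big enough for large pages):
//
//   low_boundary()      lower_high_boundary()   middle_high_boundary()   high_boundary()
//        |-- small pages --|------ large pages ------|------ small pages ------|
//
// lower_high_boundary() is low_boundary() rounded up to the middle (large-page)
// alignment, and middle_high_boundary() is high_boundary() rounded down to it,
// so only the interior of the space is eligible for large pages.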
duke@435 | 544 | |
duke@435 | 545 | // High address of each region |
duke@435 | 546 | _lower_high = low_boundary(); |
duke@435 | 547 | _middle_high = lower_high_boundary(); |
duke@435 | 548 | _upper_high = middle_high_boundary(); |
duke@435 | 549 | |
duke@435 | 550 | // commit to initial size |
duke@435 | 551 | if (committed_size > 0) { |
duke@435 | 552 | if (!expand_by(committed_size)) { |
duke@435 | 553 | return false; |
duke@435 | 554 | } |
duke@435 | 555 | } |
duke@435 | 556 | return true; |
duke@435 | 557 | } |
duke@435 | 558 | |
duke@435 | 559 | |
duke@435 | 560 | VirtualSpace::~VirtualSpace() { |
duke@435 | 561 | release(); |
duke@435 | 562 | } |
duke@435 | 563 | |
duke@435 | 564 | |
duke@435 | 565 | void VirtualSpace::release() { |
coleenp@672 | 566 | // This does not release the underlying memory, which VirtualSpace never reserved. |
coleenp@672 | 567 | // Caller must release via rs.release(); |
duke@435 | 568 | _low_boundary = NULL; |
duke@435 | 569 | _high_boundary = NULL; |
duke@435 | 570 | _low = NULL; |
duke@435 | 571 | _high = NULL; |
duke@435 | 572 | _lower_high = NULL; |
duke@435 | 573 | _middle_high = NULL; |
duke@435 | 574 | _upper_high = NULL; |
duke@435 | 575 | _lower_high_boundary = NULL; |
duke@435 | 576 | _middle_high_boundary = NULL; |
duke@435 | 577 | _upper_high_boundary = NULL; |
duke@435 | 578 | _lower_alignment = 0; |
duke@435 | 579 | _middle_alignment = 0; |
duke@435 | 580 | _upper_alignment = 0; |
duke@435 | 581 | _special = false; |
coleenp@1091 | 582 | _executable = false; |
duke@435 | 583 | } |
duke@435 | 584 | |
duke@435 | 585 | |
duke@435 | 586 | size_t VirtualSpace::committed_size() const { |
duke@435 | 587 | return pointer_delta(high(), low(), sizeof(char)); |
duke@435 | 588 | } |
duke@435 | 589 | |
duke@435 | 590 | |
duke@435 | 591 | size_t VirtualSpace::reserved_size() const { |
duke@435 | 592 | return pointer_delta(high_boundary(), low_boundary(), sizeof(char)); |
duke@435 | 593 | } |
duke@435 | 594 | |
duke@435 | 595 | |
duke@435 | 596 | size_t VirtualSpace::uncommitted_size() const { |
duke@435 | 597 | return reserved_size() - committed_size(); |
duke@435 | 598 | } |
duke@435 | 599 | |
duke@435 | 600 | |
duke@435 | 601 | bool VirtualSpace::contains(const void* p) const { |
duke@435 | 602 | return low() <= (const char*) p && (const char*) p < high(); |
duke@435 | 603 | } |
duke@435 | 604 | |
duke@435 | 605 | /* |
duke@435 | 606 | First we need to determine if a particular virtual space is using large |
duke@435 | 607 | pages. This is done in the initialize function and only virtual spaces |
duke@435 | 608 | that are larger than LargePageSizeInBytes use large pages. Once we |
duke@435 | 609 | have determined this, all expand_by and shrink_by calls must grow and |
duke@435 | 610 | shrink by large page size chunks. If a particular request |
duke@435 | 611 | is within the current large page, the call to commit and uncommit memory |
duke@435 | 612 | can be ignored. In the case that the low and high boundaries of this |
duke@435 | 613 | space are not large page aligned, the pages leading to the first large |
duke@435 | 614 | page address and the pages after the last large page address must be |
duke@435 | 615 | allocated with default pages. |
duke@435 | 616 | */ |
duke@435 | 617 | bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) { |
duke@435 | 618 | if (uncommitted_size() < bytes) return false; |
duke@435 | 619 | |
duke@435 | 620 | if (special()) { |
duke@435 | 621 | // don't commit memory if the entire space is pinned in memory |
duke@435 | 622 | _high += bytes; |
duke@435 | 623 | return true; |
duke@435 | 624 | } |
duke@435 | 625 | |
duke@435 | 626 | char* previous_high = high(); |
duke@435 | 627 | char* unaligned_new_high = high() + bytes; |
duke@435 | 628 | assert(unaligned_new_high <= high_boundary(), |
duke@435 | 629 | "cannot expand by more than upper boundary"); |
duke@435 | 630 | |
duke@435 | 631 | // Calculate where the new high for each of the regions should be. If |
duke@435 | 632 | // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned |
duke@435 | 633 | // then the unaligned lower and upper new highs would be the |
duke@435 | 634 | // lower_high() and upper_high() respectively. |
duke@435 | 635 | char* unaligned_lower_new_high = |
duke@435 | 636 | MIN2(unaligned_new_high, lower_high_boundary()); |
duke@435 | 637 | char* unaligned_middle_new_high = |
duke@435 | 638 | MIN2(unaligned_new_high, middle_high_boundary()); |
duke@435 | 639 | char* unaligned_upper_new_high = |
duke@435 | 640 | MIN2(unaligned_new_high, upper_high_boundary()); |
duke@435 | 641 | |
duke@435 | 642 | // Align the new highs based on the regions' alignment. lower and upper |
duke@435 | 643 | // alignment will always be default page size. middle alignment will be |
duke@435 | 644 | // LargePageSizeInBytes if the actual size of the virtual space is in |
duke@435 | 645 | // fact larger than LargePageSizeInBytes. |
duke@435 | 646 | char* aligned_lower_new_high = |
duke@435 | 647 | (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment()); |
duke@435 | 648 | char* aligned_middle_new_high = |
duke@435 | 649 | (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment()); |
duke@435 | 650 | char* aligned_upper_new_high = |
duke@435 | 651 | (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment()); |
duke@435 | 652 | |
duke@435 | 653 | // Determine which regions need to grow in this expand_by call. |
duke@435 | 654 | // If you are growing in the lower region, high() must be in that |
duke@435 | 655 | // region so calculate the size based on high(). For the middle and |
duke@435 | 656 | // upper regions, determine the starting point of growth based on the |
duke@435 | 657 | // location of high(). By getting the MAX of the region's low address |
duke@435 | 658 | // (or the previous region's high address) and high(), we can tell if it |
duke@435 | 659 | // is an intra or inter region growth. |
duke@435 | 660 | size_t lower_needs = 0; |
duke@435 | 661 | if (aligned_lower_new_high > lower_high()) { |
duke@435 | 662 | lower_needs = |
duke@435 | 663 | pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char)); |
duke@435 | 664 | } |
duke@435 | 665 | size_t middle_needs = 0; |
duke@435 | 666 | if (aligned_middle_new_high > middle_high()) { |
duke@435 | 667 | middle_needs = |
duke@435 | 668 | pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char)); |
duke@435 | 669 | } |
duke@435 | 670 | size_t upper_needs = 0; |
duke@435 | 671 | if (aligned_upper_new_high > upper_high()) { |
duke@435 | 672 | upper_needs = |
duke@435 | 673 | pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char)); |
duke@435 | 674 | } |
duke@435 | 675 | |
duke@435 | 676 | // Check contiguity. |
duke@435 | 677 | assert(low_boundary() <= lower_high() && |
duke@435 | 678 | lower_high() <= lower_high_boundary(), |
duke@435 | 679 | "high address must be contained within the region"); |
duke@435 | 680 | assert(lower_high_boundary() <= middle_high() && |
duke@435 | 681 | middle_high() <= middle_high_boundary(), |
duke@435 | 682 | "high address must be contained within the region"); |
duke@435 | 683 | assert(middle_high_boundary() <= upper_high() && |
duke@435 | 684 | upper_high() <= upper_high_boundary(), |
duke@435 | 685 | "high address must be contained within the region"); |
duke@435 | 686 | |
duke@435 | 687 | // Commit regions |
duke@435 | 688 | if (lower_needs > 0) { |
duke@435 | 689 | assert(low_boundary() <= lower_high() && |
duke@435 | 690 | lower_high() + lower_needs <= lower_high_boundary(), |
duke@435 | 691 | "must not expand beyond region"); |
coleenp@1091 | 692 | if (!os::commit_memory(lower_high(), lower_needs, _executable)) { |
duke@435 | 693 | debug_only(warning("os::commit_memory failed")); |
duke@435 | 694 | return false; |
duke@435 | 695 | } else { |
duke@435 | 696 | _lower_high += lower_needs; |
duke@435 | 697 | } |
duke@435 | 698 | } |
duke@435 | 699 | if (middle_needs > 0) { |
duke@435 | 700 | assert(lower_high_boundary() <= middle_high() && |
duke@435 | 701 | middle_high() + middle_needs <= middle_high_boundary(), |
duke@435 | 702 | "must not expand beyond region"); |
coleenp@1091 | 703 | if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(), |
coleenp@1091 | 704 | _executable)) { |
duke@435 | 705 | debug_only(warning("os::commit_memory failed")); |
duke@435 | 706 | return false; |
duke@435 | 707 | } |
duke@435 | 708 | _middle_high += middle_needs; |
duke@435 | 709 | } |
duke@435 | 710 | if (upper_needs > 0) { |
duke@435 | 711 | assert(middle_high_boundary() <= upper_high() && |
duke@435 | 712 | upper_high() + upper_needs <= upper_high_boundary(), |
duke@435 | 713 | "must not expand beyond region"); |
coleenp@1091 | 714 | if (!os::commit_memory(upper_high(), upper_needs, _executable)) { |
duke@435 | 715 | debug_only(warning("os::commit_memory failed")); |
duke@435 | 716 | return false; |
duke@435 | 717 | } else { |
duke@435 | 718 | _upper_high += upper_needs; |
duke@435 | 719 | } |
duke@435 | 720 | } |
duke@435 | 721 | |
duke@435 | 722 | if (pre_touch || AlwaysPreTouch) { |
duke@435 | 723 | int vm_ps = os::vm_page_size(); |
duke@435 | 724 | for (char* curr = previous_high; |
duke@435 | 725 | curr < unaligned_new_high; |
duke@435 | 726 | curr += vm_ps) { |
duke@435 | 727 | // Note the use of a write here; originally we tried just a read, but |
duke@435 | 728 | // since the value read was unused, the optimizer removed the read. |
duke@435 | 729 | // If we ever have a concurrent touchahead thread, we'll want to use |
duke@435 | 730 | // a read, to avoid the potential of overwriting data (if a mutator |
duke@435 | 731 | // thread beats the touchahead thread to a page). There are various |
duke@435 | 732 | // ways of making sure this read is not optimized away: for example, |
duke@435 | 733 | // generating the code for a read procedure at runtime. |
duke@435 | 734 | *curr = 0; |
duke@435 | 735 | } |
duke@435 | 736 | } |
duke@435 | 737 | |
duke@435 | 738 | _high += bytes; |
duke@435 | 739 | return true; |
duke@435 | 740 | } |
duke@435 | 741 | |
duke@435 | 742 | // A page is uncommitted if the contents of the entire page are deemed unusable. |
duke@435 | 743 | // Continue to decrement the high() pointer until it reaches a page boundary |
duke@435 | 744 | // in which case that particular page can now be uncommitted. |
duke@435 | 745 | void VirtualSpace::shrink_by(size_t size) { |
duke@435 | 746 | if (committed_size() < size) |
duke@435 | 747 | fatal("Cannot shrink virtual space to negative size"); |
duke@435 | 748 | |
duke@435 | 749 | if (special()) { |
duke@435 | 750 | // don't uncommit if the entire space is pinned in memory |
duke@435 | 751 | _high -= size; |
duke@435 | 752 | return; |
duke@435 | 753 | } |
duke@435 | 754 | |
duke@435 | 755 | char* unaligned_new_high = high() - size; |
duke@435 | 756 | assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary"); |
duke@435 | 757 | |
duke@435 | 758 | // Calculate new unaligned address |
duke@435 | 759 | char* unaligned_upper_new_high = |
duke@435 | 760 | MAX2(unaligned_new_high, middle_high_boundary()); |
duke@435 | 761 | char* unaligned_middle_new_high = |
duke@435 | 762 | MAX2(unaligned_new_high, lower_high_boundary()); |
duke@435 | 763 | char* unaligned_lower_new_high = |
duke@435 | 764 | MAX2(unaligned_new_high, low_boundary()); |
duke@435 | 765 | |
duke@435 | 766 | // Align address to region's alignment |
duke@435 | 767 | char* aligned_upper_new_high = |
duke@435 | 768 | (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment()); |
duke@435 | 769 | char* aligned_middle_new_high = |
duke@435 | 770 | (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment()); |
duke@435 | 771 | char* aligned_lower_new_high = |
duke@435 | 772 | (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment()); |
duke@435 | 773 | |
duke@435 | 774 | // Determine which regions need to shrink |
duke@435 | 775 | size_t upper_needs = 0; |
duke@435 | 776 | if (aligned_upper_new_high < upper_high()) { |
duke@435 | 777 | upper_needs = |
duke@435 | 778 | pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char)); |
duke@435 | 779 | } |
duke@435 | 780 | size_t middle_needs = 0; |
duke@435 | 781 | if (aligned_middle_new_high < middle_high()) { |
duke@435 | 782 | middle_needs = |
duke@435 | 783 | pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char)); |
duke@435 | 784 | } |
duke@435 | 785 | size_t lower_needs = 0; |
duke@435 | 786 | if (aligned_lower_new_high < lower_high()) { |
duke@435 | 787 | lower_needs = |
duke@435 | 788 | pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char)); |
duke@435 | 789 | } |
duke@435 | 790 | |
duke@435 | 791 | // Check contiguity. |
duke@435 | 792 | assert(middle_high_boundary() <= upper_high() && |
duke@435 | 793 | upper_high() <= upper_high_boundary(), |
duke@435 | 794 | "high address must be contained within the region"); |
duke@435 | 795 | assert(lower_high_boundary() <= middle_high() && |
duke@435 | 796 | middle_high() <= middle_high_boundary(), |
duke@435 | 797 | "high address must be contained within the region"); |
duke@435 | 798 | assert(low_boundary() <= lower_high() && |
duke@435 | 799 | lower_high() <= lower_high_boundary(), |
duke@435 | 800 | "high address must be contained within the region"); |
duke@435 | 801 | |
duke@435 | 802 | // Uncommit |
duke@435 | 803 | if (upper_needs > 0) { |
duke@435 | 804 | assert(middle_high_boundary() <= aligned_upper_new_high && |
duke@435 | 805 | aligned_upper_new_high + upper_needs <= upper_high_boundary(), |
duke@435 | 806 | "must not shrink beyond region"); |
duke@435 | 807 | if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) { |
duke@435 | 808 | debug_only(warning("os::uncommit_memory failed")); |
duke@435 | 809 | return; |
duke@435 | 810 | } else { |
duke@435 | 811 | _upper_high -= upper_needs; |
duke@435 | 812 | } |
duke@435 | 813 | } |
duke@435 | 814 | if (middle_needs > 0) { |
duke@435 | 815 | assert(lower_high_boundary() <= aligned_middle_new_high && |
duke@435 | 816 | aligned_middle_new_high + middle_needs <= middle_high_boundary(), |
duke@435 | 817 | "must not shrink beyond region"); |
duke@435 | 818 | if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) { |
duke@435 | 819 | debug_only(warning("os::uncommit_memory failed")); |
duke@435 | 820 | return; |
duke@435 | 821 | } else { |
duke@435 | 822 | _middle_high -= middle_needs; |
duke@435 | 823 | } |
duke@435 | 824 | } |
duke@435 | 825 | if (lower_needs > 0) { |
duke@435 | 826 | assert(low_boundary() <= aligned_lower_new_high && |
duke@435 | 827 | aligned_lower_new_high + lower_needs <= lower_high_boundary(), |
duke@435 | 828 | "must not shrink beyond region"); |
duke@435 | 829 | if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) { |
duke@435 | 830 | debug_only(warning("os::uncommit_memory failed")); |
duke@435 | 831 | return; |
duke@435 | 832 | } else { |
duke@435 | 833 | _lower_high -= lower_needs; |
duke@435 | 834 | } |
duke@435 | 835 | } |
duke@435 | 836 | |
duke@435 | 837 | _high -= size; |
duke@435 | 838 | } |
duke@435 | 839 | |
duke@435 | 840 | #ifndef PRODUCT |
duke@435 | 841 | void VirtualSpace::check_for_contiguity() { |
duke@435 | 842 | // Check contiguity. |
duke@435 | 843 | assert(low_boundary() <= lower_high() && |
duke@435 | 844 | lower_high() <= lower_high_boundary(), |
duke@435 | 845 | "high address must be contained within the region"); |
duke@435 | 846 | assert(lower_high_boundary() <= middle_high() && |
duke@435 | 847 | middle_high() <= middle_high_boundary(), |
duke@435 | 848 | "high address must be contained within the region"); |
duke@435 | 849 | assert(middle_high_boundary() <= upper_high() && |
duke@435 | 850 | upper_high() <= upper_high_boundary(), |
duke@435 | 851 | "high address must be contained within the region"); |
duke@435 | 852 | assert(low() >= low_boundary(), "low"); |
duke@435 | 853 | assert(low_boundary() <= lower_high_boundary(), "lower high boundary"); |
duke@435 | 854 | assert(upper_high_boundary() <= high_boundary(), "upper high boundary"); |
duke@435 | 855 | assert(high() <= upper_high(), "upper high"); |
duke@435 | 856 | } |
duke@435 | 857 | |
duke@435 | 858 | void VirtualSpace::print() { |
duke@435 | 859 | tty->print ("Virtual space:"); |
duke@435 | 860 | if (special()) tty->print(" (pinned in memory)"); |
duke@435 | 861 | tty->cr(); |
duke@435 | 862 | tty->print_cr(" - committed: %ld", committed_size()); |
duke@435 | 863 | tty->print_cr(" - reserved: %ld", reserved_size()); |
duke@435 | 864 | tty->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high()); |
duke@435 | 865 | tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary()); |
duke@435 | 866 | } |
duke@435 | 867 | |
duke@435 | 868 | #endif |