src/share/vm/runtime/virtualspace.cpp

author:      brutisso
date:        Fri, 26 Apr 2013 09:53:22 +0200
changeset:   5019:b294421fa3c5
parent:      4465:203f64878aab
child:       5255:a837fa3d3f86
permissions: -rw-r--r--

8012915: ReservedSpace::align_reserved_region() broken on Windows
Summary: remove unused constructors and helper methods for ReservedHeapSpace and ReservedSpace
Reviewed-by: mgerdin, jmasa, johnc, tschatzl

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method: returns true if the OS placed the reservation somewhere
// other than requested_address (the memory has then already been released
// and the caller must retry or give up); returns false if the reservation
// landed at the requested address or no particular address was requested.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for a compressed oops heap it should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}
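
// Usage sketch (illustrative only; sizes are hypothetical): the public
// constructors above all funnel into initialize().  Reserving 1 M with
// default alignment, no large pages and no requested address:
//
//   ReservedSpace rs(1 * M);  // initialize(1*M, 0, false, NULL, 0, false)
//   if (rs.is_reserved()) {
//     // rs.base() and rs.size() describe the reserved (uncommitted) range.
//   }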


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}
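
// Partitioning sketch (illustrative; assumes the single-size-argument
// overloads with default arguments declared in virtualspace.hpp):
// carving one reservation in two without splitting the OS mapping.
//
//   ReservedSpace whole(2 * M);
//   ReservedSpace head = whole.first_part(1 * M);  // [base, base + 1M)
//   ReservedSpace tail = whole.last_part(1 * M);   // [base + 1M, base + 2M)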


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
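
// For example, with a 4 K vm_page_size(), page_align_size_up(5000) returns
// 8192 and page_align_size_down(5000) returns 4096.  (The 4 K figure is
// illustrative; the actual granularity comes from the OS at runtime.)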


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

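// The noaccess prefix is a MEM_PROT_NONE-protected range at the bottom of
// the reservation so that, with non-zero-based compressed oops, decoding a
// narrow NULL yields an address that faults instead of pointing into valid
// heap memory (see the assert below).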
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for the code segment.  Same as the Java heap, except we
// mark this as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary  = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary  = high_boundary();

  // High address of each region
  _lower_high  = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high  = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
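
// Region layout sketch (illustrative).  For a space whose boundaries are not
// large-page aligned, initialize() above splits the reservation into three
// regions:
//
//   low_boundary                                           high_boundary
//   |- lower region -|------- middle region -------|- upper region -|
//      small pages            large pages               small pages
//                    ^                              ^
//        lower_high_boundary              middle_high_boundary
//        (rounded up to a                 (rounded down to a
//         large-page boundary)             large-page boundary)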


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary = NULL;
  _high_boundary = NULL;
  _low = NULL;
  _high = NULL;
  _lower_high = NULL;
  _middle_high = NULL;
  _upper_high = NULL;
  _lower_high_boundary = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary = NULL;
  _lower_alignment = 0;
  _middle_alignment = 0;
  _upper_alignment = 0;
  _special = false;
  _executable = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}
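
// Illustrative sequence (hypothetical sizes; assumes the default arguments
// declared in virtualspace.hpp): grow and then shrink a space by page-sized
// amounts; committed_size() tracks the net effect.
//
//   VirtualSpace vs;
//   vs.initialize(rs, 0);              // reserve only, commit nothing yet
//   vs.expand_by(os::vm_page_size());  // commit one page
//   vs.shrink_by(os::vm_page_size());  // uncommit it again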

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

#endif