src/share/vm/runtime/virtualspace.cpp

author:      sbohne
date:        Thu, 10 Apr 2008 15:49:16 -0400
changeset:   528 c6ff24ceec1c
parent:      435 a61af66fc99e
child:       672 1fdb98a17101
child:       777 37f87013dfd8
permissions: -rw-r--r--

6686407: Fix for 6666698 broke -XX:BiasedLockingStartupDelay=0
Summary: Stack allocated VM_EnableBiasedLocking op must be marked as such
Reviewed-by: xlu, acorn, never, dholmes

/*
 * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_virtualspace.cpp.incl"


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large, char* requested_address) {
  initialize(size, alignment, large, requested_address);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}
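
// Illustration of the arithmetic above (hypothetical numbers): with
// addr = 0x10001000, prefix_size = 0x1000 and suffix_align = 0x10000,
// beg_ofs = (0x10001000 + 0x1000) & 0xffff = 0x2000 and beg_delta =
// 0x10000 - 0x2000 = 0xe000.  The region kept starts at addr + 0xe000 =
// 0x1000f000, which puts the end of the prefix (0x1000f000 + 0x1000 =
// 0x10010000) on a suffix_align boundary; the 0xe000 bytes below it and
// any end_delta bytes above the suffix are released back to the OS.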

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
         "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
         "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
         "suffix_align not divisible by prefix_align");

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(prefix_size + suffix_size, prefix_align, true);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;

  // Optimistically try to reserve the exact size needed.
  const size_t size = prefix_size + suffix_size;
  char* addr = os::reserve_memory(size, NULL, prefix_align);
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = (size_t(addr) + prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
}
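
// Hypothetical usage sketch (sizes chosen purely for illustration): a caller
// wanting a 13M region whose first megabyte is 64K-aligned and whose 12M
// remainder starts on a 4M boundary could write
//
//   ReservedSpace rs(1*M, 64*K, 12*M, 4*M);
//   if (rs.is_reserved()) { ... }
//
// If the optimistic os::reserve_memory() call happens to place base + 1*M on
// a 4M boundary no extra work is needed; otherwise the region is released and
// re-reserved with MAX2(ofs, suffix_align - ofs) bytes of slack so that
// align_reserved_region() can trim it into place.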

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  _base = NULL;
  _size = 0;
  _special = false;
  _alignment = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {
    // It's not hard to implement reserve_memory_special() such that it can
    // allocate at a fixed address, but there seems to be no use for this
    // feature for now, so it's not implemented.
    assert(requested_address == NULL, "not implemented");

    base = os::reserve_memory_special(size);

    if (base != NULL) {
      // Check alignment constraints
      if (alignment > 0) {
        assert((uintptr_t) base % alignment == 0,
               "Large pages returned a non-aligned address");
      }
      _special = true;
    } else {
      // failed; try to reserve regular memory below
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid overmapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve a size large enough to do manual alignment:
      // increase size to a multiple of the desired alignment.
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
      char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
      if (extra_base == NULL) return;
      // Do manual alignment
      base = (char*) align_size_up((uintptr_t) extra_base, alignment);
      assert(base >= extra_base, "just checking");
      // Release unused areas
      size_t unused_bottom_size = base - extra_base;
      size_t unused_top_size = extra_size - size - unused_bottom_size;
      assert(unused_bottom_size % os::vm_allocation_granularity() == 0,
             "size not allocation aligned");
      assert(unused_top_size % os::vm_allocation_granularity() == 0,
             "size not allocation aligned");
      if (unused_bottom_size > 0) {
        os::release_memory(extra_base, unused_bottom_size);
      }
      if (unused_top_size > 0) {
        os::release_memory(base + size, unused_top_size);
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}
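
// Illustration of the manual-alignment retry above (hypothetical numbers):
// requesting size = 8M with alignment = 4M, suppose os::reserve_memory()
// returns base = 0x40110000, which is only 64K-aligned.  That block is
// released and extra_size = 8M + 4M = 12M is reserved instead; if the new
// block again starts at 0x40110000, alignment yields base = 0x40400000.
// The 0x2f0000 unused bytes below base and the 0x110000 bytes above
// base + 8M are released, leaving exactly 8M reserved on a 4M boundary.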


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _special = special;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(_base, _size, partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special());
  return result;
}
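
// Hypothetical usage sketch (sizes for illustration only): carving one
// reservation into two adjacent parts, using the signatures defined above:
//
//   ReservedSpace whole(32*M, 64*K, false, NULL);
//   ReservedSpace young = whole.first_part(8*M, 64*K, false, true);
//   ReservedSpace old   = whole.last_part(8*M, 64*K);
//
// first_part() covers [base, base + 8M) and last_part() the remaining
// [base + 8M, base + 32M); both inherit the special() attribute of the
// original reservation, and neither re-reserves any memory.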


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    if (special()) {
      os::release_memory_special(_base, _size);
    } else {
      os::release_memory(_base, _size);
    }
    _base = NULL;
    _size = 0;
    _special = false;
  }
}


// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary         = NULL;
  _high_boundary        = NULL;
  _low                  = NULL;
  _high                 = NULL;
  _lower_high           = NULL;
  _middle_high          = NULL;
  _upper_high           = NULL;
  _lower_high_boundary  = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary  = NULL;
  _lower_alignment      = 0;
  _middle_alignment     = 0;
  _upper_alignment      = 0;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low  = low_boundary();
  _high = low();

  _special = rs.special();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary  = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary  = high_boundary();

  // High address of each region
  _lower_high  = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high  = middle_high_boundary();

  // Commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
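
// Illustration of the three-region split (hypothetical numbers): for a 128M
// reservation spanning [0x40200000, 0x48200000), suppose
// os::page_size_for_region() selects a 4M large page as middle_alignment.
// Then
//   lower_high_boundary()  = round_to(0x40200000, 4M)   = 0x40400000
//   middle_high_boundary() = round_down(0x48200000, 4M) = 0x48000000
// so the 2M lower and 2M upper regions are committed with default pages
// while the 124M middle region can be committed with large pages.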


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  (void)os::release_memory(low_boundary(), reserved_size());
  _low_boundary         = NULL;
  _high_boundary        = NULL;
  _low                  = NULL;
  _high                 = NULL;
  _lower_high           = NULL;
  _middle_high          = NULL;
  _upper_high           = NULL;
  _lower_high_boundary  = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary  = NULL;
  _lower_alignment      = 0;
  _middle_alignment     = 0;
  _upper_alignment      = 0;
  _special              = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function, and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}
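
// Continuing the illustration above: with high() sitting at the 0x40400000
// lower/middle boundary, a call to expand_by(3*M) gives unaligned_new_high =
// 0x40700000.  lower_needs is 0 (the lower region is already full),
// middle_needs rounds the 3M up to one 4M large page, and upper_needs is 0;
// a single 4M chunk is committed in the middle region while high() itself
// advances by exactly the 3M requested.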

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

#endif
