src/share/vm/runtime/virtualspace.cpp

author:      dbuck
date:        Wed, 31 Jan 2018 19:24:57 -0500
changeset:   9289:427b2fb1944f
parent:      7782:30e04eba9e29
child:       7994:04ff2f6cd0eb
child:       9475:61523d110335
permissions: -rw-r--r--

8189170: Add option to disable stack overflow checking in primordial thread for use with JNI_CreateJavaJVM
Reviewed-by: dcubed

duke@435 1 /*
drchase@6680 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "oops/markOop.hpp"
stefank@2314 27 #include "oops/oop.inline.hpp"
stefank@2314 28 #include "runtime/virtualspace.hpp"
zgu@3900 29 #include "services/memTracker.hpp"
stefank@2314 30 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 31 # include "os_linux.inline.hpp"
stefank@2314 32 #endif
stefank@2314 33 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 34 # include "os_solaris.inline.hpp"
stefank@2314 35 #endif
stefank@2314 36 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 37 # include "os_windows.inline.hpp"
stefank@2314 38 #endif
goetz@6461 39 #ifdef TARGET_OS_FAMILY_aix
goetz@6461 40 # include "os_aix.inline.hpp"
goetz@6461 41 #endif
never@3156 42 #ifdef TARGET_OS_FAMILY_bsd
never@3156 43 # include "os_bsd.inline.hpp"
never@3156 44 #endif
duke@435 45
drchase@6680 46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
duke@435 47
duke@435 48 // ReservedSpace
stefank@5578 49
stefank@5578 50 // Dummy constructor
stefank@5578 51 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
stefank@5578 52 _alignment(0), _special(false), _executable(false) {
stefank@5578 53 }
stefank@5578 54
tschatzl@7782 55 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
tschatzl@7782 56 bool has_preferred_page_size = preferred_page_size != 0;
ehelin@7780 57 // Want to use large pages where possible and pad with small pages.
tschatzl@7782 58 size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
stefank@5578 59 bool large_pages = page_size != (size_t)os::vm_page_size();
tschatzl@7781 60 size_t alignment;
tschatzl@7782 61 if (large_pages && has_preferred_page_size) {
tschatzl@7781 62 alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
tschatzl@7781 63 // ReservedSpace initialization requires size to be aligned to the given
tschatzl@7781 64 // alignment. Align the size up.
tschatzl@7781 65 size = align_size_up(size, alignment);
tschatzl@7781 66 } else {
tschatzl@7781 67 // Don't force the alignment to be large page aligned,
tschatzl@7781 68 // since that will waste memory.
tschatzl@7781 69 alignment = os::vm_allocation_granularity();
tschatzl@7781 70 }
stefank@5578 71 initialize(size, alignment, large_pages, NULL, 0, false);
duke@435 72 }
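// Illustrative use of the constructor above (a hypothetical call site, not
// taken from this file): reserving 10M with a preferred 2M page size. If
// large pages end up being used, the size is aligned up to the alignment
// chosen above before the space is reserved.
//
//   ReservedSpace rs(10 * M, 2 * M);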
duke@435 73
duke@435 74 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
coleenp@672 75 bool large,
coleenp@672 76 char* requested_address,
coleenp@672 77 const size_t noaccess_prefix) {
coleenp@672 78 initialize(size+noaccess_prefix, alignment, large, requested_address,
coleenp@1091 79 noaccess_prefix, false);
coleenp@1091 80 }
coleenp@1091 81
coleenp@1091 82 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
coleenp@1091 83 bool large,
coleenp@1091 84 bool executable) {
coleenp@1091 85 initialize(size, alignment, large, NULL, 0, executable);
duke@435 86 }
duke@435 87
kvn@1973 88 // Helper method.
kvn@1973 89 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
kvn@1973 90 const size_t size, bool special)
kvn@1973 91 {
kvn@1973 92 if (base == requested_address || requested_address == NULL)
kvn@1973 93 return false; // did not fail
kvn@1973 94
kvn@1973 95 if (base != NULL) {
kvn@1973 96 // A different reserve address may be acceptable in other cases,
kvn@1973 97 // but for compressed oops the heap should be at the requested address.
kvn@1973 98 assert(UseCompressedOops, "currently requested address used only for compressed oops");
kvn@1973 99 if (PrintCompressedOopsMode) {
kvn@1973 100 tty->cr();
johnc@3022 101 tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
kvn@1973 102 }
kvn@1973 103 // OS ignored requested address. Try different address.
kvn@1973 104 if (special) {
kvn@1973 105 if (!os::release_memory_special(base, size)) {
kvn@1973 106 fatal("os::release_memory_special failed");
kvn@1973 107 }
kvn@1973 108 } else {
kvn@1973 109 if (!os::release_memory(base, size)) {
kvn@1973 110 fatal("os::release_memory failed");
kvn@1973 111 }
kvn@1973 112 }
kvn@1973 113 }
kvn@1973 114 return true;
kvn@1973 115 }
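// Note: when this helper returns true it has already released 'base' (if it
// was non-NULL), so callers only need to retry at a different address or
// bail out.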
kvn@1973 116
duke@435 117 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
coleenp@672 118 char* requested_address,
coleenp@1091 119 const size_t noaccess_prefix,
coleenp@1091 120 bool executable) {
duke@435 121 const size_t granularity = os::vm_allocation_granularity();
johnc@3022 122 assert((size & (granularity - 1)) == 0,
duke@435 123 "size not aligned to os::vm_allocation_granularity()");
johnc@3022 124 assert((alignment & (granularity - 1)) == 0,
duke@435 125 "alignment not aligned to os::vm_allocation_granularity()");
duke@435 126 assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
duke@435 127 "not a power of 2");
duke@435 128
johnc@3022 129 alignment = MAX2(alignment, (size_t)os::vm_page_size());
johnc@3022 130
johnc@3022 131 // Assert that if noaccess_prefix is used, it is the same as alignment.
johnc@3022 132 assert(noaccess_prefix == 0 ||
johnc@3022 133 noaccess_prefix == alignment, "noaccess prefix wrong");
johnc@3022 134
duke@435 135 _base = NULL;
duke@435 136 _size = 0;
duke@435 137 _special = false;
coleenp@1091 138 _executable = executable;
duke@435 139 _alignment = 0;
coleenp@672 140 _noaccess_prefix = 0;
duke@435 141 if (size == 0) {
duke@435 142 return;
duke@435 143 }
duke@435 144
duke@435 145 // If OS doesn't support demand paging for large page memory, we need
duke@435 146 // to use reserve_memory_special() to reserve and pin the entire region.
duke@435 147 bool special = large && !os::can_commit_large_page_memory();
duke@435 148 char* base = NULL;
duke@435 149
kvn@1973 150 if (requested_address != 0) {
kvn@1973 151 requested_address -= noaccess_prefix; // adjust requested address
kvn@1973 152 assert(requested_address != NULL, "huge noaccess prefix?");
kvn@1973 153 }
kvn@1973 154
duke@435 155 if (special) {
duke@435 156
stefank@5578 157 base = os::reserve_memory_special(size, alignment, requested_address, executable);
duke@435 158
duke@435 159 if (base != NULL) {
kvn@1973 160 if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
kvn@1973 161 // OS ignored requested address. Try different address.
kvn@1973 162 return;
kvn@1973 163 }
stefank@5578 164 // Check alignment constraints.
johnc@3022 165 assert((uintptr_t) base % alignment == 0,
stefank@5578 166 err_msg("Large pages returned a non-aligned address, base: "
stefank@5578 167 PTR_FORMAT " alignment: " PTR_FORMAT,
stefank@5578 168 base, (void*)(uintptr_t)alignment));
duke@435 169 _special = true;
duke@435 170 } else {
duke@435 171 // failed; try to reserve regular memory below
kvn@1973 172 if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
kvn@1973 173 !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
kvn@1973 174 if (PrintCompressedOopsMode) {
kvn@1973 175 tty->cr();
kvn@1973 176 tty->print_cr("Reserve regular memory without large pages.");
kvn@1973 177 }
kvn@1973 178 }
duke@435 179 }
duke@435 180 }
duke@435 181
duke@435 182 if (base == NULL) {
duke@435 183 // Optimistically assume that the OS returns an aligned base pointer.
duke@435 184 // When reserving a large address range, most OSes seem to align to at
duke@435 185 // least 64K.
duke@435 186
duke@435 187 // If the memory was requested at a particular address, use
duke@435 188 // os::attempt_reserve_memory_at() to avoid over mapping something
duke@435 189 // important. If available space is not detected, return NULL.
duke@435 190
duke@435 191 if (requested_address != 0) {
kvn@1973 192 base = os::attempt_reserve_memory_at(size, requested_address);
kvn@1973 193 if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
kvn@1973 194 // OS ignored requested address. Try different address.
kvn@1973 195 base = NULL;
kvn@1973 196 }
duke@435 197 } else {
duke@435 198 base = os::reserve_memory(size, NULL, alignment);
duke@435 199 }
duke@435 200
duke@435 201 if (base == NULL) return;
duke@435 202
duke@435 203 // Check alignment constraints
johnc@3022 204 if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
duke@435 205 // Base not aligned, retry
duke@435 206 if (!os::release_memory(base, size)) fatal("os::release_memory failed");
brutisso@4369 207 // Make sure that size is aligned
duke@435 208 size = align_size_up(size, alignment);
brutisso@4369 209 base = os::reserve_memory_aligned(size, alignment);
johnc@3022 210
johnc@3022 211 if (requested_address != 0 &&
johnc@3022 212 failed_to_reserve_as_requested(base, requested_address, size, false)) {
johnc@3022 213 // As a result of the alignment constraints, the allocated base differs
johnc@3022 214 // from the requested address. Return to the caller, who can
johnc@3022 215 // take remedial action (like try again without a requested address).
johnc@3022 216 assert(_base == NULL, "should be");
johnc@3022 217 return;
johnc@3022 218 }
duke@435 219 }
duke@435 220 }
duke@435 221 // Done
duke@435 222 _base = base;
duke@435 223 _size = size;
johnc@3022 224 _alignment = alignment;
coleenp@672 225 _noaccess_prefix = noaccess_prefix;
coleenp@672 226
coleenp@672 227 // Assert that if noaccess_prefix is used, it is the same as alignment.
coleenp@672 228 assert(noaccess_prefix == 0 ||
coleenp@672 229 noaccess_prefix == _alignment, "noaccess prefix wrong");
duke@435 230
duke@435 231 assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
duke@435 232 "area must be distinguishable from marks for mark-sweep");
duke@435 233 assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
duke@435 234 "area must be distinguishable from marks for mark-sweep");
duke@435 235 }
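// Summary of the reservation paths implemented in initialize() above:
//
//   1. special (pinned large pages): os::reserve_memory_special(); on
//      success the whole region is committed up front and _special is set.
//   2. requested address given:      os::attempt_reserve_memory_at(), so an
//      existing mapping is never clobbered.
//   3. otherwise:                    os::reserve_memory(); if the result is
//      misaligned, it is released and re-reserved with
//      os::reserve_memory_aligned().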
duke@435 236
duke@435 237
duke@435 238 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
coleenp@1091 239 bool special, bool executable) {
duke@435 240 assert((size % os::vm_allocation_granularity()) == 0,
duke@435 241 "size not allocation aligned");
duke@435 242 _base = base;
duke@435 243 _size = size;
duke@435 244 _alignment = alignment;
coleenp@672 245 _noaccess_prefix = 0;
duke@435 246 _special = special;
coleenp@1091 247 _executable = executable;
duke@435 248 }
duke@435 249
duke@435 250
duke@435 251 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
duke@435 252 bool split, bool realloc) {
duke@435 253 assert(partition_size <= size(), "partition failed");
duke@435 254 if (split) {
coleenp@1091 255 os::split_reserved_memory(base(), size(), partition_size, realloc);
duke@435 256 }
coleenp@1091 257 ReservedSpace result(base(), partition_size, alignment, special(),
coleenp@1091 258 executable());
duke@435 259 return result;
duke@435 260 }
duke@435 261
duke@435 262
duke@435 263 ReservedSpace
duke@435 264 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
duke@435 265 assert(partition_size <= size(), "partition failed");
duke@435 266 ReservedSpace result(base() + partition_size, size() - partition_size,
coleenp@1091 267 alignment, special(), executable());
duke@435 268 return result;
duke@435 269 }
duke@435 270
duke@435 271
duke@435 272 size_t ReservedSpace::page_align_size_up(size_t size) {
duke@435 273 return align_size_up(size, os::vm_page_size());
duke@435 274 }
duke@435 275
duke@435 276
duke@435 277 size_t ReservedSpace::page_align_size_down(size_t size) {
duke@435 278 return align_size_down(size, os::vm_page_size());
duke@435 279 }
duke@435 280
duke@435 281
duke@435 282 size_t ReservedSpace::allocation_align_size_up(size_t size) {
duke@435 283 return align_size_up(size, os::vm_allocation_granularity());
duke@435 284 }
duke@435 285
duke@435 286
duke@435 287 size_t ReservedSpace::allocation_align_size_down(size_t size) {
duke@435 288 return align_size_down(size, os::vm_allocation_granularity());
duke@435 289 }
duke@435 290
duke@435 291
duke@435 292 void ReservedSpace::release() {
duke@435 293 if (is_reserved()) {
coleenp@672 294 char *real_base = _base - _noaccess_prefix;
coleenp@672 295 const size_t real_size = _size + _noaccess_prefix;
duke@435 296 if (special()) {
coleenp@672 297 os::release_memory_special(real_base, real_size);
duke@435 298 } else {
coleenp@672 299 os::release_memory(real_base, real_size);
duke@435 300 }
duke@435 301 _base = NULL;
duke@435 302 _size = 0;
coleenp@672 303 _noaccess_prefix = 0;
duke@435 304 _special = false;
coleenp@1091 305 _executable = false;
duke@435 306 }
duke@435 307 }
duke@435 308
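// Background for the assert below: with non-zero-based compressed oops and
// implicit null checks, decoding a null narrow oop yields the narrow oop
// base (the start of this reservation), so the first _noaccess_prefix bytes
// are protected to make such an access fault.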
coleenp@672 309 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
kvn@1973 310 assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
coleenp@3561 311 (Universe::narrow_oop_base() != NULL) &&
kvn@1973 312 Universe::narrow_oop_use_implicit_null_checks()),
kvn@1973 313 "noaccess_prefix should be used only with non zero based compressed oops");
kvn@1973 314
kvn@1973 315 // If there is no noaccess prefix, return.
coleenp@672 316 if (_noaccess_prefix == 0) return;
coleenp@672 317
coleenp@672 318 assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
coleenp@672 319 "must be at least page size big");
coleenp@672 320
coleenp@672 321 // Protect memory at the base of the allocated region.
coleenp@672 322 // If special, the page was committed (only matters on Windows)
coleenp@672 323 if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
coleenp@672 324 _special)) {
coleenp@672 325 fatal("cannot protect protection page");
coleenp@672 326 }
kvn@1973 327 if (PrintCompressedOopsMode) {
kvn@1973 328 tty->cr();
kvn@1973 329 tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
kvn@1973 330 }
coleenp@672 331
coleenp@672 332 _base += _noaccess_prefix;
coleenp@672 333 _size -= _noaccess_prefix;
coleenp@672 334 assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
coleenp@672 335 "must be exactly of required size and alignment");
coleenp@672 336 }
coleenp@672 337
coleenp@672 338 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
coleenp@672 339 bool large, char* requested_address) :
coleenp@672 340 ReservedSpace(size, alignment, large,
coleenp@672 341 requested_address,
kvn@1077 342 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
kvn@1077 343 Universe::narrow_oop_use_implicit_null_checks()) ?
coleenp@760 344 lcm(os::vm_page_size(), alignment) : 0) {
zgu@3900 345 if (base() > 0) {
zgu@3900 346 MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
zgu@3900 347 }
zgu@3900 348
coleenp@672 349 // Only reserved space for the Java heap should have a noaccess_prefix
coleenp@672 350 // if using compressed oops.
coleenp@672 351 protect_noaccess_prefix(size);
coleenp@672 352 }
coleenp@672 353
coleenp@1091 354 // Reserve space for the code segment. Same as the Java heap, except we
coleenp@1091 355 // mark this as executable.
coleenp@1091 356 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
coleenp@1091 357 size_t rs_align,
coleenp@1091 358 bool large) :
coleenp@1091 359 ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
zgu@3900 360 MemTracker::record_virtual_memory_type((address)base(), mtCode);
coleenp@1091 361 }
coleenp@1091 362
duke@435 363 // VirtualSpace
duke@435 364
duke@435 365 VirtualSpace::VirtualSpace() {
duke@435 366 _low_boundary = NULL;
duke@435 367 _high_boundary = NULL;
duke@435 368 _low = NULL;
duke@435 369 _high = NULL;
duke@435 370 _lower_high = NULL;
duke@435 371 _middle_high = NULL;
duke@435 372 _upper_high = NULL;
duke@435 373 _lower_high_boundary = NULL;
duke@435 374 _middle_high_boundary = NULL;
duke@435 375 _upper_high_boundary = NULL;
duke@435 376 _lower_alignment = 0;
duke@435 377 _middle_alignment = 0;
duke@435 378 _upper_alignment = 0;
coleenp@672 379 _special = false;
coleenp@1091 380 _executable = false;
duke@435 381 }
duke@435 382
duke@435 383
duke@435 384 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
ehelin@7780 385 const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
mgerdin@5859 386 return initialize_with_granularity(rs, committed_size, max_commit_granularity);
mgerdin@5859 387 }
mgerdin@5859 388
mgerdin@5859 389 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
duke@435 390 if(!rs.is_reserved()) return false; // allocation failed.
duke@435 391 assert(_low_boundary == NULL, "VirtualSpace already initialized");
mgerdin@5859 392 assert(max_commit_granularity > 0, "Granularity must be non-zero.");
mgerdin@5859 393
duke@435 394 _low_boundary = rs.base();
duke@435 395 _high_boundary = low_boundary() + rs.size();
duke@435 396
duke@435 397 _low = low_boundary();
duke@435 398 _high = low();
duke@435 399
duke@435 400 _special = rs.special();
coleenp@1091 401 _executable = rs.executable();
duke@435 402
duke@435 403 // When a VirtualSpace begins life at a large size, make all future expansion
duke@435 404 // and shrinking occur aligned to a granularity of large pages. This avoids
duke@435 405 // fragmentation of physical addresses that inhibits the use of large pages
duke@435 406 // by the OS virtual memory system. Empirically, we see that with a 4MB
duke@435 407 // page size, the only spaces that get handled this way are codecache and
duke@435 408 // the heap itself, both of which provide a substantial performance
duke@435 409 // boost in many benchmarks when covered by large pages.
duke@435 410 //
duke@435 411 // No attempt is made to force large page alignment at the very top and
duke@435 412 // bottom of the space if they are not aligned so already.
duke@435 413 _lower_alignment = os::vm_page_size();
mgerdin@5859 414 _middle_alignment = max_commit_granularity;
duke@435 415 _upper_alignment = os::vm_page_size();
duke@435 416
duke@435 417 // End of each region
duke@435 418 _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
duke@435 419 _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
duke@435 420 _upper_high_boundary = high_boundary();
duke@435 421
duke@435 422 // High address of each region
duke@435 423 _lower_high = low_boundary();
duke@435 424 _middle_high = lower_high_boundary();
duke@435 425 _upper_high = middle_high_boundary();
duke@435 426
duke@435 427 // commit to initial size
duke@435 428 if (committed_size > 0) {
duke@435 429 if (!expand_by(committed_size)) {
duke@435 430 return false;
duke@435 431 }
duke@435 432 }
duke@435 433 return true;
duke@435 434 }
duke@435 435
duke@435 436
duke@435 437 VirtualSpace::~VirtualSpace() {
duke@435 438 release();
duke@435 439 }
duke@435 440
duke@435 441
duke@435 442 void VirtualSpace::release() {
coleenp@672 443 // This does not release the underlying memory, since VirtualSpace never
coleenp@672 444 // reserved it; the caller must release it via rs.release();
duke@435 445 _low_boundary = NULL;
duke@435 446 _high_boundary = NULL;
duke@435 447 _low = NULL;
duke@435 448 _high = NULL;
duke@435 449 _lower_high = NULL;
duke@435 450 _middle_high = NULL;
duke@435 451 _upper_high = NULL;
duke@435 452 _lower_high_boundary = NULL;
duke@435 453 _middle_high_boundary = NULL;
duke@435 454 _upper_high_boundary = NULL;
duke@435 455 _lower_alignment = 0;
duke@435 456 _middle_alignment = 0;
duke@435 457 _upper_alignment = 0;
duke@435 458 _special = false;
coleenp@1091 459 _executable = false;
duke@435 460 }
duke@435 461
duke@435 462
duke@435 463 size_t VirtualSpace::committed_size() const {
duke@435 464 return pointer_delta(high(), low(), sizeof(char));
duke@435 465 }
duke@435 466
duke@435 467
duke@435 468 size_t VirtualSpace::reserved_size() const {
duke@435 469 return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
duke@435 470 }
duke@435 471
duke@435 472
duke@435 473 size_t VirtualSpace::uncommitted_size() const {
duke@435 474 return reserved_size() - committed_size();
duke@435 475 }
duke@435 476
stefank@5704 477 size_t VirtualSpace::actual_committed_size() const {
stefank@5704 478 // Special VirtualSpaces commit all reserved space up front.
stefank@5704 479 if (special()) {
stefank@5704 480 return reserved_size();
stefank@5704 481 }
stefank@5704 482
stefank@5704 483 size_t committed_low = pointer_delta(_lower_high, _low_boundary, sizeof(char));
stefank@5704 484 size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary, sizeof(char));
stefank@5704 485 size_t committed_high = pointer_delta(_upper_high, _middle_high_boundary, sizeof(char));
stefank@5704 486
stefank@5704 487 #ifdef ASSERT
stefank@5704 488 size_t lower = pointer_delta(_lower_high_boundary, _low_boundary, sizeof(char));
stefank@5704 489 size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary, sizeof(char));
stefank@5704 490 size_t upper = pointer_delta(_upper_high_boundary, _middle_high_boundary, sizeof(char));
stefank@5704 491
stefank@5704 492 if (committed_high > 0) {
stefank@5704 493 assert(committed_low == lower, "Must be");
stefank@5704 494 assert(committed_middle == middle, "Must be");
stefank@5704 495 }
stefank@5704 496
stefank@5704 497 if (committed_middle > 0) {
stefank@5704 498 assert(committed_low == lower, "Must be");
stefank@5704 499 }
stefank@5704 500 if (committed_middle < middle) {
stefank@5704 501 assert(committed_high == 0, "Must be");
stefank@5704 502 }
stefank@5704 503
stefank@5704 504 if (committed_low < lower) {
stefank@5704 505 assert(committed_high == 0, "Must be");
stefank@5704 506 assert(committed_middle == 0, "Must be");
stefank@5704 507 }
stefank@5704 508 #endif
stefank@5704 509
stefank@5704 510 return committed_low + committed_middle + committed_high;
stefank@5704 511 }
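// The ASSERT block above checks the invariant that the regions fill
// bottom-up: a higher region can only be committed if every region below it
// is fully committed.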
stefank@5704 512
duke@435 513
duke@435 514 bool VirtualSpace::contains(const void* p) const {
duke@435 515 return low() <= (const char*) p && (const char*) p < high();
duke@435 516 }
duke@435 517
duke@435 518 /*
duke@435 519 First we need to determine if a particular virtual space is using large
duke@435 520 pages. This is done in the initialize function, and only virtual spaces
duke@435 521 that are larger than LargePageSizeInBytes use large pages. Once we
duke@435 522 have determined this, all expand_by and shrink_by calls must grow and
duke@435 523 shrink by large page size chunks. If a particular request
duke@435 524 is within the current large page, the call to commit and uncommit memory
duke@435 525 can be ignored. In the case that the low and high boundaries of this
duke@435 526 space are not large page aligned, the pages leading to the first large
duke@435 527 page address and the pages after the last large page address must be
duke@435 528 allocated with default pages.
duke@435 529 */
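// A sketch of the three commit regions set up in
// initialize_with_granularity() above (boundaries follow from the rounding
// to middle_alignment() done there):
//
//   low_boundary()                                       high_boundary()
//   +--- lower ----+------------ middle -----------+--- upper ----+
//   | small pages  | large pages (middle_alignment)| small pages  |
//   +--------------+-------------------------------+--------------+
//                  ^                               ^
//        lower_high_boundary()           middle_high_boundary()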
duke@435 530 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
duke@435 531 if (uncommitted_size() < bytes) return false;
duke@435 532
duke@435 533 if (special()) {
duke@435 534 // don't commit memory if the entire space is pinned in memory
duke@435 535 _high += bytes;
duke@435 536 return true;
duke@435 537 }
duke@435 538
duke@435 539 char* previous_high = high();
duke@435 540 char* unaligned_new_high = high() + bytes;
duke@435 541 assert(unaligned_new_high <= high_boundary(),
duke@435 542 "cannot expand by more than upper boundary");
duke@435 543
duke@435 544 // Calculate where the new high for each of the regions should be. If
duke@435 545 // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
duke@435 546 // then the unaligned lower and upper new highs would be the
duke@435 547 // lower_high() and upper_high() respectively.
duke@435 548 char* unaligned_lower_new_high =
duke@435 549 MIN2(unaligned_new_high, lower_high_boundary());
duke@435 550 char* unaligned_middle_new_high =
duke@435 551 MIN2(unaligned_new_high, middle_high_boundary());
duke@435 552 char* unaligned_upper_new_high =
duke@435 553 MIN2(unaligned_new_high, upper_high_boundary());
duke@435 554
duke@435 555 // Align the new highs based on the regions' alignment. Lower and upper
duke@435 556 // alignment will always be default page size. middle alignment will be
duke@435 557 // LargePageSizeInBytes if the actual size of the virtual space is in
duke@435 558 // fact larger than LargePageSizeInBytes.
duke@435 559 char* aligned_lower_new_high =
duke@435 560 (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
duke@435 561 char* aligned_middle_new_high =
duke@435 562 (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
duke@435 563 char* aligned_upper_new_high =
duke@435 564 (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
duke@435 565
duke@435 566 // Determine which regions need to grow in this expand_by call.
duke@435 567 // If you are growing in the lower region, high() must be in that
duke@435 568 // region so calculate the size based on high(). For the middle and
duke@435 569 // upper regions, determine the starting point of growth based on the
duke@435 570 // location of high(). By getting the MAX of the region's low address
duke@435 571 // (or the previous region's high address) and high(), we can tell if it
duke@435 572 // is an intra or inter region growth.
duke@435 573 size_t lower_needs = 0;
duke@435 574 if (aligned_lower_new_high > lower_high()) {
duke@435 575 lower_needs =
duke@435 576 pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
duke@435 577 }
duke@435 578 size_t middle_needs = 0;
duke@435 579 if (aligned_middle_new_high > middle_high()) {
duke@435 580 middle_needs =
duke@435 581 pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
duke@435 582 }
duke@435 583 size_t upper_needs = 0;
duke@435 584 if (aligned_upper_new_high > upper_high()) {
duke@435 585 upper_needs =
duke@435 586 pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
duke@435 587 }
duke@435 588
duke@435 589 // Check contiguity.
duke@435 590 assert(low_boundary() <= lower_high() &&
duke@435 591 lower_high() <= lower_high_boundary(),
duke@435 592 "high address must be contained within the region");
duke@435 593 assert(lower_high_boundary() <= middle_high() &&
duke@435 594 middle_high() <= middle_high_boundary(),
duke@435 595 "high address must be contained within the region");
duke@435 596 assert(middle_high_boundary() <= upper_high() &&
duke@435 597 upper_high() <= upper_high_boundary(),
duke@435 598 "high address must be contained within the region");
duke@435 599
duke@435 600 // Commit regions
duke@435 601 if (lower_needs > 0) {
duke@435 602 assert(low_boundary() <= lower_high() &&
duke@435 603 lower_high() + lower_needs <= lower_high_boundary(),
duke@435 604 "must not expand beyond region");
coleenp@1091 605 if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
dcubed@5255 606 debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
dcubed@5255 607 ", lower_needs=" SIZE_FORMAT ", %d) failed",
dcubed@5255 608 lower_high(), lower_needs, _executable);)
duke@435 609 return false;
duke@435 610 } else {
duke@435 611 _lower_high += lower_needs;
dcubed@5255 612 }
duke@435 613 }
duke@435 614 if (middle_needs > 0) {
duke@435 615 assert(lower_high_boundary() <= middle_high() &&
duke@435 616 middle_high() + middle_needs <= middle_high_boundary(),
duke@435 617 "must not expand beyond region");
coleenp@1091 618 if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
coleenp@1091 619 _executable)) {
dcubed@5255 620 debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
dcubed@5255 621 ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
dcubed@5255 622 ", %d) failed", middle_high(), middle_needs,
dcubed@5255 623 middle_alignment(), _executable);)
duke@435 624 return false;
duke@435 625 }
duke@435 626 _middle_high += middle_needs;
duke@435 627 }
duke@435 628 if (upper_needs > 0) {
duke@435 629 assert(middle_high_boundary() <= upper_high() &&
duke@435 630 upper_high() + upper_needs <= upper_high_boundary(),
duke@435 631 "must not expand beyond region");
coleenp@1091 632 if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
dcubed@5255 633 debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
dcubed@5255 634 ", upper_needs=" SIZE_FORMAT ", %d) failed",
dcubed@5255 635 upper_high(), upper_needs, _executable);)
duke@435 636 return false;
duke@435 637 } else {
duke@435 638 _upper_high += upper_needs;
duke@435 639 }
duke@435 640 }
duke@435 641
duke@435 642 if (pre_touch || AlwaysPreTouch) {
tschatzl@7777 643 os::pretouch_memory(previous_high, unaligned_new_high);
duke@435 644 }
duke@435 645
duke@435 646 _high += bytes;
duke@435 647 return true;
duke@435 648 }
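// Illustrative (hypothetical) use of expand_by() above: grow the committed
// part of an already-initialized VirtualSpace 'vs' by 1M, without
// pre-touching the pages.
//
//   if (!vs.expand_by(1 * M, false)) {
//     // commit failed; the committed size is left unchanged
//   }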
duke@435 649
duke@435 650 // A page is uncommitted if the contents of the entire page are deemed unusable.
duke@435 651 // Continue to decrement the high() pointer until it reaches a page boundary,
duke@435 652 // at which point that particular page can be uncommitted.
duke@435 653 void VirtualSpace::shrink_by(size_t size) {
duke@435 654 if (committed_size() < size)
duke@435 655 fatal("Cannot shrink virtual space to negative size");
duke@435 656
duke@435 657 if (special()) {
duke@435 658 // don't uncommit if the entire space is pinned in memory
duke@435 659 _high -= size;
duke@435 660 return;
duke@435 661 }
duke@435 662
duke@435 663 char* unaligned_new_high = high() - size;
duke@435 664 assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
duke@435 665
duke@435 666 // Calculate new unaligned address
duke@435 667 char* unaligned_upper_new_high =
duke@435 668 MAX2(unaligned_new_high, middle_high_boundary());
duke@435 669 char* unaligned_middle_new_high =
duke@435 670 MAX2(unaligned_new_high, lower_high_boundary());
duke@435 671 char* unaligned_lower_new_high =
duke@435 672 MAX2(unaligned_new_high, low_boundary());
duke@435 673
duke@435 674 // Align address to region's alignment
duke@435 675 char* aligned_upper_new_high =
duke@435 676 (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
duke@435 677 char* aligned_middle_new_high =
duke@435 678 (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
duke@435 679 char* aligned_lower_new_high =
duke@435 680 (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
duke@435 681
duke@435 682 // Determine which regions need to shrink
duke@435 683 size_t upper_needs = 0;
duke@435 684 if (aligned_upper_new_high < upper_high()) {
duke@435 685 upper_needs =
duke@435 686 pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
duke@435 687 }
duke@435 688 size_t middle_needs = 0;
duke@435 689 if (aligned_middle_new_high < middle_high()) {
duke@435 690 middle_needs =
duke@435 691 pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
duke@435 692 }
duke@435 693 size_t lower_needs = 0;
duke@435 694 if (aligned_lower_new_high < lower_high()) {
duke@435 695 lower_needs =
duke@435 696 pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
duke@435 697 }
duke@435 698
duke@435 699 // Check contiguity.
duke@435 700 assert(middle_high_boundary() <= upper_high() &&
duke@435 701 upper_high() <= upper_high_boundary(),
duke@435 702 "high address must be contained within the region");
duke@435 703 assert(lower_high_boundary() <= middle_high() &&
duke@435 704 middle_high() <= middle_high_boundary(),
duke@435 705 "high address must be contained within the region");
duke@435 706 assert(low_boundary() <= lower_high() &&
duke@435 707 lower_high() <= lower_high_boundary(),
duke@435 708 "high address must be contained within the region");
duke@435 709
duke@435 710 // Uncommit
duke@435 711 if (upper_needs > 0) {
duke@435 712 assert(middle_high_boundary() <= aligned_upper_new_high &&
duke@435 713 aligned_upper_new_high + upper_needs <= upper_high_boundary(),
duke@435 714 "must not shrink beyond region");
duke@435 715 if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
duke@435 716 debug_only(warning("os::uncommit_memory failed"));
duke@435 717 return;
duke@435 718 } else {
duke@435 719 _upper_high -= upper_needs;
duke@435 720 }
duke@435 721 }
duke@435 722 if (middle_needs > 0) {
duke@435 723 assert(lower_high_boundary() <= aligned_middle_new_high &&
duke@435 724 aligned_middle_new_high + middle_needs <= middle_high_boundary(),
duke@435 725 "must not shrink beyond region");
duke@435 726 if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
duke@435 727 debug_only(warning("os::uncommit_memory failed"));
duke@435 728 return;
duke@435 729 } else {
duke@435 730 _middle_high -= middle_needs;
duke@435 731 }
duke@435 732 }
duke@435 733 if (lower_needs > 0) {
duke@435 734 assert(low_boundary() <= aligned_lower_new_high &&
duke@435 735 aligned_lower_new_high + lower_needs <= lower_high_boundary(),
duke@435 736 "must not shrink beyond region");
duke@435 737 if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
duke@435 738 debug_only(warning("os::uncommit_memory failed"));
duke@435 739 return;
duke@435 740 } else {
duke@435 741 _lower_high -= lower_needs;
duke@435 742 }
duke@435 743 }
duke@435 744
duke@435 745 _high -= size;
duke@435 746 }
duke@435 747
duke@435 748 #ifndef PRODUCT
duke@435 749 void VirtualSpace::check_for_contiguity() {
duke@435 750 // Check contiguity.
duke@435 751 assert(low_boundary() <= lower_high() &&
duke@435 752 lower_high() <= lower_high_boundary(),
duke@435 753 "high address must be contained within the region");
duke@435 754 assert(lower_high_boundary() <= middle_high() &&
duke@435 755 middle_high() <= middle_high_boundary(),
duke@435 756 "high address must be contained within the region");
duke@435 757 assert(middle_high_boundary() <= upper_high() &&
duke@435 758 upper_high() <= upper_high_boundary(),
duke@435 759 "high address must be contained within the region");
duke@435 760 assert(low() >= low_boundary(), "low");
duke@435 761 assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
duke@435 762 assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
duke@435 763 assert(high() <= upper_high(), "upper high");
duke@435 764 }
duke@435 765
stefank@5708 766 void VirtualSpace::print_on(outputStream* out) {
stefank@5708 767 out->print ("Virtual space:");
stefank@5708 768 if (special()) out->print(" (pinned in memory)");
stefank@5708 769 out->cr();
stefank@5708 770 out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
stefank@5708 771 out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size());
stefank@5708 772 out->print_cr(" - [low, high]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
stefank@5708 773 out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
duke@435 774 }
duke@435 775
stefank@5708 776 void VirtualSpace::print() {
stefank@5708 777 print_on(tty);
stefank@5708 778 }
stefank@5578 779
stefank@5578 780 /////////////// Unit tests ///////////////
stefank@5578 781
stefank@5578 782 #ifndef PRODUCT
stefank@5578 783
stefank@5578 784 #define test_log(...) \
stefank@5578 785 do {\
stefank@5578 786 if (VerboseInternalVMTests) { \
stefank@5578 787 tty->print_cr(__VA_ARGS__); \
stefank@5578 788 tty->flush(); \
stefank@5578 789 }\
stefank@5578 790 } while (false)
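// The do { ... } while (false) wrapper makes test_log() expand to a single
// statement, so it composes safely with unbraced if/else at call sites.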
stefank@5578 791
stefank@5578 792 class TestReservedSpace : AllStatic {
stefank@5578 793 public:
stefank@5578 794 static void small_page_write(void* addr, size_t size) {
stefank@5578 795 size_t page_size = os::vm_page_size();
stefank@5578 796
stefank@5578 797 char* end = (char*)addr + size;
stefank@5578 798 for (char* p = (char*)addr; p < end; p += page_size) {
stefank@5578 799 *p = 1;
stefank@5578 800 }
stefank@5578 801 }
stefank@5578 802
stefank@5578 803 static void release_memory_for_test(ReservedSpace rs) {
stefank@5578 804 if (rs.special()) {
stefank@5578 805 guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
stefank@5578 806 } else {
stefank@5578 807 guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
stefank@5578 808 }
stefank@5578 809 }
stefank@5578 810
stefank@5578 811 static void test_reserved_space1(size_t size, size_t alignment) {
stefank@5578 812 test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
stefank@5578 813
stefank@5578 814 assert(is_size_aligned(size, alignment), "Incorrect input parameters");
stefank@5578 815
stefank@5578 816 ReservedSpace rs(size, // size
stefank@5578 817 alignment, // alignment
stefank@5578 818 UseLargePages, // large
stefank@5578 819 NULL, // requested_address
stefank@5578 820 0); // noaccess_prefix
stefank@5578 821
stefank@5578 822 test_log(" rs.special() == %d", rs.special());
stefank@5578 823
stefank@5578 824 assert(rs.base() != NULL, "Must be");
stefank@5578 825 assert(rs.size() == size, "Must be");
stefank@5578 826
stefank@5578 827 assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
stefank@5578 828 assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
stefank@5578 829
stefank@5578 830 if (rs.special()) {
stefank@5578 831 small_page_write(rs.base(), size);
stefank@5578 832 }
stefank@5578 833
stefank@5578 834 release_memory_for_test(rs);
stefank@5578 835 }
stefank@5578 836
stefank@5578 837 static void test_reserved_space2(size_t size) {
stefank@5578 838 test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
stefank@5578 839
stefank@5578 840 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
stefank@5578 841
stefank@5578 842 ReservedSpace rs(size);
stefank@5578 843
stefank@5578 844 test_log(" rs.special() == %d", rs.special());
stefank@5578 845
stefank@5578 846 assert(rs.base() != NULL, "Must be");
stefank@5578 847 assert(rs.size() == size, "Must be");
stefank@5578 848
stefank@5578 849 if (rs.special()) {
stefank@5578 850 small_page_write(rs.base(), size);
stefank@5578 851 }
stefank@5578 852
stefank@5578 853 release_memory_for_test(rs);
stefank@5578 854 }
stefank@5578 855
stefank@5578 856 static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
stefank@5578 857 test_log("test_reserved_space3(%p, %p, %d)",
stefank@5578 858 (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
stefank@5578 859
stefank@5578 860 assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
stefank@5578 861 assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
stefank@5578 862
stefank@5578 863 bool large = maybe_large && UseLargePages && size >= os::large_page_size();
stefank@5578 864
stefank@5578 865 ReservedSpace rs(size, alignment, large, false);
stefank@5578 866
stefank@5578 867 test_log(" rs.special() == %d", rs.special());
stefank@5578 868
stefank@5578 869 assert(rs.base() != NULL, "Must be");
stefank@5578 870 assert(rs.size() == size, "Must be");
stefank@5578 871
stefank@5578 872 if (rs.special()) {
stefank@5578 873 small_page_write(rs.base(), size);
stefank@5578 874 }
stefank@5578 875
stefank@5578 876 release_memory_for_test(rs);
stefank@5578 877 }
stefank@5578 878
stefank@5578 879
stefank@5578 880 static void test_reserved_space1() {
stefank@5578 881 size_t size = 2 * 1024 * 1024;
stefank@5578 882 size_t ag = os::vm_allocation_granularity();
stefank@5578 883
stefank@5578 884 test_reserved_space1(size, ag);
stefank@5578 885 test_reserved_space1(size * 2, ag);
stefank@5578 886 test_reserved_space1(size * 10, ag);
stefank@5578 887 }
stefank@5578 888
stefank@5578 889 static void test_reserved_space2() {
stefank@5578 890 size_t size = 2 * 1024 * 1024;
stefank@5578 891 size_t ag = os::vm_allocation_granularity();
stefank@5578 892
stefank@5578 893 test_reserved_space2(size * 1);
stefank@5578 894 test_reserved_space2(size * 2);
stefank@5578 895 test_reserved_space2(size * 10);
stefank@5578 896 test_reserved_space2(ag);
stefank@5578 897 test_reserved_space2(size - ag);
stefank@5578 898 test_reserved_space2(size);
stefank@5578 899 test_reserved_space2(size + ag);
stefank@5578 900 test_reserved_space2(size * 2);
stefank@5578 901 test_reserved_space2(size * 2 - ag);
stefank@5578 902 test_reserved_space2(size * 2 + ag);
stefank@5578 903 test_reserved_space2(size * 3);
stefank@5578 904 test_reserved_space2(size * 3 - ag);
stefank@5578 905 test_reserved_space2(size * 3 + ag);
stefank@5578 906 test_reserved_space2(size * 10);
stefank@5578 907 test_reserved_space2(size * 10 + size / 2);
stefank@5578 908 }
stefank@5578 909
stefank@5578 910 static void test_reserved_space3() {
stefank@5578 911 size_t ag = os::vm_allocation_granularity();
stefank@5578 912
stefank@5578 913 test_reserved_space3(ag, ag , false);
stefank@5578 914 test_reserved_space3(ag * 2, ag , false);
stefank@5578 915 test_reserved_space3(ag * 3, ag , false);
stefank@5578 916 test_reserved_space3(ag * 2, ag * 2, false);
stefank@5578 917 test_reserved_space3(ag * 4, ag * 2, false);
stefank@5578 918 test_reserved_space3(ag * 8, ag * 2, false);
stefank@5578 919 test_reserved_space3(ag * 4, ag * 4, false);
stefank@5578 920 test_reserved_space3(ag * 8, ag * 4, false);
stefank@5578 921 test_reserved_space3(ag * 16, ag * 4, false);
stefank@5578 922
stefank@5578 923 if (UseLargePages) {
stefank@5578 924 size_t lp = os::large_page_size();
stefank@5578 925
stefank@5578 926 // Without large pages
stefank@5578 927 test_reserved_space3(lp, ag * 4, false);
stefank@5578 928 test_reserved_space3(lp * 2, ag * 4, false);
stefank@5578 929 test_reserved_space3(lp * 4, ag * 4, false);
stefank@5578 930 test_reserved_space3(lp, lp , false);
stefank@5578 931 test_reserved_space3(lp * 2, lp , false);
stefank@5578 932 test_reserved_space3(lp * 3, lp , false);
stefank@5578 933 test_reserved_space3(lp * 2, lp * 2, false);
stefank@5578 934 test_reserved_space3(lp * 4, lp * 2, false);
stefank@5578 935 test_reserved_space3(lp * 8, lp * 2, false);
stefank@5578 936
stefank@5578 937 // With large pages
stefank@5578 938 test_reserved_space3(lp, ag * 4 , true);
stefank@5578 939 test_reserved_space3(lp * 2, ag * 4, true);
stefank@5578 940 test_reserved_space3(lp * 4, ag * 4, true);
stefank@5578 941 test_reserved_space3(lp, lp , true);
stefank@5578 942 test_reserved_space3(lp * 2, lp , true);
stefank@5578 943 test_reserved_space3(lp * 3, lp , true);
stefank@5578 944 test_reserved_space3(lp * 2, lp * 2, true);
stefank@5578 945 test_reserved_space3(lp * 4, lp * 2, true);
stefank@5578 946 test_reserved_space3(lp * 8, lp * 2, true);
stefank@5578 947 }
stefank@5578 948 }
stefank@5578 949
stefank@5578 950 static void test_reserved_space() {
stefank@5578 951 test_reserved_space1();
stefank@5578 952 test_reserved_space2();
stefank@5578 953 test_reserved_space3();
stefank@5578 954 }
stefank@5578 955 };
stefank@5578 956
stefank@5578 957 void TestReservedSpace_test() {
stefank@5578 958 TestReservedSpace::test_reserved_space();
stefank@5578 959 }
stefank@5578 960
stefank@5704 961 #define assert_equals(actual, expected) \
stefank@5704 962 assert(actual == expected, \
stefank@5704 963 err_msg("Got " SIZE_FORMAT " expected " \
stefank@5704 964 SIZE_FORMAT, actual, expected));
stefank@5704 965
stefank@5704 966 #define assert_ge(value1, value2) \
stefank@5704 967 assert(value1 >= value2, \
stefank@5704 968 err_msg("'" #value1 "': " SIZE_FORMAT " '" \
stefank@5704 969 #value2 "': " SIZE_FORMAT, value1, value2));
stefank@5704 970
stefank@5704 971 #define assert_lt(value1, value2) \
stefank@5704 972 assert(value1 < value2, \
stefank@5704 973 err_msg("'" #value1 "': " SIZE_FORMAT " '" \
stefank@5704 974 #value2 "': " SIZE_FORMAT, value1, value2));
stefank@5704 975
stefank@5704 976
stefank@5704 977 class TestVirtualSpace : AllStatic {
mgerdin@5859 978 enum TestLargePages {
mgerdin@5859 979 Default,
mgerdin@5859 980 Disable,
mgerdin@5859 981 Reserve,
mgerdin@5859 982 Commit
mgerdin@5859 983 };
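// How the helpers below interpret each mode:
//   Default - ReservedSpace(size) (may use large pages), default commit
//             granularity
//   Reserve - same reservation as Default, default commit granularity
//   Disable - small-page reservation, commit granularity forced to
//             os::vm_page_size()
//   Commit  - small-page reservation, commit granularity chosen via
//             os::page_size_for_region_unaligned()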
mgerdin@5859 984
mgerdin@5859 985 static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
mgerdin@5859 986 switch(mode) {
mgerdin@5859 987 default:
mgerdin@5859 988 case Default:
mgerdin@5859 989 case Reserve:
mgerdin@5859 990 return ReservedSpace(reserve_size_aligned);
mgerdin@5859 991 case Disable:
mgerdin@5859 992 case Commit:
mgerdin@5859 993 return ReservedSpace(reserve_size_aligned,
mgerdin@5859 994 os::vm_allocation_granularity(),
mgerdin@5859 995 /* large */ false, /* exec */ false);
mgerdin@5859 996 }
mgerdin@5859 997 }
mgerdin@5859 998
mgerdin@5859 999 static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
mgerdin@5859 1000 switch(mode) {
mgerdin@5859 1001 default:
mgerdin@5859 1002 case Default:
mgerdin@5859 1003 case Reserve:
mgerdin@5859 1004 return vs.initialize(rs, 0);
mgerdin@5859 1005 case Disable:
mgerdin@5859 1006 return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
mgerdin@5859 1007 case Commit:
ehelin@7780 1008 return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
mgerdin@5859 1009 }
mgerdin@5859 1010 }
mgerdin@5859 1011
stefank@5704 1012 public:
mgerdin@5859 1013 static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
mgerdin@5859 1014 TestLargePages mode = Default) {
stefank@5704 1015 size_t granularity = os::vm_allocation_granularity();
stefank@5704 1016 size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
stefank@5704 1017
mgerdin@5859 1018 ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
stefank@5704 1019
stefank@5704 1020 assert(reserved.is_reserved(), "Must be");
stefank@5704 1021
stefank@5704 1022 VirtualSpace vs;
mgerdin@5859 1023 bool initialized = initialize_virtual_space(vs, reserved, mode);
stefank@5704 1024 assert(initialized, "Failed to initialize VirtualSpace");
stefank@5704 1025
stefank@5704 1026 vs.expand_by(commit_size, false);
stefank@5704 1027
stefank@5704 1028 if (vs.special()) {
stefank@5704 1029 assert_equals(vs.actual_committed_size(), reserve_size_aligned);
stefank@5704 1030 } else {
stefank@5704 1031 assert_ge(vs.actual_committed_size(), commit_size);
stefank@5704 1032 // Approximate the commit granularity.
mgerdin@5859 1033 // Make sure that we don't commit using large pages
mgerdin@5859 1034 // if large pages have been disabled for this VirtualSpace.
mgerdin@5859 1035 size_t commit_granularity = (mode == Disable || !UseLargePages) ?
mgerdin@5859 1036 os::vm_page_size() : os::large_page_size();
stefank@5704 1037 assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
stefank@5704 1038 }
stefank@5704 1039
stefank@5704 1040 reserved.release();
stefank@5704 1041 }
stefank@5704 1042
stefank@5704 1043 static void test_virtual_space_actual_committed_space_one_large_page() {
stefank@5704 1044 if (!UseLargePages) {
stefank@5704 1045 return;
stefank@5704 1046 }
stefank@5704 1047
stefank@5704 1048 size_t large_page_size = os::large_page_size();
stefank@5704 1049
stefank@5704 1050 ReservedSpace reserved(large_page_size, large_page_size, true, false);
stefank@5704 1051
stefank@5704 1052 assert(reserved.is_reserved(), "Must be");
stefank@5704 1053
stefank@5704 1054 VirtualSpace vs;
stefank@5704 1055 bool initialized = vs.initialize(reserved, 0);
stefank@5704 1056 assert(initialized, "Failed to initialize VirtualSpace");
stefank@5704 1057
stefank@5704 1058 vs.expand_by(large_page_size, false);
stefank@5704 1059
stefank@5704 1060 assert_equals(vs.actual_committed_size(), large_page_size);
stefank@5704 1061
stefank@5704 1062 reserved.release();
stefank@5704 1063 }
stefank@5704 1064
stefank@5704 1065 static void test_virtual_space_actual_committed_space() {
stefank@5704 1066 test_virtual_space_actual_committed_space(4 * K, 0);
stefank@5704 1067 test_virtual_space_actual_committed_space(4 * K, 4 * K);
stefank@5704 1068 test_virtual_space_actual_committed_space(8 * K, 0);
stefank@5704 1069 test_virtual_space_actual_committed_space(8 * K, 4 * K);
stefank@5704 1070 test_virtual_space_actual_committed_space(8 * K, 8 * K);
stefank@5704 1071 test_virtual_space_actual_committed_space(12 * K, 0);
stefank@5704 1072 test_virtual_space_actual_committed_space(12 * K, 4 * K);
stefank@5704 1073 test_virtual_space_actual_committed_space(12 * K, 8 * K);
stefank@5704 1074 test_virtual_space_actual_committed_space(12 * K, 12 * K);
stefank@5704 1075 test_virtual_space_actual_committed_space(64 * K, 0);
stefank@5704 1076 test_virtual_space_actual_committed_space(64 * K, 32 * K);
stefank@5704 1077 test_virtual_space_actual_committed_space(64 * K, 64 * K);
stefank@5704 1078 test_virtual_space_actual_committed_space(2 * M, 0);
stefank@5704 1079 test_virtual_space_actual_committed_space(2 * M, 4 * K);
stefank@5704 1080 test_virtual_space_actual_committed_space(2 * M, 64 * K);
stefank@5704 1081 test_virtual_space_actual_committed_space(2 * M, 1 * M);
stefank@5704 1082 test_virtual_space_actual_committed_space(2 * M, 2 * M);
stefank@5704 1083 test_virtual_space_actual_committed_space(10 * M, 0);
stefank@5704 1084 test_virtual_space_actual_committed_space(10 * M, 4 * K);
stefank@5704 1085 test_virtual_space_actual_committed_space(10 * M, 8 * K);
stefank@5704 1086 test_virtual_space_actual_committed_space(10 * M, 1 * M);
stefank@5704 1087 test_virtual_space_actual_committed_space(10 * M, 2 * M);
stefank@5704 1088 test_virtual_space_actual_committed_space(10 * M, 5 * M);
stefank@5704 1089 test_virtual_space_actual_committed_space(10 * M, 10 * M);
stefank@5704 1090 }
stefank@5704 1091
mgerdin@5859 1092 static void test_virtual_space_disable_large_pages() {
mgerdin@5859 1093 if (!UseLargePages) {
mgerdin@5859 1094 return;
mgerdin@5859 1095 }
mgerdin@5859 1096 // These test cases verify that commits fall back to small-page granularity when we force VirtualSpace to disable large pages.
mgerdin@5859 1097 test_virtual_space_actual_committed_space(10 * M, 0, Disable);
mgerdin@5859 1098 test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
mgerdin@5859 1099 test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
mgerdin@5859 1100 test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
mgerdin@5859 1101 test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
mgerdin@5859 1102 test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
mgerdin@5859 1103 test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
mgerdin@5859 1104
mgerdin@5859 1105 test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
mgerdin@5859 1106 test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
mgerdin@5859 1107 test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
mgerdin@5859 1108 test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
mgerdin@5859 1109 test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
mgerdin@5859 1110 test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
mgerdin@5859 1111 test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
mgerdin@5859 1112
mgerdin@5859 1113 test_virtual_space_actual_committed_space(10 * M, 0, Commit);
mgerdin@5859 1114 test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
mgerdin@5859 1115 test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
mgerdin@5859 1116 test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
mgerdin@5859 1117 test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
mgerdin@5859 1118 test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
mgerdin@5859 1119 test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
mgerdin@5859 1120 }
mgerdin@5859 1121
stefank@5704 1122 static void test_virtual_space() {
stefank@5704 1123 test_virtual_space_actual_committed_space();
stefank@5704 1124 test_virtual_space_actual_committed_space_one_large_page();
mgerdin@5859 1125 test_virtual_space_disable_large_pages();
stefank@5704 1126 }
stefank@5704 1127 };
stefank@5704 1128
stefank@5704 1129 void TestVirtualSpace_test() {
stefank@5704 1130 TestVirtualSpace::test_virtual_space();
stefank@5704 1131 }
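// These *_test() entry points are expected to be driven by HotSpot's
// internal test runner (e.g. under the develop flag -XX:+ExecuteInternalVMTests
// in this code base); VerboseInternalVMTests controls the test_log() output
// above.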
stefank@5704 1132
stefank@5578 1133 #endif // PRODUCT
stefank@5578 1134
duke@435 1135 #endif
