/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  bool has_preferred_page_size = preferred_page_size != 0;
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  size_t alignment;
  if (large_pages && has_preferred_page_size) {
    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    // ReservedSpace initialization requires size to be aligned to the given
    // alignment. Align the size up.
    size = align_size_up(size, alignment);
  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
    alignment = os::vm_allocation_granularity();
  }
  initialize(size, alignment, large_pages, NULL, 0, false);
}
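
// Illustrative sketch (not VM code): how the constructor above picks its
// alignment.  Assuming a 2M preferred page size, a 4K small page, and an
// allocation granularity <= 2M, a request of 10M + 4K is padded up to the
// next 2M boundary:
//
//   size_t sz    = 10*M + 4*K;
//   size_t align = MAX2((size_t)2*M, (size_t)os::vm_allocation_granularity());
//   sz = align_size_up(sz, align);   // sz is now 12M
//
// Without a preferred page size, only allocation-granularity alignment is
// used and no extra padding occurs.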

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // Different reserve address may be acceptable in other cases
    // but for compressed oops heap should be at requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}
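
// Contract note: failed_to_reserve_as_requested() returns true only when a
// specific address was requested and the OS handed back a different one; as
// a side effect it releases the mis-placed mapping (via release_memory or
// release_memory_special) so the caller can retry or bail out cleanly.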

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {
    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                     PTR_FORMAT " alignment: " PTR_FORMAT,
                     base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OSes return an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid overmapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned.
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}
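
// Reservation strategy in initialize(), summarized: (1) if large pages must
// be pinned up front, try os::reserve_memory_special(); (2) otherwise, or on
// failure, reserve ordinary memory, honoring any requested address with
// os::attempt_reserve_memory_at(); (3) if the result is misaligned, release
// it and re-reserve with os::reserve_memory_aligned().  Any failure leaves
// _base == NULL, which is what is_reserved() reports to callers.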

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}
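
// Usage sketch (hypothetical sizes, not VM code; assumes the single-argument
// convenience overloads declared in virtualspace.hpp): carving one
// reservation into two adjacent views of the same underlying mapping.  With
// split == true, the OS-level mapping is also split so the parts can later
// be released independently.
//
//   ReservedSpace rs(4 * M);                    // one 4M reservation
//   ReservedSpace head = rs.first_part(1 * M);  // [base, base + 1M)
//   ReservedSpace tail = rs.last_part(1 * M);   // [base + 1M, base + 4M)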

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for the code segment.  Same as the Java heap, except that
// we mark this space as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
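
// Layout sketch (illustrative, not to scale): a ReservedHeapSpace with a
// noaccess prefix, as set up by protect_noaccess_prefix() above.
//
//   real base                        _base                  _base + _size
//   |--- protected (MEM_PROT_NONE) ---|-------- Java heap --------|
//        _noaccess_prefix bytes
//
// With non-zero-based compressed oops, decoding a NULL narrow oop yields an
// address inside the protected prefix, so the access traps and the VM can
// keep using implicit null checks instead of explicit NULL tests.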

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // Allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not so aligned already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit to initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}
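
// Region layout sketch (illustrative): the reserved range is split into
// three commit regions so that only the large-page-aligned middle uses the
// large commit granularity.
//
//   low_boundary                                            high_boundary
//   |-- lower --|---------------- middle ---------------|-- upper --|
//               ^lower_high_boundary                    ^middle_high_boundary
//
// lower and upper are committed with small (vm_page_size) pages; middle
// grows and shrinks in max_commit_granularity chunks.  When the boundaries
// are already large-page aligned, lower and upper are empty.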

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we have
   determined this, all expand_by and shrink_by calls must grow and shrink
   by large page size chunks.  If a particular request is within the current
   large page, the call to commit and uncommit memory can be ignored.  In
   the case that the low and high boundaries of this space are not large
   page aligned, the pages leading to the first large page address and the
   pages after the last large page address must be allocated with default
   pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // Don't commit memory if the entire space is pinned in memory.
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
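  // (Worked example, illustrative numbers: if unaligned_new_high lands
  //  inside the middle region and the lower region is already fully
  //  committed, the MIN2 clamps below pin the lower region's new high at
  //  lower_high_boundary() while the middle and upper values track
  //  unaligned_new_high; after rounding, only the middle region has a
  //  non-zero "needs" and is committed in large-page-sized chunks.)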
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  lower and upper
  // alignment will always be the default page size.  middle alignment will
  // be LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  _high += bytes;
  return true;
}
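
// Minimal usage sketch (hypothetical caller, not VM code): reserve an
// address range, then commit and uncommit on demand.
//
//   ReservedSpace rs(1 * M);
//   VirtualSpace vs;
//   if (vs.initialize(rs, 0)) {    // reserve only; nothing committed yet
//     vs.expand_by(64 * K, false); // commit the first 64K
//     vs.shrink_by(32 * K);        // uncommit the upper half of that again
//   }
//   rs.release();                  // VirtualSpace::release() frees no memory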

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // Don't uncommit if the entire space is pinned in memory.
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
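
  // Note (clarifying the comment above shrink_by): round_to rounds *up*, so
  // each region's new high stays on the committed side of a partially-used
  // page; a page is only uncommitted once high() has retreated past a full
  // page (or large-page) boundary for that region.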

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}
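
// The unit tests below exercise ReservedSpace and VirtualSpace directly and
// are compiled out of product builds.  As an assumption about the
// surrounding harness: internal VM tests of this kind are typically run via
// the develop flag -XX:+ExecuteInternalVMTests, with -XX:+VerboseInternalVMTests
// enabling the test_log output used below.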

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
             (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp,     ag * 4, true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp,     lp    , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)          \
  assert(actual == expected,                     \
         err_msg("Got " SIZE_FORMAT " expected " \
                 SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                   \
  assert(value1 >= value2,                          \
         err_msg("'" #value1 "': " SIZE_FORMAT " '" \
                 #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                   \
  assert(value1 < value2,                           \
         err_msg("'" #value1 "': " SIZE_FORMAT " '" \
                 #value2 "': " SIZE_FORMAT, value1, value2));


class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                  os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K,  0);
    test_virtual_space_actual_committed_space(4 * K,  4 * K);
    test_virtual_space_actual_committed_space(8 * K,  0);
    test_virtual_space_actual_committed_space(8 * K,  4 * K);
    test_virtual_space_actual_committed_space(8 * K,  8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M,  0);
    test_virtual_space_actual_committed_space(2 * M,  4 * K);
    test_virtual_space_actual_committed_space(2 * M,  64 * K);
    test_virtual_space_actual_committed_space(2 * M,  1 * M);
    test_virtual_space_actual_committed_space(2 * M,  2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that if we force VirtualSpace to disable large
    // pages, the memory is committed with small pages.
    test_virtual_space_actual_committed_space(10 * M, 0,      Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K,  Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K,  Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M,  Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M,  Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M,  Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0,      Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K,  Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K,  Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M,  Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M,  Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M,  Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0,      Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K,  Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K,  Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M,  Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M,  Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M,  Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif