/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size + noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
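
// Rough outline of the reservation logic implemented below (a sketch, not a
// contract):
//  1. If large pages are requested but the OS cannot commit large-page memory
//     on demand, initialize() reserves and pins the whole region up front with
//     os::reserve_memory_special() and marks the space as "special".
//  2. Otherwise it reserves plain address space with os::reserve_memory(), or
//     with os::attempt_reserve_memory_at() when a specific address was
//     requested.
//  3. If the returned base does not satisfy the alignment constraints, the
//     reservation is released and retried with os::reserve_memory_aligned().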
// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                     PTR_FORMAT " alignment: " PTR_FORMAT,
                     base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over-mapping something
    // important.
    // If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned.
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}

ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}

size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}

size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}

size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}
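
// A note on the noaccess prefix (illustrative; the addresses below are made
// up): with non-zero-based compressed oops and implicit null checks, the heap
// is reserved with one extra page below it.  protect_noaccess_prefix() marks
// that page MEM_PROT_NONE and then moves _base/_size up past it, so the usable
// heap starts just above the protected page.  For example, a reservation at
// 0x800000000 with a 4K prefix yields a heap starting at 0x800001000, while an
// access through a null compressed oop faults in the protected page at
// 0x800000000; the signal handler can then turn that fault into an implicit
// null check.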
// Reserve space for code segment.  Same as Java heap, only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary         = NULL;
  _high_boundary        = NULL;
  _low                  = NULL;
  _high                 = NULL;
  _lower_high           = NULL;
  _middle_high          = NULL;
  _upper_high           = NULL;
  _lower_high_boundary  = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary  = NULL;
  _lower_alignment      = 0;
  _middle_alignment     = 0;
  _upper_alignment      = 0;
  _special              = false;
  _executable           = false;
}

bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low  = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region.
  _lower_high_boundary  = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary  = high_boundary();

  // High address of each region.
  _lower_high  = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high  = middle_high_boundary();

  // Commit to initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}

VirtualSpace::~VirtualSpace() {
  release();
}

void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary         = NULL;
  _high_boundary        = NULL;
  _low                  = NULL;
  _high                 = NULL;
  _lower_high           = NULL;
  _middle_high          = NULL;
  _upper_high           = NULL;
  _lower_high_boundary  = NULL;
  _middle_high_boundary = NULL;
  _upper_high_boundary  = NULL;
  _lower_alignment      = 0;
  _middle_alignment     = 0;
  _upper_alignment      = 0;
  _special              = false;
  _executable           = false;
}

size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}

size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}

size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}

bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
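
// Worked example of the three-region split (numbers are purely illustrative,
// assuming a 4K small page size and a 2M commit granularity): for a space with
// low_boundary = 0x10001000 and high_boundary = 0x10801000,
//   lower_high_boundary  = round_to(0x10001000, 2M)   = 0x10200000
//   middle_high_boundary = round_down(0x10801000, 2M) = 0x10800000
//   upper_high_boundary  = 0x10801000
// so [0x10001000, 0x10200000) and [0x10800000, 0x10801000) are committed with
// small pages, while the 2M-aligned middle region [0x10200000, 0x10800000)
// can be committed with large pages.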
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // Don't commit memory if the entire space is pinned in memory.
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions.
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}
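
// Typical usage of the pair of classes in this file, as a rough sketch (the
// calls below are the ReservedSpace/VirtualSpace APIs defined above, but the
// sizes are arbitrary and error handling is elided):
//
//   ReservedSpace rs(2 * M);           // reserve 2M of address space only
//   VirtualSpace vs;
//   if (vs.initialize(rs, 512 * K)) {  // commit the first 512K
//     vs.expand_by(512 * K, false);    // grow the committed part to 1M
//     vs.shrink_by(256 * K);           // uncommit the top 256K again
//   }
//   rs.release();                      // VirtualSpace::release() does not
//                                      // unmap; the ReservedSpace owner does.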
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // Don't uncommit if the entire space is pinned in memory.
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address for each region.
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align each address to its region's alignment.
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink.
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit.
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
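
// The tests below are compiled only into non-product builds.
// TestReservedSpace_test() and TestVirtualSpace_test() are intended to be
// driven by the internal VM test runner (in JDK 8 this is typically the
// develop flag -XX:+ExecuteInternalVMTests; the flag name is mentioned here
// for orientation only), and test_log() output is gated by
// VerboseInternalVMTests.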
#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }
  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp,     ag * 4, true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp,     lp    , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)              \
  assert(value1 >= value2,                     \
    err_msg("'" #value1 "': " SIZE_FORMAT " '" \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)              \
  assert(value1 < value2,                      \
    err_msg("'" #value1 "': " SIZE_FORMAT " '" \
      #value2 "': " SIZE_FORMAT, value1, value2));

class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                  os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that if we force VirtualSpace to disable large
    // pages, committing happens with small-page granularity.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif