src/share/vm/runtime/virtualspace.cpp

Fri, 16 Jan 2015 10:29:12 +0100

author
ehelin
date
Fri, 16 Jan 2015 10:29:12 +0100
changeset 7780
5788dbd1f2d6
parent 7778
c2ce24504334
child 7781
33e421924c67
permissions
-rw-r--r--

8066875: VirtualSpace does not use large pages
Reviewed-by: stefank, tschatzl, anoll, thartmann

     1 /*
     2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "oops/markOop.hpp"
    27 #include "oops/oop.inline.hpp"
    28 #include "runtime/virtualspace.hpp"
    29 #include "services/memTracker.hpp"
    30 #ifdef TARGET_OS_FAMILY_linux
    31 # include "os_linux.inline.hpp"
    32 #endif
    33 #ifdef TARGET_OS_FAMILY_solaris
    34 # include "os_solaris.inline.hpp"
    35 #endif
    36 #ifdef TARGET_OS_FAMILY_windows
    37 # include "os_windows.inline.hpp"
    38 #endif
    39 #ifdef TARGET_OS_FAMILY_aix
    40 # include "os_aix.inline.hpp"
    41 #endif
    42 #ifdef TARGET_OS_FAMILY_bsd
    43 # include "os_bsd.inline.hpp"
    44 #endif
    46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    48 // ReservedSpace
// Dummy constructor: produces an unreserved, zero-sized space (NULL base).
// Callers must use one of the reserving constructors or initialize() before
// the space is usable.
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}
    55 ReservedSpace::ReservedSpace(size_t size) {
    56   // Want to use large pages where possible and pad with small pages.
    57   size_t page_size = os::page_size_for_region_unaligned(size, 1);
    58   bool large_pages = page_size != (size_t)os::vm_page_size();
    59   // Don't force the alignment to be large page aligned,
    60   // since that will waste memory.
    61   size_t alignment = os::vm_allocation_granularity();
    62   initialize(size, alignment, large_pages, NULL, 0, false);
    63 }
// Reserve 'size' bytes plus room for a noaccess prefix at the base
// (the prefix is used by compressed oops for implicit NULL checks).
// The prefix is carved out of the same reservation, so it is added to
// the size passed to initialize(). Never executable.
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}
// Reserve 'size' bytes at no particular address, optionally backed by large
// pages and optionally executable (used e.g. for the code cache).
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
    79 // Helper method.
    80 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
    81                                            const size_t size, bool special)
    82 {
    83   if (base == requested_address || requested_address == NULL)
    84     return false; // did not fail
    86   if (base != NULL) {
    87     // Different reserve address may be acceptable in other cases
    88     // but for compressed oops heap should be at requested address.
    89     assert(UseCompressedOops, "currently requested address used only for compressed oops");
    90     if (PrintCompressedOopsMode) {
    91       tty->cr();
    92       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    93     }
    94     // OS ignored requested address. Try different address.
    95     if (special) {
    96       if (!os::release_memory_special(base, size)) {
    97         fatal("os::release_memory_special failed");
    98       }
    99     } else {
   100       if (!os::release_memory(base, size)) {
   101         fatal("os::release_memory failed");
   102       }
   103     }
   104   }
   105   return true;
   106 }
// Reserve 'size' bytes of virtual address space.
// On success, _base/_size/_alignment/_noaccess_prefix describe the
// reservation; on failure the space is left unreserved (_base == NULL).
//   size              - bytes to reserve; must be allocation-granularity aligned.
//   alignment         - required base alignment; 0 or a power of 2.
//   large             - try to back the region with large pages.
//   requested_address - preferred base address, or NULL for any address.
//   noaccess_prefix   - protection-page size carved out at the base
//                       (0, or equal to alignment).
//   executable        - whether the memory may hold generated code.
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // Alignment of at least a page is always required below.
  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  // Start from the unreserved state so early returns leave a clean object.
  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    // The caller asked for the post-prefix address; back up so the prefix
    // lands just below it.
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry: release and over-reserve so an aligned
      // sub-range can be carved out.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  // The reserved range must not collide with encoded mark words
  // (mark-sweep encodes pointers in object headers).
  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguisable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguisable from marks for mark-sweep");
}
// Wrap an already-reserved range [base, base+size) in a ReservedSpace.
// Does not reserve anything itself; used by first_part()/last_part() to
// describe partitions of an existing reservation.
ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}
   242 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
   243                                         bool split, bool realloc) {
   244   assert(partition_size <= size(), "partition failed");
   245   if (split) {
   246     os::split_reserved_memory(base(), size(), partition_size, realloc);
   247   }
   248   ReservedSpace result(base(), partition_size, alignment, special(),
   249                        executable());
   250   return result;
   251 }
   254 ReservedSpace
   255 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
   256   assert(partition_size <= size(), "partition failed");
   257   ReservedSpace result(base() + partition_size, size() - partition_size,
   258                        alignment, special(), executable());
   259   return result;
   260 }
// Round 'size' up to a multiple of the VM page size.
size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}
// Round 'size' down to a multiple of the VM page size.
size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}
// Round 'size' up to a multiple of the OS allocation granularity.
size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}
// Round 'size' down to a multiple of the OS allocation granularity.
size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
   283 void ReservedSpace::release() {
   284   if (is_reserved()) {
   285     char *real_base = _base - _noaccess_prefix;
   286     const size_t real_size = _size + _noaccess_prefix;
   287     if (special()) {
   288       os::release_memory_special(real_base, real_size);
   289     } else{
   290       os::release_memory(real_base, real_size);
   291     }
   292     _base = NULL;
   293     _size = 0;
   294     _noaccess_prefix = 0;
   295     _special = false;
   296     _executable = false;
   297   }
   298 }
// Make the noaccess prefix at the bottom of the reservation inaccessible and
// then exclude it from [_base, _base+_size). Afterwards _base/_size describe
// only the usable part; 'size' is the expected usable size (assert-checked).
// Only meaningful for non-zero-based compressed oops, where faulting on the
// prefix implements implicit NULL checks.
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  // Shift the usable window up past the protected prefix.
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
// Reserve space for the Java heap. A noaccess prefix of
// lcm(page size, alignment) is requested only when compressed oops run
// non-zero-based with implicit NULL checks; otherwise no prefix is used.
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    // Tag the reservation for native memory tracking.
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}
// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  // Tag the reservation for native memory tracking.
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
   354 // VirtualSpace
// Default constructor: all boundaries/marks NULL, alignments zero.
// The space becomes usable only after initialize()/
// initialize_with_granularity().
VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}
   375 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
   376   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
   377   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
   378 }
// Set up this VirtualSpace over the reservation 'rs', splitting it into
// three regions: a lower and upper region committed at small-page
// granularity and a middle region committed at 'max_commit_granularity'
// (typically the large page size). Commits 'committed_size' bytes up front.
// Returns false if 'rs' is not reserved or the initial commit fails.
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  // Nothing committed yet: low == high == low_boundary.
  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically,  we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region (all empty initially).
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
// Destructor: clears the bookkeeping via release(). Note that release()
// does not unmap the underlying reservation (see comment there).
VirtualSpace::~VirtualSpace() {
  release();
}
void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  // Only the bookkeeping is reset here.
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}
// Bytes between low() and high(), i.e. the logically committed portion.
size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}
// Total bytes of the underlying reservation.
size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}
// Reserved bytes not yet logically committed.
size_t VirtualSpace::uncommitted_size()  const {
  return reserved_size() - committed_size();
}
// Bytes actually committed to the OS, summed over the three regions. This
// can exceed committed_size() because commits are rounded up to each
// region's alignment.
size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  // Sanity: regions fill bottom-up, so a non-empty higher region implies
  // all lower regions are full, and a partially-filled region implies all
  // higher regions are empty.
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}
   505 bool VirtualSpace::contains(const void* p) const {
   506   return low() <= (const char*) p && (const char*) p < high();
   507 }
/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
// Grow the committed portion by 'bytes', committing memory region by region
// (lower, middle, upper) at each region's alignment. Returns false if not
// enough uncommitted space remains or an OS commit fails. When 'pre_touch'
// (or AlwaysPreTouch) is set, the newly committed range is touched to fault
// the pages in.
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    // Middle region commits carry the alignment so the OS can use large pages.
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  _high += bytes;
  return true;
}
// A page is uncommitted if the contents of the entire page is deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
//
// Shrink the committed portion by 'size' bytes, uncommitting whole pages
// region by region (upper, then middle, then lower) as they empty out.
// It is a fatal error for 'size' to exceed committed_size().
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment.  Note: rounding UP keeps any
  // partially-used page at the new high committed.
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
   739 #ifndef PRODUCT
// Debug-only invariant check: the three regions tile [low_boundary(),
// high_boundary()] contiguously and all high-water marks lie within their
// regions.
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}
// Print a human-readable summary of this VirtualSpace (committed/reserved
// sizes and the watermark/boundary addresses) to the given stream.
void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  // special() means the space was committed up front (e.g. pinned large
  // pages), so commit/uncommit are no-ops on it.
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}
// Convenience wrapper: print the summary to the default tty stream.
void VirtualSpace::print() {
  print_on(tty);
}
   771 /////////////// Unit tests ///////////////
   773 #ifndef PRODUCT
// Logging helper for the unit tests below: prints (and flushes) the
// formatted message only when -XX:+VerboseInternalVMTests is set.
// do/while(false) makes the macro behave as a single statement.
#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
// Unit tests for the different ReservedSpace constructors.
class TestReservedSpace : AllStatic {
 public:
  // Touch one byte on every small page of [addr, addr + size).  Used on
  // special() reservations to verify the memory really is committed and
  // writable up front.
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  // Release a test reservation.  special() mappings must go through the
  // matching os::release_memory_special() API.
  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  // Reserve with an explicit alignment, passing UseLargePages through, and
  // verify that base and size honor the requested alignment.
  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Reserve through the simple size-only constructor.
  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Reserve with explicit alignment and an optional large-page request.
  // Large pages are only actually requested when allowed, enabled, and the
  // size is at least one large page.
  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Driver: exercise test_reserved_space1 with a few multiples of 2M.
  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  // Driver: exercise test_reserved_space2 around allocation-granularity
  // boundaries (size +/- one granule).
  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  // Driver: exercise test_reserved_space3 with and without large pages,
  // over various size/alignment combinations.
  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  // Run all ReservedSpace tests.
  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};
// Exported entry point for the ReservedSpace unit tests above.
void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}
// Assertion helpers for the VirtualSpace tests below: like plain assert()
// but the failure message (via err_msg) includes both operand values,
// formatted as size_t.

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));
   968 class TestVirtualSpace : AllStatic {
   969   enum TestLargePages {
   970     Default,
   971     Disable,
   972     Reserve,
   973     Commit
   974   };
   976   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
   977     switch(mode) {
   978     default:
   979     case Default:
   980     case Reserve:
   981       return ReservedSpace(reserve_size_aligned);
   982     case Disable:
   983     case Commit:
   984       return ReservedSpace(reserve_size_aligned,
   985                            os::vm_allocation_granularity(),
   986                            /* large */ false, /* exec */ false);
   987     }
   988   }
   990   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
   991     switch(mode) {
   992     default:
   993     case Default:
   994     case Reserve:
   995       return vs.initialize(rs, 0);
   996     case Disable:
   997       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
   998     case Commit:
   999       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
  1003  public:
  1004   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
  1005                                                         TestLargePages mode = Default) {
  1006     size_t granularity = os::vm_allocation_granularity();
  1007     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
  1009     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
  1011     assert(reserved.is_reserved(), "Must be");
  1013     VirtualSpace vs;
  1014     bool initialized = initialize_virtual_space(vs, reserved, mode);
  1015     assert(initialized, "Failed to initialize VirtualSpace");
  1017     vs.expand_by(commit_size, false);
  1019     if (vs.special()) {
  1020       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
  1021     } else {
  1022       assert_ge(vs.actual_committed_size(), commit_size);
  1023       // Approximate the commit granularity.
  1024       // Make sure that we don't commit using large pages
  1025       // if large pages has been disabled for this VirtualSpace.
  1026       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
  1027                                    os::vm_page_size() : os::large_page_size();
  1028       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
  1031     reserved.release();
  1034   static void test_virtual_space_actual_committed_space_one_large_page() {
  1035     if (!UseLargePages) {
  1036       return;
  1039     size_t large_page_size = os::large_page_size();
  1041     ReservedSpace reserved(large_page_size, large_page_size, true, false);
  1043     assert(reserved.is_reserved(), "Must be");
  1045     VirtualSpace vs;
  1046     bool initialized = vs.initialize(reserved, 0);
  1047     assert(initialized, "Failed to initialize VirtualSpace");
  1049     vs.expand_by(large_page_size, false);
  1051     assert_equals(vs.actual_committed_size(), large_page_size);
  1053     reserved.release();
  1056   static void test_virtual_space_actual_committed_space() {
  1057     test_virtual_space_actual_committed_space(4 * K, 0);
  1058     test_virtual_space_actual_committed_space(4 * K, 4 * K);
  1059     test_virtual_space_actual_committed_space(8 * K, 0);
  1060     test_virtual_space_actual_committed_space(8 * K, 4 * K);
  1061     test_virtual_space_actual_committed_space(8 * K, 8 * K);
  1062     test_virtual_space_actual_committed_space(12 * K, 0);
  1063     test_virtual_space_actual_committed_space(12 * K, 4 * K);
  1064     test_virtual_space_actual_committed_space(12 * K, 8 * K);
  1065     test_virtual_space_actual_committed_space(12 * K, 12 * K);
  1066     test_virtual_space_actual_committed_space(64 * K, 0);
  1067     test_virtual_space_actual_committed_space(64 * K, 32 * K);
  1068     test_virtual_space_actual_committed_space(64 * K, 64 * K);
  1069     test_virtual_space_actual_committed_space(2 * M, 0);
  1070     test_virtual_space_actual_committed_space(2 * M, 4 * K);
  1071     test_virtual_space_actual_committed_space(2 * M, 64 * K);
  1072     test_virtual_space_actual_committed_space(2 * M, 1 * M);
  1073     test_virtual_space_actual_committed_space(2 * M, 2 * M);
  1074     test_virtual_space_actual_committed_space(10 * M, 0);
  1075     test_virtual_space_actual_committed_space(10 * M, 4 * K);
  1076     test_virtual_space_actual_committed_space(10 * M, 8 * K);
  1077     test_virtual_space_actual_committed_space(10 * M, 1 * M);
  1078     test_virtual_space_actual_committed_space(10 * M, 2 * M);
  1079     test_virtual_space_actual_committed_space(10 * M, 5 * M);
  1080     test_virtual_space_actual_committed_space(10 * M, 10 * M);
  1083   static void test_virtual_space_disable_large_pages() {
  1084     if (!UseLargePages) {
  1085       return;
  1087     // These test cases verify that if we force VirtualSpace to disable large pages
  1088     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
  1089     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
  1090     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
  1091     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
  1092     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
  1093     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
  1094     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
  1096     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
  1097     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
  1098     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
  1099     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
  1100     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
  1101     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
  1102     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
  1104     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
  1105     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
  1106     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
  1107     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
  1108     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
  1109     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
  1110     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  1113   static void test_virtual_space() {
  1114     test_virtual_space_actual_committed_space();
  1115     test_virtual_space_actual_committed_space_one_large_page();
  1116     test_virtual_space_disable_large_pages();
  1118 };
  1120 void TestVirtualSpace_test() {
  1121   TestVirtualSpace::test_virtual_space();
  1124 #endif // PRODUCT
  1126 #endif

mercurial