src/share/vm/runtime/virtualspace.cpp

Tue, 24 Jun 2014 15:50:50 +0200

author
ehelin
date
Tue, 24 Jun 2014 15:50:50 +0200
changeset 7778
c2ce24504334
parent 7777
340ca8812af9
child 7780
5788dbd1f2d6
permissions
-rw-r--r--

8049864: TestParallelHeapSizeFlags fails with unexpected heap size
Reviewed-by: sjohanss, jmasa

     1 /*
     2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "oops/markOop.hpp"
    27 #include "oops/oop.inline.hpp"
    28 #include "runtime/virtualspace.hpp"
    29 #include "services/memTracker.hpp"
    30 #ifdef TARGET_OS_FAMILY_linux
    31 # include "os_linux.inline.hpp"
    32 #endif
    33 #ifdef TARGET_OS_FAMILY_solaris
    34 # include "os_solaris.inline.hpp"
    35 #endif
    36 #ifdef TARGET_OS_FAMILY_windows
    37 # include "os_windows.inline.hpp"
    38 #endif
    39 #ifdef TARGET_OS_FAMILY_aix
    40 # include "os_aix.inline.hpp"
    41 #endif
    42 #ifdef TARGET_OS_FAMILY_bsd
    43 # include "os_bsd.inline.hpp"
    44 #endif
    46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    48 // ReservedSpace
    50 // Dummy constructor
// Default constructor: an empty, unreserved space (NULL base, zero size).
// Used where a ReservedSpace member must exist before real reservation.
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}
// Reserve 'size' bytes aligned only to the allocation granularity.
// Large pages are requested when the OS picks a page size for this
// region larger than the default vm page size.
ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}
// Reserve 'size' bytes plus an optional no-access prefix (used by the
// compressed-oops heap) at 'requested_address' if possible.  The prefix
// is folded into the total reservation size here.
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}
// Reserve 'size' bytes with no address preference and no noaccess
// prefix; 'executable' marks the mapping executable (used for code).
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
    78 // Helper method.
    79 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
    80                                            const size_t size, bool special)
    81 {
    82   if (base == requested_address || requested_address == NULL)
    83     return false; // did not fail
    85   if (base != NULL) {
    86     // Different reserve address may be acceptable in other cases
    87     // but for compressed oops heap should be at requested address.
    88     assert(UseCompressedOops, "currently requested address used only for compressed oops");
    89     if (PrintCompressedOopsMode) {
    90       tty->cr();
    91       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    92     }
    93     // OS ignored requested address. Try different address.
    94     if (special) {
    95       if (!os::release_memory_special(base, size)) {
    96         fatal("os::release_memory_special failed");
    97       }
    98     } else {
    99       if (!os::release_memory(base, size)) {
   100         fatal("os::release_memory failed");
   101       }
   102     }
   103   }
   104   return true;
   105 }
// Core reservation routine.  Reserves 'size' bytes (which already include
// any noaccess prefix added by the caller) aligned to 'alignment',
// optionally at 'requested_address', optionally backed by large pages,
// optionally executable.  On failure the object is left unreserved
// (_base == NULL, _size == 0); callers must check is_reserved().
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  // Reset all fields up front so any early return leaves the object in a
  // consistent "unreserved" state.
  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  // The reserved range must be encodable in a markOop so GC can tell
  // addresses apart from mark words during mark-sweep.
  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguisable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguisable from marks for mark-sweep");
}
// Wrap an already-reserved range (no OS call is made here).  Used by
// first_part()/last_part() when partitioning an existing reservation.
ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  // No noaccess prefix: this constructor wraps a plain sub-range.
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}
   241 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
   242                                         bool split, bool realloc) {
   243   assert(partition_size <= size(), "partition failed");
   244   if (split) {
   245     os::split_reserved_memory(base(), size(), partition_size, realloc);
   246   }
   247   ReservedSpace result(base(), partition_size, alignment, special(),
   248                        executable());
   249   return result;
   250 }
   253 ReservedSpace
   254 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
   255   assert(partition_size <= size(), "partition failed");
   256   ReservedSpace result(base() + partition_size, size() - partition_size,
   257                        alignment, special(), executable());
   258   return result;
   259 }
   262 size_t ReservedSpace::page_align_size_up(size_t size) {
   263   return align_size_up(size, os::vm_page_size());
   264 }
   267 size_t ReservedSpace::page_align_size_down(size_t size) {
   268   return align_size_down(size, os::vm_page_size());
   269 }
   272 size_t ReservedSpace::allocation_align_size_up(size_t size) {
   273   return align_size_up(size, os::vm_allocation_granularity());
   274 }
   277 size_t ReservedSpace::allocation_align_size_down(size_t size) {
   278   return align_size_down(size, os::vm_allocation_granularity());
   279 }
// Release the whole reservation back to the OS, including any noaccess
// prefix that protect_noaccess_prefix() removed from [_base, _base+_size),
// then reset the object to the unreserved state.  No-op if not reserved.
void ReservedSpace::release() {
  if (is_reserved()) {
    // Undo the prefix adjustment done in protect_noaccess_prefix() so the
    // full originally-reserved range is released.
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else{
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}
// Make the noaccess prefix page(s) at the front of the reservation
// inaccessible (so implicit null checks on compressed oops trap), then
// shrink [_base, _size) to exclude the prefix.  'size' is the usable size
// the caller expects after the prefix is removed.
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  // Exclude the prefix from the usable range; release() adds it back.
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
   328 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
   329                                      bool large, char* requested_address) :
   330   ReservedSpace(size, alignment, large,
   331                 requested_address,
   332                 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
   333                  Universe::narrow_oop_use_implicit_null_checks()) ?
   334                   lcm(os::vm_page_size(), alignment) : 0) {
   335   if (base() > 0) {
   336     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
   337   }
   339   // Only reserved space for the java heap should have a noaccess_prefix
   340   // if using compressed oops.
   341   protect_noaccess_prefix(size);
   342 }
   344 // Reserve space for code segment.  Same as Java heap only we mark this as
   345 // executable.
// Reserve 'r_size' bytes for the code cache: same as a plain reservation
// but marked executable, and tagged mtCode for native memory tracking.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
   353 // VirtualSpace
   355 VirtualSpace::VirtualSpace() {
   356   _low_boundary           = NULL;
   357   _high_boundary          = NULL;
   358   _low                    = NULL;
   359   _high                   = NULL;
   360   _lower_high             = NULL;
   361   _middle_high            = NULL;
   362   _upper_high             = NULL;
   363   _lower_high_boundary    = NULL;
   364   _middle_high_boundary   = NULL;
   365   _upper_high_boundary    = NULL;
   366   _lower_alignment        = 0;
   367   _middle_alignment       = 0;
   368   _upper_alignment        = 0;
   369   _special                = false;
   370   _executable             = false;
   371 }
   374 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
   375   const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
   376   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
   377 }
// Set up the three commit regions (lower/middle/upper) over the reserved
// range 'rs' and commit the first 'committed_size' bytes.  The middle
// region is aligned to 'max_commit_granularity' (typically the large page
// size); the lower and upper fringes use the default page size.  Returns
// false if rs is not reserved or the initial commit fails.
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  // Nothing committed yet: low == high == low_boundary.
  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically,  we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
// Destructor only resets fields via release(); it does not unmap the
// underlying reserved memory (see the comment in release()).
VirtualSpace::~VirtualSpace() {
  release();
}
   432 void VirtualSpace::release() {
   433   // This does not release memory it never reserved.
   434   // Caller must release via rs.release();
   435   _low_boundary           = NULL;
   436   _high_boundary          = NULL;
   437   _low                    = NULL;
   438   _high                   = NULL;
   439   _lower_high             = NULL;
   440   _middle_high            = NULL;
   441   _upper_high             = NULL;
   442   _lower_high_boundary    = NULL;
   443   _middle_high_boundary   = NULL;
   444   _upper_high_boundary    = NULL;
   445   _lower_alignment        = 0;
   446   _middle_alignment       = 0;
   447   _upper_alignment        = 0;
   448   _special                = false;
   449   _executable             = false;
   450 }
   453 size_t VirtualSpace::committed_size() const {
   454   return pointer_delta(high(), low(), sizeof(char));
   455 }
   458 size_t VirtualSpace::reserved_size() const {
   459   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
   460 }
   463 size_t VirtualSpace::uncommitted_size()  const {
   464   return reserved_size() - committed_size();
   465 }
// Sum of memory actually committed in the three regions.  This can differ
// from committed_size() because region highs are rounded to each region's
// alignment when committing.
size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  // Regions fill bottom-up: a non-empty higher region implies every lower
  // region is completely committed, and a partially-filled lower region
  // implies all higher regions are empty.
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}
   504 bool VirtualSpace::contains(const void* p) const {
   505   return low() <= (const char*) p && (const char*) p < high();
   506 }
   508 /*
   509    First we need to determine if a particular virtual space is using large
   510    pages.  This is done at the initialize function and only virtual spaces
   511    that are larger than LargePageSizeInBytes use large pages.  Once we
   512    have determined this, all expand_by and shrink_by calls must grow and
   513    shrink by large page size chunks.  If a particular request
   514    is within the current large page, the call to commit and uncommit memory
   515    can be ignored.  In the case that the low and high boundaries of this
   516    space is not large page aligned, the pages leading to the first large
   517    page address and the pages after the last large page address must be
   518    allocated with default pages.
   519 */
// Grow the committed portion by 'bytes'.  The growth is distributed over
// the lower/middle/upper regions, each committed at its own alignment
// (middle at large-page granularity).  Returns false if the request
// exceeds the uncommitted space or any os::commit_memory call fails.
// For pinned ("special") spaces only the bookkeeping pointer moves.
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    // Middle region passes its (large-page) alignment to commit_memory.
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  _high += bytes;
  return true;
}
   640 // A page is uncommitted if the contents of the entire page is deemed unusable.
   641 // Continue to decrement the high() pointer until it reaches a page boundary
   642 // in which case that particular page can now be uncommitted.
// A page is uncommitted if the contents of the entire page is deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
//
// Shrinks the committed portion by 'size' bytes, uncommitting whole
// aligned pages per region (upper, then middle, then lower).  For pinned
// ("special") spaces only the bookkeeping pointer moves.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  // NOTE(review): each failure path below returns without decrementing
  // _high, so a failed uncommit leaves the committed size unchanged —
  // presumably intentional best-effort behavior; confirm before altering.
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
   738 #ifndef PRODUCT
// Debug-only sanity check: verify each region's high pointer lies within
// its region and the three regions tile the reserved range in order.
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}
   756 void VirtualSpace::print_on(outputStream* out) {
   757   out->print   ("Virtual space:");
   758   if (special()) out->print(" (pinned in memory)");
   759   out->cr();
   760   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
   761   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
   762   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
   763   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
   764 }
// Convenience wrapper: print the summary to the default tty stream.
void VirtualSpace::print() {
  print_on(tty);
}
   770 /////////////// Unit tests ///////////////
   772 #ifndef PRODUCT
// Logging helper for the unit tests below: prints a line to tty and flushes,
// but only when -XX:+VerboseInternalVMTests is enabled.  (No comments inside
// the macro body -- they would break the line continuations.)
#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
// Unit tests for ReservedSpace: reserving, touching and releasing virtual
// memory with various sizes, alignments and large-page settings.
class TestReservedSpace : AllStatic {
 public:
  // Touch one byte on every small page in [addr, addr + size).  Used on
  // "special" (pre-committed large-page) reservations to verify the memory
  // is actually writable.
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();
    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }
  // Release rs through the os API matching how it was reserved
  // (special reservations need os::release_memory_special).
  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }
  // Reserve size bytes with an explicit alignment and verify the resulting
  // base address and size honor that alignment.
  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
    assert(is_size_aligned(size, alignment), "Incorrect input parameters");
    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix
    test_log(" rs.special() == %d", rs.special());
    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");
    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
    if (rs.special()) {
      small_page_write(rs.base(), size);
    }
    release_memory_for_test(rs);
  }
  // Reserve size bytes with the single-argument constructor (default
  // alignment) and verify base and size.
  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    ReservedSpace rs(size);
    test_log(" rs.special() == %d", rs.special());
    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");
    if (rs.special()) {
      small_page_write(rs.base(), size);
    }
    release_memory_for_test(rs);
  }
  // Reserve size bytes, optionally requesting large pages (only when enabled
  // and size is at least one large page), and verify base and size.
  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
    bool large = maybe_large && UseLargePages && size >= os::large_page_size();
    ReservedSpace rs(size, alignment, large, false);
    test_log(" rs.special() == %d", rs.special());
    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");
    if (rs.special()) {
      small_page_write(rs.base(), size);
    }
    release_memory_for_test(rs);
  }
  // Driver: explicit-alignment reservations at a few multiples of 2M.
  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();
    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }
  // Driver: default-alignment reservations around allocation-granularity
  // boundaries (size +/- ag) to probe rounding behavior.
  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();
    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }
  // Driver: size/alignment combinations with and without large pages.
  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();
    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);
    if (UseLargePages) {
      size_t lp = os::large_page_size();
      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);
      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }
  // Run all three test drivers.
  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};
// Free-function entry point for the ReservedSpace unit tests
// (presumably called by the internal VM test runner -- confirm at callers).
void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}
// Assert that two size_t values are equal; the failure message shows both.
#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));
// Assert value1 >= value2; the failure message shows both operand
// expressions (via stringization) and their values.
#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));
// Assert value1 < value2; same failure-message format as assert_ge.
#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));
// Unit tests for VirtualSpace: committing (expand_by) within a reservation
// and checking actual_committed_size() under different page-size modes.
class TestVirtualSpace : AllStatic {
  // How the test should treat large pages when reserving/initializing.
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };
  // Reserve memory for a test according to mode: Default/Reserve use the
  // simple constructor; Disable/Commit explicitly reserve without large pages.
  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }
  // Initialize vs over rs according to mode: Disable forces small-page
  // commit granularity; Commit picks a page size suited to the region size.
  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
    }
 public:
  // Reserve reserve_size (rounded up to allocation granularity), commit
  // commit_size, and verify actual_committed_size() is within one commit
  // granule of what was requested.
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
    assert(reserved.is_reserved(), "Must be");
    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");
    vs.expand_by(commit_size, false);
    if (vs.special()) {
      // A special (pinned) space is fully committed up front.
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages has been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    reserved.release();
  // Reserve and commit exactly one large page; the committed size must
  // match the large page size exactly.
  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    size_t large_page_size = os::large_page_size();
    ReservedSpace reserved(large_page_size, large_page_size, true, false);
    assert(reserved.is_reserved(), "Must be");
    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");
    vs.expand_by(large_page_size, false);
    assert_equals(vs.actual_committed_size(), large_page_size);
    reserved.release();
  // Driver: commit sizes from 0 up to the full reservation, across
  // reservations from 4K to 10M.
  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  // Driver: same commit sizes under each non-default mode.
  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    // These test cases verify that if we force VirtualSpace to disable large pages
    // the commits are done with small pages (see commit_granularity above).
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  // Run all VirtualSpace test drivers.
  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
};
// Free-function entry point for the VirtualSpace unit tests
// (presumably called by the internal VM test runner -- confirm at callers).
void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
  1123 #endif // PRODUCT
  1125 #endif

mercurial