src/share/vm/runtime/virtualspace.cpp

author:      dcubed
date:        Wed, 18 Sep 2013 07:02:10 -0700
changeset:   5743:63147986a428
parent:      5578:4c84d351cca9
child:       5704:c4c768305a8f
child:       6462:e2722a66aba7
permissions: -rw-r--r--

8019835: Strings interned in different threads equal but does not ==
Summary: Add -XX:+VerifyStringTableAtExit option and code to verify StringTable invariants.
Reviewed-by: rdurbin, sspitsyn, coleenp

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method: returns true if a non-NULL requested_address was not
// satisfied, releasing any memory that was reserved at a different address;
// returns false if no particular address was requested or the request
// was satisfied.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
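
// After protect_noaccess_prefix(), the layout of a heap-based
// compressed-oops heap looks like this (illustrative sketch):
//
//   real base                _base == real base + _noaccess_prefix
//   |<-- noaccess prefix --->|<-------------- _size --------------->|
//     (os::MEM_PROT_NONE)                usable heap
//
// Implicit null checks on compressed oops fault in the protected prefix;
// release() reconstructs the real base by subtracting _noaccess_prefix.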

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
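  //
  // The reserved range is thus carved into three regions (sketch):
  //
  //   low_boundary()                                      high_boundary()
  //   |-- lower --|---------------- middle ----------------|-- upper --|
  //               ^                                        ^
  //       lower_high_boundary()                   middle_high_boundary()
  //
  // The middle region is aligned for large pages; the lower and upper
  // fringes are committed and uncommitted with default-sized pages.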
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
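
// A minimal usage sketch (illustrative, not code from this file): reserve
// an address range, then commit a first slice of it through a VirtualSpace.
//
//   ReservedSpace rs(2 * M);       // reserve 2M, commit nothing yet
//   VirtualSpace vs;
//   if (vs.initialize(rs, 0) &&    // attach the reservation
//       vs.expand_by(512 * K)) {   // commit the first 512K
//     // [vs.low(), vs.high()) is now committed, usable memory.
//   }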


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
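
// Worked example (illustrative numbers): with 4K default pages and a 2M
// middle alignment, a space spanning [0x1000, 0x401000) splits into a
// lower region [0x1000, 0x200000), a middle region [0x200000, 0x400000)
// and an upper region [0x400000, 0x401000); an expand_by() covering all
// three then commits the two fringes with 4K granularity and the middle
// region with 2M pages.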
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}


/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};
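
// Entry point for the internal VM test machinery; in debug builds these
// tests are typically run via the develop flag -XX:+ExecuteInternalVMTests.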
void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#endif // PRODUCT

#endif
