src/share/vm/runtime/virtualspace.cpp

author      brutisso
date        Fri, 26 Apr 2013 09:53:22 +0200
changeset   5019:b294421fa3c5
parent      4465:203f64878aab
child       5255:a837fa3d3f86
permissions -rw-r--r--

8012915: ReservedSpace::align_reserved_region() broken on Windows
Summary: remove unused constructors and helper methods for ReservedHeapSpace and ReservedSpace
Reviewed-by: mgerdin, jmasa, johnc, tschatzl

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
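
// Illustrative usage sketch, not part of the original file: a caller might
// reserve an anonymous, non-executable region with the one-argument
// constructor and check the result before use. The 16x granularity size is
// an arbitrary example value; initialize() asserts that the size is a
// multiple of os::vm_allocation_granularity().
//
//   ReservedSpace rs(16 * os::vm_allocation_granularity());
//   if (rs.is_reserved()) {
//     // rs.base() and rs.size() describe the reserved (uncommitted) range.
//   }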

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but the compressed oops heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored the requested address. Try a different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored the requested address. Try a different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored the requested address. Try a different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned; retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned.
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}
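
// Worked example, illustrative only: suppose alignment == 1*M,
// noaccess_prefix == 0, and os::reserve_memory() returns
// base == 0x7f0000010000. Then
//   ((size_t)base + noaccess_prefix) & (alignment - 1) == 0x10000 != 0,
// so the region is released and re-reserved with os::reserve_memory_aligned(),
// which is expected to return a 1 MB aligned base.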

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}

ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}

size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}

size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}

size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
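
// Worked example, illustrative only: assuming a 4 KB page size and a 64 KB
// allocation granularity (typical on Windows):
//   page_align_size_up(5000)         == 8192
//   page_align_size_down(5000)       == 4096
//   allocation_align_size_up(5000)   == 65536
//   allocation_align_size_down(5000) == 0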

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non-zero-based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
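
// Worked example, illustrative only: for a compressed-oops heap with a 1 MB
// noaccess prefix at _base == 0x100000000, the first 1 MB is protected with
// MEM_PROT_NONE, after which _base becomes 0x100100000 and _size shrinks by
// 1 MB. Implicit null checks through narrow oops then fault in the protected
// page instead of reading valid memory.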

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}
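
// Note, illustrative only: the prefix above is lcm(page size, alignment) so
// that it is both a whole number of pages (and thus protectable) and a
// multiple of the heap alignment (the base still satisfies the alignment
// once the prefix is skipped). For example, lcm(4*K, 1*M) == 1*M.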

// Reserve space for the code segment.  Same as the Java heap, except we mark
// this space as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}

bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not so aligned already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit the initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
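
// Worked example, illustrative only: with a 4 KB page size, a 4 MB large page
// size, low_boundary() == 0x7f0000001000 and high_boundary() == 0x7f0000c01000,
// the boundaries computed above would be:
//   lower_high_boundary()  == round_to(0x7f0000001000, 4*M)   == 0x7f0000400000
//   middle_high_boundary() == round_down(0x7f0000c01000, 4*M) == 0x7f0000c00000
//   upper_high_boundary()  == 0x7f0000c01000
// The lower and upper fringes are then committed with default pages, and the
// middle region [0x7f0000400000, 0x7f0000c00000) in large page chunks.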

VirtualSpace::~VirtualSpace() {
  release();
}

void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // The caller must release the underlying memory via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}

size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}

size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}

size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine whether a particular virtual space is using large
   pages.  This is done in the initialize function, and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the calls to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
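
// Worked example, illustrative only: with middle_alignment() == 4*M and
// middle_high() on a 4 MB boundary, expand_by(1*M) rounds the new middle high
// up to the next 4 MB boundary and commits a full 4 MB chunk. A later
// expand_by(1*M) whose new high stays inside that already-committed chunk
// finds aligned_middle_new_high <= middle_high() and issues no commit at all.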

bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // Don't commit memory if the entire space is pinned in memory.
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned,
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region, so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell whether
  // it is an intra- or inter-region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // Don't uncommit if the entire space is pinned in memory.
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate the new unaligned address for each region.
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align each address to its region's alignment.
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink.
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
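
// Worked example, illustrative only: with 4 KB pages in the lower region,
// shrinking by 6 KB always moves high() down by 6 KB, but round_to() keeps
// the partially used page committed, so only the one page that became
// entirely unused is uncommitted.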

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", low_boundary(), high_boundary());
}

#endif
