src/share/vm/runtime/virtualspace.cpp

author:      kvn
date:        Thu, 12 Mar 2009 10:37:46 -0700
changeset:   1077 : 660978a2a31a
parent:      815 : eb28cf662f56
child:       1091 : 6bdd6923ba16
permissions: -rw-r--r--
6791178: Specialize for zero as the compressed oop vm heap base
Summary: Use zero based compressed oops if java heap is below 32gb and unscaled compressed oops if java heap is below 4gb.
Reviewed-by: never, twisti, jcoomes, coleenp
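
[Editorial note] The mode selection described in the summary can be sketched
as follows. This is an illustrative reconstruction, not code from this
changeset; the helper name pick_narrow_oop_mode and the enum are assumptions
made for exposition (the real logic lives in Universe, not in this file):

    // Sketch of the heap-size heuristic from the summary (hypothetical code).
    enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop };

    static NarrowOopMode pick_narrow_oop_mode(size_t max_heap_size) {
      const size_t G = (size_t)1024 * 1024 * 1024;
      if (max_heap_size < 4 * G) {
        return UnscaledNarrowOop;    // base == 0, shift == 0: oop == narrow oop
      }
      if (max_heap_size < 32 * G) {
        return ZeroBasedNarrowOop;   // base == 0, shift == 3: oop == narrow oop << 3
      }
      return HeapBasedNarrowOop;     // non-zero base; needs the noaccess prefix below
    }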

/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_virtualspace.cpp.incl"

// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
     return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}
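
// Editorial note (not part of the original changeset): a worked example of
// the alignment arithmetic above, assuming addr = 0x10001000,
// prefix_size = 0x1000 and suffix_align = 0x10000:
//   beg_ofs   = (0x10001000 + 0x1000) & 0xffff = 0x2000
//   beg_delta = 0x10000 - 0x2000 = 0xe000
// The leading 0xe000 bytes are released and the region now starts at
// addr + 0xe000 = 0x1000f000, so the suffix start
// (result + prefix_size = 0x10010000) is suffix_align aligned.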

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Add in noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    addr = os::attempt_reserve_memory_at(size,
                                         requested_address-noaccess_prefix);
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}
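
// Editorial note (not part of the original changeset): one reading of the
// MAX2(ofs, suffix_align - ofs) choice above.  If the second reservation
// comes back at the same address, the start must slide forward by
// (suffix_align - ofs) bytes to fix the suffix alignment; if the kernel
// instead hands back an address shifted by the size increase, extra >= ofs
// keeps an aligned start reachable.  E.g. with suffix_align = 0x10000 and
// ofs = 0x4000, extra = MAX2(0x4000, 0xc000) = 0xc000.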

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  _base = NULL;
  _size = 0;
  _special = false;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, requested_address);

    if (base != NULL) {
      // Check alignment constraints
      if (alignment > 0) {
        assert((uintptr_t) base % alignment == 0,
               "Large pages returned a non-aligned address");
      }
      _special = true;
    } else {
      // failed; try to reserve regular memory below
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If the space at the requested address is not available,
    // return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size,
                                           requested_address-noaccess_prefix);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve a region large enough to do manual alignment, and
      // increase size to a multiple of the desired alignment.
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}
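
// Editorial note (not part of the original changeset): the do/while above
// over-reserves by one alignment unit, so an aligned base with room for
// 'size' bytes is guaranteed to exist inside
// [extra_base, extra_base + extra_size).  The release + re-reserve pair is
// not atomic, though: another thread may map the range in between, making
// os::reserve_memory(size, base) return NULL, in which case the loop simply
// retries with a fresh over-sized reservation.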

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(_base, _size, partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  // If there is no noaccess prefix, there is nothing to protect.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size+suffix_size);
}
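
// Editorial note (not part of the original changeset): both constructors
// above request a noaccess prefix only when Universe::narrow_oop_base() is
// non-NULL, i.e. in heap-based compressed-oops mode.  In the zero based and
// unscaled modes introduced by 6791178 the base is NULL, a null narrow oop
// decodes to address 0 and traps on the normal OS null page, so no
// protected page below the heap is needed and the prefix is 0.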

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
}

bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit the initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
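
// Editorial sketch (not part of the original changeset) of the three
// regions set up above when the boundaries are not large-page aligned:
//
//   low_boundary()                                        high_boundary()
//   +-- lower region --+-------- middle region --------+-- upper region --+
//                      ^                               ^
//           lower_high_boundary()            middle_high_boundary()
//
// The lower and upper regions expand and shrink in units of
// os::vm_page_size(); the middle region uses the large page size whenever
// the space is big enough for os::page_size_for_region() to choose one.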

VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
}

size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: %ld", committed_size());
  tty->print_cr(" - reserved:  %ld", reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif
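
[Editorial note] A minimal usage sketch of the two classes in this file,
based only on the constructors and methods shown above; the sizes are
arbitrary and the snippet is illustrative, not taken from the changeset:

    // Reserve 64MB of address space, then commit the first 16MB of it.
    const size_t M = 1024 * 1024;
    ReservedSpace rs(64 * M);
    if (rs.is_reserved()) {
      VirtualSpace vs;
      if (vs.initialize(rs, 16 * M)) {   // reserves nothing; commits 16MB
        // ... use the committed range [vs.low(), vs.high()) ...
        vs.shrink_by(8 * M);             // uncommit the top 8MB again
      }
      vs.release();   // resets the VirtualSpace only
      rs.release();   // actually unmaps the reservation
    }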
