src/share/vm/runtime/virtualspace.cpp

author:      trims
date:        Tue, 07 Oct 2008 11:01:35 -0700
changeset:   815 (eb28cf662f56)
parents:     798 (032ddb9432ad), 772 (9ee9cf798b59)
child:       1077 (660978a2a31a)
permissions: -rw-r--r--

Merge

/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_virtualspace.cpp.incl"

// ReservedSpace

ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
     return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}
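
// For intuition, a worked example with hypothetical values (not from the
// original source): suppose suffix_align = 0x100000 (1M), prefix_size =
// 0x10000 (64K), and the raw reservation starts at addr = 0x7fff00010000.
// Then:
//   s + prefix_size = 0x7fff00020000
//   beg_ofs         = 0x7fff00020000 & 0xfffff = 0x20000
//   beg_delta       = 0x100000 - 0x20000       = 0xe0000
// The region is trimmed to begin at s + beg_delta = 0x7fff000f0000, so the
// suffix at result + prefix_size = 0x7fff00100000 is 1M-aligned.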

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Add in noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, NULL, noaccess_prefix);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Optimistically try to reserve the exact size needed.
  char* addr = os::reserve_memory(size, NULL, prefix_align);
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}
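
// Illustrative usage with hypothetical sizes (K and M are HotSpot's usual
// size constants): reserve one contiguous region whose prefix and suffix
// have different alignments:
//
//   ReservedSpace rs(64*K /* prefix_size */, 64*K /* prefix_align */,
//                    4*M  /* suffix_size */, 4*M  /* suffix_align */,
//                    0    /* noaccess_prefix */);
//
// On success, rs.base() is 64K-aligned and rs.base() + 64*K is 4M-aligned.
// The two-part ReservedHeapSpace constructor near the end of this file
// builds on this form.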

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  _base = NULL;
  _size = 0;
  _special = false;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {
    // It's not hard to implement reserve_memory_special() such that it can
    // allocate at a fixed address, but there seems to be no use for this
    // feature for now, so it's not implemented.
    assert(requested_address == NULL, "not implemented");

    base = os::reserve_memory_special(size);

    if (base != NULL) {
      // Check alignment constraints
      if (alignment > 0) {
        assert((uintptr_t) base % alignment == 0,
               "Large pages returned a non-aligned address");
      }
      _special = true;
    } else {
      // failed; try to reserve regular memory below
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.
    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size,
                                           requested_address-noaccess_prefix);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve a region large enough to do manual alignment, and
      // increase size to a multiple of the desired alignment.
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}
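
// Note on the retry loop above, with hypothetical numbers for intuition:
// with alignment = 1*M, reserving extra_size = size + 1*M guarantees that a
// 1*M-aligned base exists inside the raw reservation.  If the OS returns
// extra_base = 0x2345000, align_size_up rounds it to base = 0x2400000, and
// [base, base + size) still fits within [extra_base, extra_base + extra_size).
// The release/re-reserve pair is not atomic, so another thread may grab the
// range in between; the do/while loop simply retries until the targeted
// reservation succeeds.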

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(_base, _size, partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special());
  return result;
}

ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special());
  return result;
}
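
// Illustrative usage with hypothetical sizes (assuming the accessors
// declared in virtualspace.hpp): carve an existing reservation into two
// adjacent spaces:
//
//   ReservedSpace rs(2*M);
//   ReservedSpace lo = rs.first_part(1*M, rs.alignment(), true /* split */,
//                                    true /* realloc */);
//   ReservedSpace hi = rs.last_part(1*M, rs.alignment());
//
// lo covers [rs.base(), rs.base() + 1*M) and hi covers the remainder.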

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}

size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}

size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}

size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
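
// For example, with a 4K page size and a 64K allocation granularity
// (typical Windows values), page_align_size_up(5000) returns 8192 while
// allocation_align_size_up(5000) returns 65536.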

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  // If there is no noaccess prefix, there is nothing to protect; return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                UseCompressedOops && UseImplicitNullCheckForNarrowOop ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                UseCompressedOops && UseImplicitNullCheckForNarrowOop ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size+suffix_size);
}
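
// Why the noaccess prefix exists: with compressed oops, a narrow oop of 0
// decodes to the heap base address, so dereferencing a "null" compressed
// reference touches the first bytes of the reserved region.  Making those
// page(s) inaccessible lets the generated code keep using SIGSEGV-based
// implicit null checks for narrow oops.  A sketch of the decoding this
// relies on (hypothetical helper, not part of this file):
//
//   oop decode_narrow(narrowOop v, address heap_base, int shift) {
//     return (oop)(heap_base + ((uintptr_t)v << shift)); // v == 0 lands in
//   }                                                    // the guard page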

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
}

bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit to initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
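
// Layout sketch: the reservation is carved into three regions whose interior
// boundaries are rounded to the middle (large page) alignment:
//
//   low_boundary                                            high_boundary
//   |--- lower ---|------------- middle -------------|--- upper ---|
//                 ^                                  ^
//        lower_high_boundary                middle_high_boundary
//
// The lower and upper regions commit with the default page size; only the
// middle region can commit with large pages.  If low_boundary and
// high_boundary are already large-page aligned, the lower and upper regions
// are empty.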

VirtualSpace::~VirtualSpace() {
  release();
}

void VirtualSpace::release() {
  // A VirtualSpace does not own the underlying reserved memory, so none is
  // released here.  The caller must release it via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
}

size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}

size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}

size_t VirtualSpace::uncommitted_size()  const {
  return reserved_size() - committed_size();
}

bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/

bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra- or inter-region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment())) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}
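
// Worked example with hypothetical numbers: take a 10*M reservation whose
// low_boundary is already 4*M-aligned, with 4K default pages and a 4*M
// middle alignment.  The lower region is then empty, and expand_by(6*M) on a
// freshly initialized space rounds the middle commit up to the large-page
// granularity: _middle_high advances by round_to(6*M, 4*M) = 8*M, while
// _high advances by exactly the 6*M that was requested.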

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif
