src/share/vm/runtime/virtualspace.cpp


author      twisti
date        Tue, 24 Jul 2012 10:51:00 -0700
changeset   3969:1d7922586cf6
parent      3900:d2a62e0f25eb
child       4037:da91efe96a93
permissions -rw-r--r--

7023639: JSR 292 method handle invocation needs a fast path for compiled code
6984705: JSR 292 method handle creation should not go through JNI
Summary: remove assembly code for JDK 7 chained method handles
Reviewed-by: jrose, twisti, kvn, mhaupt
Contributed-by: John Rose <john.r.rose@oracle.com>, Christian Thalinger <christian.thalinger@oracle.com>, Michael Haupt <michael.haupt@oracle.com>

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
     return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}
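
// Worked example of the trimming arithmetic above (illustrative numbers,
// not from the VM): suppose suffix_align = 0x10000 (64K), prefix_size =
// 0x1000, and the raw reservation starts at addr = 0x7f0000001000.  Then:
//   s + prefix_size = 0x7f0000002000
//   beg_ofs         = 0x7f0000002000 & 0xffff = 0x2000
//   beg_delta       = 0x10000 - 0x2000        = 0xe000
// The leading 0xe000 bytes and any tail beyond beg_delta + required_size
// are released, and the function returns s + beg_delta = 0x7f000000f000,
// so that (result + prefix_size) = 0x7f0000010000 is 64K-aligned.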

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases, but for
    // compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Add in noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align < suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return to the caller, who can take
      // remedial action (like trying again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}
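
// Worked example of the "extra space" retry (illustrative, assuming
// suffix_align = 0x10000): if the optimistic reservation lands where
//   ofs = (addr + adjusted_prefix_size) & 0xffff = 0x6000,
// then extra = MAX2(0x6000, 0x10000 - 0x6000) = 0xa000.  If the kernel
// returns the same base (or one shifted by the size increase) for the
// re-reservation of size + 0xa000, the misaligned head or tail now fits
// within the extra space and align_reserved_region() can trim it off.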

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve size large enough to do manual alignment and
      // increase size to a multiple of the desired alignment
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can take
        // remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}
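
// Worked example of the manual-alignment retry loop (illustrative numbers):
// with alignment = 0x100000 (1M), suppose os::reserve_memory returns
// extra_base = 0x7f1234560000 for extra_size = size + 1M.  Then
//   base = align_size_up(0x7f1234560000, 0x100000) = 0x7f1234600000,
// which lies at most alignment - 1 bytes above extra_base, so [base,
// base + size) fits inside the extra reservation.  The whole extra region
// is released and `size` bytes are re-reserved at the aligned base; the
// do/while loop covers the race where another thread maps that range
// between the release and the re-reserve.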


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
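
// Layout sketch (illustrative) of the region before and after the prefix
// is protected:
//
//   reserved:  [ noaccess_prefix | Java heap ................ ]
//              ^_base (before)   ^_base (after) = heap base
//
// With non-zero-based compressed oops, a null narrow oop decodes to
// narrow_oop_base + 0, which points into the PROT_NONE prefix; the
// resulting fault lets the VM implement implicit null checks instead of
// emitting an explicit null test for every decoded oop.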

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  protect_noaccess_prefix(prefix_size+suffix_size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
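
// Region layout sketch (illustrative), assuming a 4M middle alignment:
//
//   low_boundary                                            high_boundary
//        |-- lower --|=========== middle ===========|-- upper --|
//                    ^                              ^
//       lower_high_boundary              middle_high_boundary
//       = round_to(low_boundary, 4M)     = round_down(high_boundary, 4M)
//
// The lower and upper fringes are committed with default pages; only the
// middle region, whose ends are large-page aligned, can use large pages.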


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}
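
// Worked example (illustrative): assume 4K default pages, a 4M middle
// alignment, high() sitting exactly at lower_high_boundary(), and a call
// to expand_by(6M).  unaligned_new_high falls inside the middle region,
// so lower_needs == upper_needs == 0, while aligned_middle_new_high is
// rounded up to the next 4M boundary, giving middle_needs == 8M.  Note
// that _high still only advances by the requested 6M; the extra 2M is
// committed (so the large page stays whole) but not yet handed out.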

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
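
// Mirror-image example (illustrative): shrinking by 6M from the state in
// the expand_by example rounds each new high *up* to its region's
// alignment (round_to), so in the middle region only the large pages that
// lie entirely above the rounded boundary are uncommitted; a partially
// vacated 4M page stays committed, consistent with the rule above that a
// page is uncommitted only when the whole page is unusable.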

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: %ld", committed_size());
  tty->print_cr(" - reserved:  %ld", reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif
