src/os/linux/vm/os_linux.cpp

changeset 8433:44c8fe602a5e
parent 8316:626f594dffa6
child 8434:776cb7cbe2e4
     1.1 --- a/src/os/linux/vm/os_linux.cpp	Tue May 17 02:44:58 2016 +0000
     1.2 +++ b/src/os/linux/vm/os_linux.cpp	Thu Apr 23 18:00:50 2015 +0200
     1.3 @@ -3433,22 +3433,66 @@
     1.4    return addr;
     1.5  }
     1.6  
     1.7 +// Helper for os::Linux::reserve_memory_special_huge_tlbfs_mixed().
     1.8 +// Allocate (using mmap, NO_RESERVE, with small pages) at either a given request address
     1.9 +//   (req_addr != NULL) or with a given alignment.
    1.10 +//  - bytes shall be a multiple of alignment.
    1.11 +//  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
    1.12 +//  - alignment sets the alignment at which memory shall be allocated.
    1.13 +//     It must be a multiple of allocation granularity.
    1.14 +// Returns address of memory or NULL. If req_addr was not NULL, will only return
    1.15 +//  req_addr or NULL.
    1.16 +static char* anon_mmap_aligned(size_t bytes, size_t alignment, char* req_addr) {
    1.17 +
    1.18 +  size_t extra_size = bytes;
    1.19 +  if (req_addr == NULL && alignment > 0) {
    1.20 +    extra_size += alignment;
    1.21 +  }
    1.22 +
    1.23 +  char* start = (char*) ::mmap(req_addr, extra_size, PROT_NONE,
    1.24 +    MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
    1.25 +    -1, 0);
    1.26 +  if (start == MAP_FAILED) {
    1.27 +    start = NULL;
    1.28 +  } else {
    1.29 +    if (req_addr != NULL) {
    1.30 +      if (start != req_addr) {
    1.31 +        ::munmap(start, extra_size);
    1.32 +        start = NULL;
    1.33 +      }
    1.34 +    } else {
    1.35 +      char* const start_aligned = (char*) align_ptr_up(start, alignment);
    1.36 +      char* const end_aligned = start_aligned + bytes;
    1.37 +      char* const end = start + extra_size;
    1.38 +      if (start_aligned > start) {
    1.39 +        ::munmap(start, start_aligned - start);
    1.40 +      }
    1.41 +      if (end_aligned < end) {
    1.42 +        ::munmap(end_aligned, end - end_aligned);
    1.43 +      }
    1.44 +      start = start_aligned;
    1.45 +    }
    1.46 +  }
    1.47 +  return start;
    1.48 +
    1.49 +}
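
The helper above over-allocates by alignment bytes and then trims the unaligned head and tail back to the kernel. Below is a minimal standalone sketch of that trim-to-alignment idiom, assuming POSIX mmap/munmap and a power-of-two, page-multiple alignment; demo_mmap_aligned and the mask arithmetic are illustrative stand-ins for the HotSpot code, not the code itself:

#include <sys/mman.h>
#include <stdint.h>
#include <stddef.h>

static char* demo_mmap_aligned(size_t bytes, size_t alignment) {
  // Reserve 'alignment' extra bytes so an aligned sub-range of 'bytes'
  // is guaranteed to exist somewhere inside the mapping.
  size_t extra = bytes + alignment;
  char* start = (char*) mmap(NULL, extra, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (start == MAP_FAILED) {
    return NULL;
  }
  // Round up to the next 'alignment' boundary (alignment is a power of two).
  char* aligned = (char*) (((uintptr_t) start + alignment - 1)
                           & ~(uintptr_t) (alignment - 1));
  // Return the unused head and tail to the kernel; only
  // [aligned, aligned + bytes) stays mapped.
  if (aligned > start) {
    munmap(start, aligned - start);
  }
  char* end = start + extra;
  if (aligned + bytes < end) {
    munmap(aligned + bytes, end - (aligned + bytes));
  }
  return aligned;
}
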
    1.50 +
    1.51 +// Reserve memory using mmap(MAP_HUGETLB).
    1.52 +//  - bytes shall be a multiple of alignment.
    1.53 +//  - req_addr can be NULL. If not NULL, it must be a multiple of alignment.
    1.54 +//  - alignment sets the alignment at which memory shall be allocated.
    1.55 +//     It must be a multiple of allocation granularity.
    1.56 +// Returns address of memory or NULL. If req_addr was not NULL, will only return
    1.57 +//  req_addr or NULL.
    1.58  char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
    1.59    size_t large_page_size = os::large_page_size();
    1.60 -
    1.61    assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
    1.62  
    1.63 -  // Allocate small pages.
    1.64 -
    1.65 -  char* start;
    1.66 -  if (req_addr != NULL) {
    1.67 -    assert(is_ptr_aligned(req_addr, alignment), "Must be");
    1.68 -    assert(is_size_aligned(bytes, alignment), "Must be");
    1.69 -    start = os::reserve_memory(bytes, req_addr);
    1.70 -    assert(start == NULL || start == req_addr, "Must be");
    1.71 -  } else {
    1.72 -    start = os::reserve_memory_aligned(bytes, alignment);
    1.73 -  }
    1.74 +  assert(is_ptr_aligned(req_addr, alignment), "Must be");
    1.75 +  assert(is_size_aligned(bytes, alignment), "Must be");
    1.76 +
    1.77 +  // First reserve - but not commit - the address range in small pages.
    1.78 +  char* const start = anon_mmap_aligned(bytes, alignment, req_addr);
    1.79  
    1.80    if (start == NULL) {
    1.81      return NULL;
    1.82 @@ -3456,13 +3500,6 @@
    1.83  
    1.84    assert(is_ptr_aligned(start, alignment), "Must be");
    1.85  
    1.86 -  if (MemTracker::tracking_level() > NMT_minimal) {
    1.87 -    // os::reserve_memory_special will record this memory area.
    1.88 -    // Need to release it here to prevent overlapping reservations.
    1.89 -    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    1.90 -    tkr.record((address)start, bytes);
    1.91 -  }
    1.92 -
    1.93    char* end = start + bytes;
    1.94  
    1.95    // Find the regions of the allocated chunk that can be promoted to large pages.
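
The lp_start/lp_end values referenced here clip the reservation to the largest inner range whose ends fall on large-page boundaries. A sketch of that computation follows; align_up/align_down and find_large_page_region are hypothetical stand-ins for HotSpot's align_ptr_up()/align_ptr_down() and the inline logic in this function:

#include <stdint.h>
#include <stddef.h>

// Hypothetical stand-ins for HotSpot's align_ptr_up()/align_ptr_down();
// 'a' must be a power of two.
static inline char* align_up(char* p, size_t a) {
  return (char*) (((uintptr_t) p + a - 1) & ~(uintptr_t) (a - 1));
}
static inline char* align_down(char* p, size_t a) {
  return (char*) ((uintptr_t) p & ~(uintptr_t) (a - 1));
}

// Given the small-page reservation [start, start + bytes) and the large
// page size lps, compute the inner sub-range that is lps-aligned at both
// ends and hence eligible for remapping with MAP_HUGETLB. The leading and
// trailing remainders stay small-paged.
static void find_large_page_region(char* start, size_t bytes, size_t lps,
                                   char** lp_start, char** lp_end) {
  *lp_start = align_up(start, lps);
  *lp_end   = align_down(start + bytes, lps);
}
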
    1.96 @@ -3482,9 +3519,9 @@
    1.97  
    1.98    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
    1.99  
   1.100 -
   1.101    void* result;
   1.102  
   1.103 +  // Commit small-paged leading area.
   1.104    if (start != lp_start) {
   1.105      result = ::mmap(start, lp_start - start, prot,
   1.106                      MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
   1.107 @@ -3495,11 +3532,12 @@
   1.108      }
   1.109    }
   1.110  
   1.111 +  // Commit large-paged area.
   1.112    result = ::mmap(lp_start, lp_bytes, prot,
   1.113                    MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
   1.114                    -1, 0);
   1.115    if (result == MAP_FAILED) {
   1.116 -    warn_on_large_pages_failure(req_addr, bytes, errno);
   1.117 +    warn_on_large_pages_failure(lp_start, lp_bytes, errno);
   1.118      // If the mmap above fails, the large pages region will be unmapped and we
   1.119      // have regions before and after with small pages. Release these regions.
   1.120      //
   1.121 @@ -3512,6 +3550,7 @@
   1.122      return NULL;
   1.123    }
   1.124  
   1.125 +  // Commit small-paged trailing area.
   1.126    if (lp_end != end) {
   1.127        result = ::mmap(lp_end, end - lp_end, prot,
   1.128                        MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
   1.129 @@ -3528,7 +3567,7 @@
   1.130  char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
   1.131    assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
   1.132    assert(is_ptr_aligned(req_addr, alignment), "Must be");
   1.133 -  assert(is_power_of_2(alignment), "Must be");
   1.134 +  assert(is_size_aligned(alignment, os::vm_allocation_granularity()), "Must be");
   1.135    assert(is_power_of_2(os::large_page_size()), "Must be");
   1.136    assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
   1.137  
   1.138 @@ -6102,47 +6141,100 @@
   1.139      }
   1.140    }
   1.141  
   1.142 -  static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
   1.143 -    if (!UseHugeTLBFS) {
   1.144 -        return;
   1.145 -    }
   1.146 -
   1.147 -    test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
   1.148 -        size, alignment);
   1.149 -
   1.150 -    assert(size >= os::large_page_size(), "Incorrect input to test");
   1.151 -
   1.152 -    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
   1.153 -
   1.154 -    if (addr != NULL) {
   1.155 -      small_page_write(addr, size);
   1.156 -
   1.157 -      os::Linux::release_memory_special_huge_tlbfs(addr, size);
   1.158 -    }
   1.159 -  }
   1.160 -
   1.161 -  static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
   1.162 -    size_t lp = os::large_page_size();
   1.163 -    size_t ag = os::vm_allocation_granularity();
   1.164 -
   1.165 -    for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
   1.166 -      test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
   1.167 -    }
   1.168 -  }
   1.169 -
   1.170    static void test_reserve_memory_special_huge_tlbfs_mixed() {
   1.171      size_t lp = os::large_page_size();
   1.172      size_t ag = os::vm_allocation_granularity();
   1.173  
   1.174 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
   1.175 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
   1.176 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
   1.177 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
   1.178 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
   1.179 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
   1.180 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
   1.181 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
   1.182 -    test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
   1.183 +    // sizes to test
   1.184 +    const size_t sizes[] = {
   1.185 +      lp, lp + ag, lp + lp / 2, lp * 2,
   1.186 +      lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
   1.187 +      lp * 10, lp * 10 + lp / 2
   1.188 +    };
   1.189 +    const int num_sizes = sizeof(sizes) / sizeof(size_t);
   1.190 +
   1.191 +    // For each size/alignment combination, we test three scenarios:
   1.192 +    // 1) with req_addr == NULL
   1.193 +    // 2) with a non-null req_addr at which we expect to successfully allocate
   1.194 +    // 3) with a non-null req_addr which contains a pre-existing mapping, at which we
    1.195 +    //    expect the allocation to fail (per contract it cannot return another address)
   1.196 +
   1.197 +    // Pre-allocate two areas; they shall be as large as the largest allocation
   1.198 +    //  and aligned to the largest alignment we will be testing.
   1.199 +    const size_t mapping_size = sizes[num_sizes - 1] * 2;
   1.200 +    char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
   1.201 +      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
   1.202 +      -1, 0);
   1.203 +    assert(mapping1 != MAP_FAILED, "should work");
   1.204 +
   1.205 +    char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
   1.206 +      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
   1.207 +      -1, 0);
   1.208 +    assert(mapping2 != MAP_FAILED, "should work");
   1.209 +
   1.210 +    // Unmap the first mapping, but leave the second mapping intact: the first
   1.211 +    // mapping will serve as a value for a "good" req_addr (case 2). The second
   1.212 +    // mapping, still intact, as "bad" req_addr (case 3).
   1.213 +    ::munmap(mapping1, mapping_size);
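
The two-mapping setup above is a compact way to manufacture both a known-free and a known-occupied address for the req_addr tests. A standalone sketch of the same trick, with req_addr_fixture and make_fixture as hypothetical names:

#include <sys/mman.h>
#include <stddef.h>

struct req_addr_fixture {
  char* good;  // address of an unmapped range: a request here can succeed
  char* bad;   // address of a live PROT_NONE mapping: a request here must fail
};

static bool make_fixture(size_t size, req_addr_fixture* f) {
  void* m1 = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  void* m2 = mmap(NULL, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (m1 == MAP_FAILED || m2 == MAP_FAILED) {
    if (m1 != MAP_FAILED) munmap(m1, size);
    if (m2 != MAP_FAILED) munmap(m2, size);
    return false;
  }
  // Free the first range but keep the second mapped: m1 is now very likely
  // still free (nothing else has had a chance to claim it), m2 is occupied.
  munmap(m1, size);
  f->good = (char*) m1;
  f->bad  = (char*) m2;
  return true;
}
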
   1.214 +
   1.215 +    // Case 1
   1.216 +    test_log("%s, req_addr NULL:", __FUNCTION__);
   1.217 +    test_log("size            align           result");
   1.218 +
   1.219 +    for (int i = 0; i < num_sizes; i++) {
   1.220 +      const size_t size = sizes[i];
   1.221 +      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
   1.222 +        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
   1.223 +        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " ->  " PTR_FORMAT " %s",
   1.224 +            size, alignment, p, (p != NULL ? "" : "(failed)"));
   1.225 +        if (p != NULL) {
   1.226 +          assert(is_ptr_aligned(p, alignment), "must be");
   1.227 +          small_page_write(p, size);
   1.228 +          os::Linux::release_memory_special_huge_tlbfs(p, size);
   1.229 +        }
   1.230 +      }
   1.231 +    }
   1.232 +
   1.233 +    // Case 2
   1.234 +    test_log("%s, req_addr non-NULL:", __FUNCTION__);
   1.235 +    test_log("size            align           req_addr         result");
   1.236 +
   1.237 +    for (int i = 0; i < num_sizes; i++) {
   1.238 +      const size_t size = sizes[i];
   1.239 +      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
   1.240 +        char* const req_addr = (char*) align_ptr_up(mapping1, alignment);
   1.241 +        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
   1.242 +        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " ->  " PTR_FORMAT " %s",
   1.243 +            size, alignment, req_addr, p,
   1.244 +            ((p != NULL ? (p == req_addr ? "(exact match)" : "") : "(failed)")));
   1.245 +        if (p != NULL) {
   1.246 +          assert(p == req_addr, "must be");
   1.247 +          small_page_write(p, size);
   1.248 +          os::Linux::release_memory_special_huge_tlbfs(p, size);
   1.249 +        }
   1.250 +      }
   1.251 +    }
   1.252 +
   1.253 +    // Case 3
   1.254 +    test_log("%s, req_addr non-NULL with preexisting mapping:", __FUNCTION__);
   1.255 +    test_log("size            align           req_addr         result");
   1.256 +
   1.257 +    for (int i = 0; i < num_sizes; i++) {
   1.258 +      const size_t size = sizes[i];
   1.259 +      for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
   1.260 +        char* const req_addr = (char*) align_ptr_up(mapping2, alignment);
   1.261 +        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
   1.262 +        test_log(SIZE_FORMAT_HEX " " SIZE_FORMAT_HEX " " PTR_FORMAT " ->  " PTR_FORMAT " %s",
   1.263 +            size, alignment, req_addr, p,
   1.264 +            ((p != NULL ? "" : "(failed)")));
   1.265 +        // as the area around req_addr contains already existing mappings, the API should always
   1.266 +        // return NULL (as per contract, it cannot return another address)
   1.267 +        assert(p == NULL, "must be");
   1.268 +      }
   1.269 +    }
   1.270 +
   1.271 +    ::munmap(mapping2, mapping_size);
   1.272 +
   1.273    }
   1.274  
   1.275    static void test_reserve_memory_special_huge_tlbfs() {
