1.1 --- a/src/os/linux/vm/os_linux.cpp Wed Apr 20 20:32:45 2011 -0700 1.2 +++ b/src/os/linux/vm/os_linux.cpp Fri Apr 22 09:26:09 2011 -0700 1.3 @@ -2465,16 +2465,40 @@ 1.4 return res != (uintptr_t) MAP_FAILED; 1.5 } 1.6 1.7 +// Define MAP_HUGETLB here so we can build HotSpot on old systems. 1.8 +#ifndef MAP_HUGETLB 1.9 +#define MAP_HUGETLB 0x40000 1.10 +#endif 1.11 + 1.12 +// Define MADV_HUGEPAGE here so we can build HotSpot on old systems. 1.13 +#ifndef MADV_HUGEPAGE 1.14 +#define MADV_HUGEPAGE 14 1.15 +#endif 1.16 + 1.17 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, 1.18 bool exec) { 1.19 + if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { 1.20 + int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 1.21 + uintptr_t res = 1.22 + (uintptr_t) ::mmap(addr, size, prot, 1.23 + MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB, 1.24 + -1, 0); 1.25 + return res != (uintptr_t) MAP_FAILED; 1.26 + } 1.27 + 1.28 return commit_memory(addr, size, exec); 1.29 } 1.30 1.31 -void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 1.32 +void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 1.33 + if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { 1.34 + // We don't check the return value: madvise(MADV_HUGEPAGE) may not 1.35 + // be supported or the memory may already be backed by huge pages. 
1.36 + ::madvise(addr, bytes, MADV_HUGEPAGE); 1.37 + } 1.38 +} 1.39 1.40 void os::free_memory(char *addr, size_t bytes) { 1.41 - ::mmap(addr, bytes, PROT_READ | PROT_WRITE, 1.42 - MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); 1.43 + ::madvise(addr, bytes, MADV_DONTNEED); 1.44 } 1.45 1.46 void os::numa_make_global(char *addr, size_t bytes) { 1.47 @@ -2812,6 +2836,43 @@ 1.48 return linux_mprotect(addr, size, PROT_READ|PROT_WRITE); 1.49 } 1.50 1.51 +bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) { 1.52 + bool result = false; 1.53 + void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE, 1.54 + MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB, 1.55 + -1, 0); 1.56 + 1.57 + if (p != (void *) -1) { 1.58 + // We don't know if this really is a huge page or not. 1.59 + FILE *fp = fopen("/proc/self/maps", "r"); 1.60 + if (fp) { 1.61 + while (!feof(fp)) { 1.62 + char chars[257]; 1.63 + long x = 0; 1.64 + if (fgets(chars, sizeof(chars), fp)) { 1.65 + if (sscanf(chars, "%lx-%*lx", &x) == 1 1.66 + && x == (long)p) { 1.67 + if (strstr (chars, "hugepage")) { 1.68 + result = true; 1.69 + break; 1.70 + } 1.71 + } 1.72 + } 1.73 + } 1.74 + fclose(fp); 1.75 + } 1.76 + munmap (p, page_size); 1.77 + if (result) 1.78 + return true; 1.79 + } 1.80 + 1.81 + if (warn) { 1.82 + warning("HugeTLBFS is not supported by the operating system."); 1.83 + } 1.84 + 1.85 + return result; 1.86 +} 1.87 + 1.88 /* 1.89 * Set the coredump_filter bits to include largepages in core dump (bit 6) 1.90 * 1.91 @@ -2854,7 +2915,16 @@ 1.92 static size_t _large_page_size = 0; 1.93 1.94 bool os::large_page_init() { 1.95 - if (!UseLargePages) return false; 1.96 + if (!UseLargePages) { 1.97 + UseHugeTLBFS = false; 1.98 + UseSHM = false; 1.99 + return false; 1.100 + } 1.101 + 1.102 + if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) { 1.103 + // Our user has not expressed a preference, so we'll try both. 
1.104 + UseHugeTLBFS = UseSHM = true; 1.105 + } 1.106 1.107 if (LargePageSizeInBytes) { 1.108 _large_page_size = LargePageSizeInBytes; 1.109 @@ -2899,6 +2969,9 @@ 1.110 } 1.111 } 1.112 1.113 + // Print a warning if any large-page-related flag is specified on the command line. 1.114 + bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS); 1.115 + 1.116 const size_t default_page_size = (size_t)Linux::page_size(); 1.117 if (_large_page_size > default_page_size) { 1.118 _page_sizes[0] = _large_page_size; 1.119 @@ -2906,6 +2979,14 @@ 1.120 _page_sizes[2] = 0; 1.121 } 1.122 1.123 + UseHugeTLBFS = UseHugeTLBFS && 1.124 + Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size); 1.125 + 1.126 + if (UseHugeTLBFS) 1.127 + UseSHM = false; 1.128 + 1.129 + UseLargePages = UseHugeTLBFS || UseSHM; 1.130 + 1.131 set_coredump_filter(); 1.132 1.133 // Large page support is available on 2.6 or newer kernel, some vendors 1.134 @@ -2922,7 +3003,7 @@ 1.135 char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) { 1.136 // "exec" is passed in but not used. Creating the shared image for 1.137 // the code cache doesn't have an SHM_X executable permission to check. 1.138 - assert(UseLargePages, "only for large pages"); 1.139 + assert(UseLargePages && UseSHM, "only for SHM large pages"); 1.140 1.141 key_t key = IPC_PRIVATE; 1.142 char *addr; 1.143 @@ -2989,16 +3070,15 @@ 1.144 return _large_page_size; 1.145 } 1.146 1.147 -// Linux does not support anonymous mmap with large page memory. The only way 1.148 -// to reserve large page memory without file backing is through SysV shared 1.149 -// memory API. The entire memory region is committed and pinned upfront. 1.150 -// Hopefully this will change in the future... 1.151 +// HugeTLBFS allows an application to commit large page memory on demand; 1.152 +// with SysV SHM the entire memory region must be allocated as shared 1.153 +// memory. 
1.154 bool os::can_commit_large_page_memory() { 1.155 - return false; 1.156 + return UseHugeTLBFS; 1.157 } 1.158 1.159 bool os::can_execute_large_page_memory() { 1.160 - return false; 1.161 + return UseHugeTLBFS; 1.162 } 1.163 1.164 // Reserve memory at an arbitrary address, only if that area is