--- a/src/os/linux/vm/os_linux.cpp	Wed Jun 27 15:23:36 2012 +0200
+++ b/src/os/linux/vm/os_linux.cpp	Thu Jun 28 17:03:16 2012 -0400
@@ -371,7 +371,7 @@
 // code needs to be changed accordingly.
 
 // The next few definitions allow the code to be verbatim:
-#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
+#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
 #define getenv(n) ::getenv(n)
 
 /*
@@ -639,7 +639,7 @@
 
   size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
   if (n > 0) {
-     char *str = (char *)malloc(n);
+     char *str = (char *)malloc(n, mtInternal);
      confstr(_CS_GNU_LIBC_VERSION, str, n);
      os::Linux::set_glibc_version(str);
   } else {
@@ -652,7 +652,7 @@
 
   n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
   if (n > 0) {
-     char *str = (char *)malloc(n);
+     char *str = (char *)malloc(n, mtInternal);
      confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
      // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
      // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
@@ -1685,11 +1685,11 @@
       // release the storage
       for (int i = 0 ; i < n ; i++) {
         if (pelements[i] != NULL) {
-          FREE_C_HEAP_ARRAY(char, pelements[i]);
+          FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
         }
       }
       if (pelements != NULL) {
-        FREE_C_HEAP_ARRAY(char*, pelements);
+        FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
       }
     } else {
       snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
@@ -2469,7 +2469,7 @@
 // All it does is to check if there are enough free pages
 // left at the time of mmap(). This could be a potential
 // problem.
-bool os::commit_memory(char* addr, size_t size, bool exec) {
+bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
@@ -2492,7 +2492,7 @@
 #define MADV_HUGEPAGE 14
 #endif
 
-bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
+bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                        bool exec) {
   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
     int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
@@ -2516,7 +2516,7 @@
   return false;
 }
 
-void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
     // be supported or the memory may already be backed by huge pages.
@@ -2524,7 +2524,7 @@
   }
 }
 
-void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
   // This method works by doing an mmap over an existing mmaping and effectively discarding
   // the existing pages. However it won't work for SHM-based large pages that cannot be
   // uncommitted at all. We don't do anything in this case to avoid creating a segment with
@@ -2646,7 +2646,7 @@
   if (numa_available() != -1) {
     set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
     // Create a cpu -> node mapping
-    _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
+    _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
     rebuild_cpu_to_node_map();
     return true;
   }
@@ -2676,7 +2676,7 @@
   cpu_to_node()->at_grow(cpu_num - 1);
   size_t node_num = numa_get_groups_num();
 
-  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
+  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
   for (size_t i = 0; i < node_num; i++) {
     if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
       for (size_t j = 0; j < cpu_map_valid_size; j++) {
@@ -2690,7 +2690,7 @@
       }
     }
   }
-  FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
+  FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
 }
 
 int os::Linux::get_node_by_cpu(int cpu_id) {
@@ -2709,7 +2709,7 @@
 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
 unsigned long* os::Linux::_numa_all_nodes;
 
-bool os::uncommit_memory(char* addr, size_t size) {
+bool os::pd_uncommit_memory(char* addr, size_t size) {
   uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
   return res != (uintptr_t) MAP_FAILED;
@@ -2774,7 +2774,7 @@
 // munmap() the guard pages we don't leave a hole in the stack
 // mapping. This only affects the main/initial thread, but guard
 // against future OS changes
-bool os::create_stack_guard_pages(char* addr, size_t size) {
+bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
   uintptr_t stack_extent, stack_base;
   bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
   if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
@@ -2847,12 +2847,12 @@
   return ::munmap(addr, size) == 0;
 }
 
-char* os::reserve_memory(size_t bytes, char* requested_addr,
+char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
                          size_t alignment_hint) {
   return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
 }
 
-bool os::release_memory(char* addr, size_t size) {
+bool os::pd_release_memory(char* addr, size_t size) {
   return anon_munmap(addr, size);
 }
 
@@ -3149,7 +3149,7 @@
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
 
-char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
   const int max_tries = 10;
   char* base[max_tries];
   size_t size[max_tries];
@@ -4671,7 +4671,7 @@
 }
 
 // Map a block of memory.
-char* os::map_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
                      bool allow_exec) {
   int prot;
@@ -4701,7 +4701,7 @@
 
 
 // Remap a block of memory.
-char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
+char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
   // same as map_memory() on this OS
@@ -4711,7 +4711,7 @@
 
 
 // Unmap a block of memory.
-bool os::unmap_memory(char* addr, size_t bytes) {
+bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
 }