Tue, 24 Feb 2015 15:04:52 -0500
8072383: resolve conflicts between open and closed ports
Summary: refactor close to remove references to closed ports
Reviewed-by: kvn, simonis, sgehwolf, dholmes
1 /*
2 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // no precompiled headers
26 #include "classfile/classLoader.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "code/icBuffer.hpp"
30 #include "code/vtableStubs.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/disassembler.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "jvm_linux.h"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/filemap.hpp"
37 #include "mutex_linux.inline.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "os_share_linux.hpp"
40 #include "prims/jniFastGetField.hpp"
41 #include "prims/jvm.h"
42 #include "prims/jvm_misc.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/extendedPC.hpp"
45 #include "runtime/globals.hpp"
46 #include "runtime/interfaceSupport.hpp"
47 #include "runtime/init.hpp"
48 #include "runtime/java.hpp"
49 #include "runtime/javaCalls.hpp"
50 #include "runtime/mutexLocker.hpp"
51 #include "runtime/objectMonitor.hpp"
52 #include "runtime/orderAccess.inline.hpp"
53 #include "runtime/osThread.hpp"
54 #include "runtime/perfMemory.hpp"
55 #include "runtime/sharedRuntime.hpp"
56 #include "runtime/statSampler.hpp"
57 #include "runtime/stubRoutines.hpp"
58 #include "runtime/thread.inline.hpp"
59 #include "runtime/threadCritical.hpp"
60 #include "runtime/timer.hpp"
61 #include "services/attachListener.hpp"
62 #include "services/memTracker.hpp"
63 #include "services/runtimeService.hpp"
64 #include "utilities/decoder.hpp"
65 #include "utilities/defaultStream.hpp"
66 #include "utilities/events.hpp"
67 #include "utilities/elfFile.hpp"
68 #include "utilities/growableArray.hpp"
69 #include "utilities/vmError.hpp"
71 // put OS-includes here
72 # include <sys/types.h>
73 # include <sys/mman.h>
74 # include <sys/stat.h>
75 # include <sys/select.h>
76 # include <pthread.h>
77 # include <signal.h>
78 # include <errno.h>
79 # include <dlfcn.h>
80 # include <stdio.h>
81 # include <unistd.h>
82 # include <sys/resource.h>
83 # include <pthread.h>
84 # include <sys/stat.h>
85 # include <sys/time.h>
86 # include <sys/times.h>
87 # include <sys/utsname.h>
88 # include <sys/socket.h>
89 # include <sys/wait.h>
90 # include <pwd.h>
91 # include <poll.h>
92 # include <semaphore.h>
93 # include <fcntl.h>
94 # include <string.h>
95 # include <syscall.h>
96 # include <sys/sysinfo.h>
97 # include <gnu/libc-version.h>
98 # include <sys/ipc.h>
99 # include <sys/shm.h>
100 # include <link.h>
101 # include <stdint.h>
102 # include <inttypes.h>
103 # include <sys/ioctl.h>
105 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
107 // If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
108 // getrusage() is prepared to handle the associated failure.
109 #ifndef RUSAGE_THREAD
110 #define RUSAGE_THREAD (1) /* only the calling thread */
111 #endif
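// Illustrative sketch (not part of this change): how the RUSAGE_THREAD
// fallback above is meant to be used. On kernels without RUSAGE_THREAD
// support, getrusage() fails (typically with EINVAL), so callers must check
// the return value, as os::elapsedVTime() below does. The helper name is
// hypothetical.
static bool thread_cpu_time_secs(double* out) {
  struct rusage usage;
  if (getrusage(RUSAGE_THREAD, &usage) != 0) {
    return false;  // old kernel - caller falls back to an elapsed-time estimate
  }
  *out = (double)(usage.ru_utime.tv_sec  + usage.ru_stime.tv_sec) +
         (double)(usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000.0);
  return true;
}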
113 #define MAX_PATH (2 * K)
115 #define MAX_SECS 100000000
117 // for timer info max values which include all bits
118 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
120 #define LARGEPAGES_BIT (1 << 6)
121 ////////////////////////////////////////////////////////////////////////////////
122 // global variables
123 julong os::Linux::_physical_memory = 0;
125 address os::Linux::_initial_thread_stack_bottom = NULL;
126 uintptr_t os::Linux::_initial_thread_stack_size = 0;
128 int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
129 int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
130 Mutex* os::Linux::_createThread_lock = NULL;
131 pthread_t os::Linux::_main_thread;
132 int os::Linux::_page_size = -1;
133 const int os::Linux::_vm_default_page_size = (8 * K);
134 bool os::Linux::_is_floating_stack = false;
135 bool os::Linux::_is_NPTL = false;
136 bool os::Linux::_supports_fast_thread_cpu_time = false;
137 const char * os::Linux::_glibc_version = NULL;
138 const char * os::Linux::_libpthread_version = NULL;
139 pthread_condattr_t os::Linux::_condattr[1];
141 static jlong initial_time_count=0;
143 static int clock_tics_per_sec = 100;
145 // For diagnostics: print a message once. See run_periodic_checks.
146 static sigset_t check_signal_done;
147 static bool check_signals = true;
149 static pid_t _initial_pid = 0;
151 /* Signal number used to suspend/resume a thread */
153 /* do not use any signal number less than SIGSEGV, see 4355769 */
154 static int SR_signum = SIGUSR2;
155 sigset_t SR_sigset;
157 /* Used to protect dlsym() calls */
158 static pthread_mutex_t dl_mutex;
160 // Declarations
161 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
163 #ifdef JAVASE_EMBEDDED
164 class MemNotifyThread: public Thread {
165 friend class VMStructs;
166 public:
167 virtual void run();
169 private:
170 static MemNotifyThread* _memnotify_thread;
171 int _fd;
173 public:
175 // Constructor
176 MemNotifyThread(int fd);
178 // Tester
179 bool is_memnotify_thread() const { return true; }
181 // Printing
182 char* name() const { return (char*)"Linux MemNotify Thread"; }
184 // Returns the single instance of the MemNotifyThread
185 static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
187 // Create and start the single instance of MemNotifyThread
188 static void start();
189 };
190 #endif // JAVASE_EMBEDDED
192 // utility functions
194 static int SR_initialize();
196 julong os::available_memory() {
197 return Linux::available_memory();
198 }
200 julong os::Linux::available_memory() {
201 // values in struct sysinfo are "unsigned long"
202 struct sysinfo si;
203 sysinfo(&si);
205 return (julong)si.freeram * si.mem_unit;
206 }
208 julong os::physical_memory() {
209 return Linux::physical_memory();
210 }
212 ////////////////////////////////////////////////////////////////////////////////
213 // environment support
215 bool os::getenv(const char* name, char* buf, int len) {
216 const char* val = ::getenv(name);
217 if (val != NULL && strlen(val) < (size_t)len) {
218 strcpy(buf, val);
219 return true;
220 }
221 if (len > 0) buf[0] = 0; // return a null string
222 return false;
223 }
226 // Return true if the process runs with special privileges (real and effective uid/gid differ, e.g. setuid).
228 bool os::have_special_privileges() {
229 static bool init = false;
230 static bool privileges = false;
231 if (!init) {
232 privileges = (getuid() != geteuid()) || (getgid() != getegid());
233 init = true;
234 }
235 return privileges;
236 }
239 #ifndef SYS_gettid
240 // i386: 224, ia64: 1105, amd64: 186, sparc: 143
241 #ifdef __ia64__
242 #define SYS_gettid 1105
243 #elif __i386__
244 #define SYS_gettid 224
245 #elif __amd64__
246 #define SYS_gettid 186
247 #elif __sparc__
248 #define SYS_gettid 143
249 #else
250 #error define gettid for the arch
251 #endif
252 #endif
254 // CPU architecture string
255 static char cpu_arch[] = HOTSPOT_LIB_ARCH;
257 // pid_t gettid()
258 //
259 // Returns the kernel thread id of the currently running thread. Kernel
260 // thread id is used to access /proc.
261 //
262 // (Note that getpid() on LinuxThreads returns kernel thread id too; but
263 // on NPTL, it returns the same pid for all threads, as required by POSIX.)
264 //
265 pid_t os::Linux::gettid() {
266 int rslt = syscall(SYS_gettid);
267 if (rslt == -1) {
268 // old kernel, no NPTL support
269 return getpid();
270 } else {
271 return (pid_t)rslt;
272 }
273 }
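// Illustrative sketch (not part of this change): the kernel thread id returned
// by gettid() above is what /proc uses to name per-thread entries, e.g.
// /proc/<pid>/task/<tid>. The helper name and path layout shown are for
// illustration only.
static bool current_thread_has_proc_entry() {
  char fname[64];
  jio_snprintf(fname, sizeof(fname), "/proc/%d/task/%d",
               getpid(), os::Linux::gettid());
  struct stat statbuf;
  return ::stat(fname, &statbuf) == 0;
}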
275 // Most versions of Linux have a bug where the number of processors is
276 // determined by looking at the /proc file system. In a chroot environment,
277 // the system call returns 1. This causes the VM to act as if it is running on
278 // a single processor and to elide locking (see the is_MP() call).
279 static bool unsafe_chroot_detected = false;
280 static const char *unstable_chroot_error = "/proc file system not found.\n"
281 "Java may be unstable running multithreaded in a chroot "
282 "environment on Linux when /proc filesystem is not mounted.";
284 void os::Linux::initialize_system_info() {
285 set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
286 if (processor_count() == 1) {
287 pid_t pid = os::Linux::gettid();
288 char fname[32];
289 jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
290 FILE *fp = fopen(fname, "r");
291 if (fp == NULL) {
292 unsafe_chroot_detected = true;
293 } else {
294 fclose(fp);
295 }
296 }
297 _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
298 assert(processor_count() > 0, "linux error");
299 }
301 void os::init_system_properties_values() {
302 // The next steps are taken in the product version:
303 //
304 // Obtain the JAVA_HOME value from the location of libjvm.so.
305 // This library should be located at:
306 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
307 //
308 // If "/jre/lib/" appears at the right place in the path, then we
309 // assume libjvm.so is installed in a JDK and we use this path.
310 //
311 // Otherwise exit with message: "Could not create the Java virtual machine."
312 //
313 // The following extra steps are taken in the debugging version:
314 //
315 // If "/jre/lib/" does NOT appear at the right place in the path
316 // instead of exit check for $JAVA_HOME environment variable.
317 //
318 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
319 // then we append a fake suffix "hotspot/libjvm.so" to this path so
320 // it looks like libjvm.so is installed there
321 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
322 //
323 // Otherwise exit.
324 //
325 // Important note: if the location of libjvm.so changes this
326 // code needs to be changed accordingly.
328 // See ld(1):
329 // The linker uses the following search paths to locate required
330 // shared libraries:
331 // 1: ...
332 // ...
333 // 7: The default directories, normally /lib and /usr/lib.
334 #if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
335 #define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
336 #else
337 #define DEFAULT_LIBPATH "/lib:/usr/lib"
338 #endif
340 // Base path of extensions installed on the system.
341 #define SYS_EXT_DIR "/usr/java/packages"
342 #define EXTENSIONS_DIR "/lib/ext"
343 #define ENDORSED_DIR "/lib/endorsed"
345 // Buffer that fits several sprintfs.
346 // Note that the space for the colon and the trailing null is provided
347 // by the nulls included by the sizeof operator.
348 const size_t bufsize =
349 MAX3((size_t)MAXPATHLEN, // For dll_dir & friends.
350 (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
351 (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
352 char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
354 // sysclasspath, java_home, dll_dir
355 {
356 char *pslash;
357 os::jvm_path(buf, bufsize);
359 // Found the full path to libjvm.so.
360 // Now cut the path to <java_home>/jre if we can.
361 *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
362 pslash = strrchr(buf, '/');
363 if (pslash != NULL) {
364 *pslash = '\0'; // Get rid of /{client|server|hotspot}.
365 }
366 Arguments::set_dll_dir(buf);
368 if (pslash != NULL) {
369 pslash = strrchr(buf, '/');
370 if (pslash != NULL) {
371 *pslash = '\0'; // Get rid of /<arch>.
372 pslash = strrchr(buf, '/');
373 if (pslash != NULL) {
374 *pslash = '\0'; // Get rid of /lib.
375 }
376 }
377 }
378 Arguments::set_java_home(buf);
379 set_boot_path('/', ':');
380 }
382 // Where to look for native libraries.
383 //
384 // Note: Due to a legacy implementation, most of the library path
385 // is set in the launcher. This was to accommodate linking restrictions
386 // on legacy Linux implementations (which are no longer supported).
387 // Eventually, all the library path setting will be done here.
388 //
389 // However, to prevent the proliferation of improperly built native
390 // libraries, the new path component /usr/java/packages is added here.
392 {
393 // Get the user setting of LD_LIBRARY_PATH, and prepend it. It
394 // should always exist (until the legacy problem cited above is
395 // addressed).
396 const char *v = ::getenv("LD_LIBRARY_PATH");
397 const char *v_colon = ":";
398 if (v == NULL) { v = ""; v_colon = ""; }
399 // That's +1 for the colon and +1 for the trailing '\0'.
400 char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
401 strlen(v) + 1 +
402 sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
403 mtInternal);
404 sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
405 Arguments::set_library_path(ld_library_path);
406 FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
407 }
409 // Extensions directories.
410 sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
411 Arguments::set_ext_dirs(buf);
413 // Endorsed standards default directory.
414 sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
415 Arguments::set_endorsed_dirs(buf);
417 FREE_C_HEAP_ARRAY(char, buf, mtInternal);
419 #undef DEFAULT_LIBPATH
420 #undef SYS_EXT_DIR
421 #undef EXTENSIONS_DIR
422 #undef ENDORSED_DIR
423 }
425 ////////////////////////////////////////////////////////////////////////////////
426 // breakpoint support
428 void os::breakpoint() {
429 BREAKPOINT;
430 }
432 extern "C" void breakpoint() {
433 // use debugger to set breakpoint here
434 }
436 ////////////////////////////////////////////////////////////////////////////////
437 // signal support
439 debug_only(static bool signal_sets_initialized = false);
440 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
442 bool os::Linux::is_sig_ignored(int sig) {
443 struct sigaction oact;
444 sigaction(sig, (struct sigaction*)NULL, &oact);
445 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
446 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
447 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
448 return true;
449 else
450 return false;
451 }
453 void os::Linux::signal_sets_init() {
454 // Should also have an assertion stating we are still single-threaded.
455 assert(!signal_sets_initialized, "Already initialized");
456 // Fill in signals that are necessarily unblocked for all threads in
457 // the VM. Currently, we unblock the following signals:
458 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
459 // by -Xrs (=ReduceSignalUsage));
460 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
461 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
462 // the dispositions or masks wrt these signals.
463 // Programs embedding the VM that want to use the above signals for their
464 // own purposes must, at this time, use the "-Xrs" option to prevent
465 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
466 // (See bug 4345157, and other related bugs).
467 // In reality, though, unblocking these signals is really a nop, since
468 // these signals are not blocked by default.
469 sigemptyset(&unblocked_sigs);
470 sigemptyset(&allowdebug_blocked_sigs);
471 sigaddset(&unblocked_sigs, SIGILL);
472 sigaddset(&unblocked_sigs, SIGSEGV);
473 sigaddset(&unblocked_sigs, SIGBUS);
474 sigaddset(&unblocked_sigs, SIGFPE);
475 #if defined(PPC64)
476 sigaddset(&unblocked_sigs, SIGTRAP);
477 #endif
478 sigaddset(&unblocked_sigs, SR_signum);
480 if (!ReduceSignalUsage) {
481 if (!os::Linux::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
482 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
483 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
484 }
485 if (!os::Linux::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
486 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
487 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
488 }
489 if (!os::Linux::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
490 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
491 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
492 }
493 }
494 // Fill in signals that are blocked by all but the VM thread.
495 sigemptyset(&vm_sigs);
496 if (!ReduceSignalUsage)
497 sigaddset(&vm_sigs, BREAK_SIGNAL);
498 debug_only(signal_sets_initialized = true);
500 }
502 // These are signals that are unblocked while a thread is running Java.
503 // (For some reason, they get blocked by default.)
504 sigset_t* os::Linux::unblocked_signals() {
505 assert(signal_sets_initialized, "Not initialized");
506 return &unblocked_sigs;
507 }
509 // These are the signals that are blocked while a (non-VM) thread is
510 // running Java. Only the VM thread handles these signals.
511 sigset_t* os::Linux::vm_signals() {
512 assert(signal_sets_initialized, "Not initialized");
513 return &vm_sigs;
514 }
516 // These are signals that are blocked during cond_wait to allow debugger in
517 sigset_t* os::Linux::allowdebug_blocked_signals() {
518 assert(signal_sets_initialized, "Not initialized");
519 return &allowdebug_blocked_sigs;
520 }
522 void os::Linux::hotspot_sigmask(Thread* thread) {
524 // Save the caller's signal mask before setting the VM signal mask
525 sigset_t caller_sigmask;
526 pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
528 OSThread* osthread = thread->osthread();
529 osthread->set_caller_sigmask(caller_sigmask);
531 pthread_sigmask(SIG_UNBLOCK, os::Linux::unblocked_signals(), NULL);
533 if (!ReduceSignalUsage) {
534 if (thread->is_VM_thread()) {
535 // Only the VM thread handles BREAK_SIGNAL ...
536 pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
537 } else {
538 // ... all other threads block BREAK_SIGNAL
539 pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
540 }
541 }
542 }
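// Illustrative sketch (not part of this change): the save/modify/restore
// pattern that hotspot_sigmask() above builds on. pthread_sigmask() with a
// NULL new set only queries the current mask; SIG_SETMASK restores a saved
// mask verbatim. The helper name is hypothetical.
static void with_all_signals_blocked(void (*fn)()) {
  sigset_t all, saved;
  sigfillset(&all);
  pthread_sigmask(SIG_SETMASK, &all, &saved);  // save the old mask, block everything
  fn();
  pthread_sigmask(SIG_SETMASK, &saved, NULL);  // restore the caller's mask
}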
544 //////////////////////////////////////////////////////////////////////////////
545 // detecting pthread library
547 void os::Linux::libpthread_init() {
548 // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
549 // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
550 // generic name for earlier versions.
551 // Define macros here so we can build HotSpot on old systems.
552 # ifndef _CS_GNU_LIBC_VERSION
553 # define _CS_GNU_LIBC_VERSION 2
554 # endif
555 # ifndef _CS_GNU_LIBPTHREAD_VERSION
556 # define _CS_GNU_LIBPTHREAD_VERSION 3
557 # endif
559 size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
560 if (n > 0) {
561 char *str = (char *)malloc(n, mtInternal);
562 confstr(_CS_GNU_LIBC_VERSION, str, n);
563 os::Linux::set_glibc_version(str);
564 } else {
565 // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
566 static char _gnu_libc_version[32];
567 jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
568 "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
569 os::Linux::set_glibc_version(_gnu_libc_version);
570 }
572 n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
573 if (n > 0) {
574 char *str = (char *)malloc(n, mtInternal);
575 confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
576 // Vanilla RH-9 (glibc 2.3.2) has a bug where confstr() always tells
577 // us "NPTL-0.29" even when we are running with LinuxThreads. Check if this
578 // is the case. LinuxThreads has a hard limit on the max number of threads,
579 // so sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
580 // On the other hand, NPTL does not have such a limit, sysconf()
581 // will return -1 and errno is not changed. Check if it is really NPTL.
582 if (strcmp(os::Linux::glibc_version(), "glibc 2.3.2") == 0 &&
583 strstr(str, "NPTL") &&
584 sysconf(_SC_THREAD_THREADS_MAX) > 0) {
585 free(str);
586 os::Linux::set_libpthread_version("linuxthreads");
587 } else {
588 os::Linux::set_libpthread_version(str);
589 }
590 } else {
591 // glibc before 2.3.2 only has LinuxThreads.
592 os::Linux::set_libpthread_version("linuxthreads");
593 }
595 if (strstr(libpthread_version(), "NPTL")) {
596 os::Linux::set_is_NPTL();
597 } else {
598 os::Linux::set_is_LinuxThreads();
599 }
601 // LinuxThreads has two flavors: floating-stack mode, which allows a variable
602 // stack size, and fixed-stack mode. NPTL is always floating-stack.
603 if (os::Linux::is_NPTL() || os::Linux::supports_variable_stack_size()) {
604 os::Linux::set_is_floating_stack();
605 }
606 }
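// Illustrative sketch (not part of this change): the two-call confstr()
// pattern used by libpthread_init() above. The first call with a NULL buffer
// returns the required size (0 if the name is unsupported); the second call
// fills the buffer. The helper name is hypothetical.
static bool query_confstr(int name, char* buf, size_t buflen) {
  size_t n = confstr(name, NULL, 0);
  if (n == 0 || n > buflen) {
    return false;  // unsupported name, or caller's buffer is too small
  }
  confstr(name, buf, n);
  return true;
}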
608 /////////////////////////////////////////////////////////////////////////////
609 // thread stack
611 // Force the Linux kernel to expand the current thread's stack. If "bottom" is close
612 // to the stack guard, the caller should block all signals.
613 //
614 // MAP_GROWSDOWN:
615 // A special mmap() flag that is used to implement thread stacks. It tells
616 // the kernel that the memory region should extend downwards when needed. This
617 // allows early versions of LinuxThreads to only mmap the first few pages
618 // when creating a new thread. The Linux kernel will automatically expand the
619 // thread stack as needed (on page faults).
620 //
621 // However, because the memory region of a MAP_GROWSDOWN stack can grow on
622 // demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
623 // region, it's hard to tell if the fault is due to a legitimate stack
624 // access or to reading/writing non-existent memory (e.g. a buffer
625 // overrun). As a rule, if the fault happens below the current stack pointer,
626 // the Linux kernel does not expand the stack; instead a SIGSEGV is sent to the
627 // application (see the Linux kernel's fault.c).
628 //
629 // This Linux feature can cause a SIGSEGV when the VM bangs the thread stack for
630 // stack overflow detection.
631 //
632 // Newer versions of LinuxThreads (since glibc-2.2, or RH-7.x) and NPTL do
633 // not use this flag. However, the initial thread's stack is not created
634 // by pthread; it is still MAP_GROWSDOWN. Also, it's possible (though
635 // unlikely) that user code creates a thread with a MAP_GROWSDOWN stack
636 // and then attaches the thread to the JVM.
637 //
638 // To get around the problem and allow stack banging on Linux, we need to
639 // manually expand thread stack after receiving the SIGSEGV.
640 //
641 // There are two ways to expand the thread stack to address "bottom"; we used
642 // both of them in the JVM before 1.5:
643 // 1. adjust stack pointer first so that it is below "bottom", and then
644 // touch "bottom"
645 // 2. mmap() the page in question
646 //
647 // Now that the alternate signal stack is gone, it's harder to use option 2. For instance,
648 // if current sp is already near the lower end of page 101, and we need to
649 // call mmap() to map page 100, it is possible that part of the mmap() frame
650 // will be placed in page 100. When page 100 is mapped, it is zero-filled.
651 // That will destroy the mmap() frame and cause VM to crash.
652 //
653 // The following code works by adjusting sp first, then accessing the "bottom"
654 // page to force a page fault. The Linux kernel will then automatically expand the
655 // stack mapping.
656 //
657 // _expand_stack_to() assumes its frame size is less than page size, which
658 // should always be true if the function is not inlined.
660 #if __GNUC__ < 3 // gcc 2.x does not support noinline attribute
661 #define NOINLINE
662 #else
663 #define NOINLINE __attribute__ ((noinline))
664 #endif
666 static void _expand_stack_to(address bottom) NOINLINE;
668 static void _expand_stack_to(address bottom) {
669 address sp;
670 size_t size;
671 volatile char *p;
673 // Adjust bottom to point to the largest address within the same page; this
674 // gives us a one-page buffer in case alloca() allocates slightly more memory.
675 bottom = (address)align_size_down((uintptr_t)bottom, os::Linux::page_size());
676 bottom += os::Linux::page_size() - 1;
678 // sp might be slightly above current stack pointer; if that's the case, we
679 // will alloca() a little more space than necessary, which is OK. Don't use
680 // os::current_stack_pointer(), as its result can be slightly below current
681 // stack pointer, causing us to not alloca enough to reach "bottom".
682 sp = (address)&sp;
684 if (sp > bottom) {
685 size = sp - bottom;
686 p = (volatile char *)alloca(size);
687 assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
688 p[0] = '\0';
689 }
690 }
692 bool os::Linux::manually_expand_stack(JavaThread * t, address addr) {
693 assert(t != NULL, "just checking");
694 assert(t->osthread()->expanding_stack(), "expand should be set");
695 assert(t->stack_base() != NULL, "stack_base was not initialized");
697 if (addr < t->stack_base() && addr >= t->stack_yellow_zone_base()) {
698 sigset_t mask_all, old_sigset;
699 sigfillset(&mask_all);
700 pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
701 _expand_stack_to(addr);
702 pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
703 return true;
704 }
705 return false;
706 }
708 //////////////////////////////////////////////////////////////////////////////
709 // create new thread
711 static address highest_vm_reserved_address();
713 // check if it's safe to start a new thread
714 static bool _thread_safety_check(Thread* thread) {
715 if (os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack()) {
716 // Fixed stack LinuxThreads (SuSE Linux/x86, and some versions of Redhat)
717 // Heap is mmap'ed at lower end of memory space. Thread stacks are
718 // allocated (MAP_FIXED) from high address space. Every thread stack
719 // occupies a fixed size slot (usually 2Mbytes, but user can change
720 // it to other values if they rebuild LinuxThreads).
721 //
722 // The problem with MAP_FIXED is that mmap() can still succeed even if part of
723 // the memory region has already been mmap'ed. That means if we have too
724 // many threads and/or a very large heap, eventually a thread stack will
725 // collide with the heap.
726 //
727 // Here we try to prevent heap/stack collision by comparing current
728 // stack bottom with the highest address that has been mmap'ed by JVM
729 // plus a safety margin for memory maps created by native code.
730 //
731 // This feature can be disabled by setting ThreadSafetyMargin to 0
732 //
733 if (ThreadSafetyMargin > 0) {
734 address stack_bottom = os::current_stack_base() - os::current_stack_size();
736 // not safe if our stack extends below the safety margin
737 return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
738 } else {
739 return true;
740 }
741 } else {
742 // Floating stack LinuxThreads or NPTL:
743 // Unlike fixed stack LinuxThreads, thread stacks are not MAP_FIXED. When
744 // there's not enough space left, pthread_create() will fail. If we come
745 // here, that means enough space has been reserved for stack.
746 return true;
747 }
748 }
750 // Thread start routine for all newly created threads
751 static void *java_start(Thread *thread) {
752 // Try to randomize the cache line index of hot stack frames.
753 // This helps when threads with the same stack traces evict each other's
754 // cache lines. The threads can be either from the same JVM instance or
755 // from different JVM instances. The benefit is especially noticeable on
756 // processors with hyperthreading technology.
757 static int counter = 0;
758 int pid = os::current_process_id();
759 alloca(((pid ^ counter++) & 7) * 128);
761 ThreadLocalStorage::set_thread(thread);
763 OSThread* osthread = thread->osthread();
764 Monitor* sync = osthread->startThread_lock();
766 // non-floating-stack LinuxThreads needs an extra check, see above
767 if (!_thread_safety_check(thread)) {
768 // notify parent thread
769 MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
770 osthread->set_state(ZOMBIE);
771 sync->notify_all();
772 return NULL;
773 }
775 // thread_id is kernel thread id (similar to Solaris LWP id)
776 osthread->set_thread_id(os::Linux::gettid());
778 if (UseNUMA) {
779 int lgrp_id = os::numa_get_group_id();
780 if (lgrp_id != -1) {
781 thread->set_lgrp_id(lgrp_id);
782 }
783 }
784 // initialize signal mask for this thread
785 os::Linux::hotspot_sigmask(thread);
787 // initialize floating point control register
788 os::Linux::init_thread_fpu_state();
790 // handshaking with parent thread
791 {
792 MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
794 // notify parent thread
795 osthread->set_state(INITIALIZED);
796 sync->notify_all();
798 // wait until os::start_thread()
799 while (osthread->get_state() == INITIALIZED) {
800 sync->wait(Mutex::_no_safepoint_check_flag);
801 }
802 }
804 // call one more level start routine
805 thread->run();
807 return 0;
808 }
810 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
811 assert(thread->osthread() == NULL, "caller responsible");
813 // Allocate the OSThread object
814 OSThread* osthread = new OSThread(NULL, NULL);
815 if (osthread == NULL) {
816 return false;
817 }
819 // set the correct thread state
820 osthread->set_thread_type(thr_type);
822 // Initial state is ALLOCATED but not INITIALIZED
823 osthread->set_state(ALLOCATED);
825 thread->set_osthread(osthread);
827 // init thread attributes
828 pthread_attr_t attr;
829 pthread_attr_init(&attr);
830 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
832 // stack size
833 if (os::Linux::supports_variable_stack_size()) {
834 // calculate stack size if it's not specified by caller
835 if (stack_size == 0) {
836 stack_size = os::Linux::default_stack_size(thr_type);
838 switch (thr_type) {
839 case os::java_thread:
840 // Java threads use ThreadStackSize, whose default value can be
841 // changed with the -Xss flag
842 assert (JavaThread::stack_size_at_create() > 0, "this should be set");
843 stack_size = JavaThread::stack_size_at_create();
844 break;
845 case os::compiler_thread:
846 if (CompilerThreadStackSize > 0) {
847 stack_size = (size_t)(CompilerThreadStackSize * K);
848 break;
849 } // else fall through:
850 // use VMThreadStackSize if CompilerThreadStackSize is not defined
851 case os::vm_thread:
852 case os::pgc_thread:
853 case os::cgc_thread:
854 case os::watcher_thread:
855 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
856 break;
857 }
858 }
860 stack_size = MAX2(stack_size, os::Linux::min_stack_allowed);
861 pthread_attr_setstacksize(&attr, stack_size);
862 } else {
863 // let pthread_create() pick the default value.
864 }
866 // glibc guard page
867 pthread_attr_setguardsize(&attr, os::Linux::default_guard_size(thr_type));
869 ThreadState state;
871 {
872 // Serialize thread creation if we are running with fixed stack LinuxThreads
873 bool lock = os::Linux::is_LinuxThreads() && !os::Linux::is_floating_stack();
874 if (lock) {
875 os::Linux::createThread_lock()->lock_without_safepoint_check();
876 }
878 pthread_t tid;
879 int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
881 pthread_attr_destroy(&attr);
883 if (ret != 0) {
884 if (PrintMiscellaneous && (Verbose || WizardMode)) {
885 perror("pthread_create()");
886 }
887 // Need to clean up stuff we've allocated so far
888 thread->set_osthread(NULL);
889 delete osthread;
890 if (lock) os::Linux::createThread_lock()->unlock();
891 return false;
892 }
894 // Store pthread info into the OSThread
895 osthread->set_pthread_id(tid);
897 // Wait until child thread is either initialized or aborted
898 {
899 Monitor* sync_with_child = osthread->startThread_lock();
900 MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
901 while ((state = osthread->get_state()) == ALLOCATED) {
902 sync_with_child->wait(Mutex::_no_safepoint_check_flag);
903 }
904 }
906 if (lock) {
907 os::Linux::createThread_lock()->unlock();
908 }
909 }
911 // Aborted due to thread limit being reached
912 if (state == ZOMBIE) {
913 thread->set_osthread(NULL);
914 delete osthread;
915 return false;
916 }
918 // The thread is returned suspended (in state INITIALIZED),
919 // and is started higher up in the call chain
920 assert(state == INITIALIZED, "race condition");
921 return true;
922 }
924 /////////////////////////////////////////////////////////////////////////////
925 // attach existing thread
927 // bootstrap the main thread
928 bool os::create_main_thread(JavaThread* thread) {
929 assert(os::Linux::_main_thread == pthread_self(), "should be called inside main thread");
930 return create_attached_thread(thread);
931 }
933 bool os::create_attached_thread(JavaThread* thread) {
934 #ifdef ASSERT
935 thread->verify_not_published();
936 #endif
938 // Allocate the OSThread object
939 OSThread* osthread = new OSThread(NULL, NULL);
941 if (osthread == NULL) {
942 return false;
943 }
945 // Store pthread info into the OSThread
946 osthread->set_thread_id(os::Linux::gettid());
947 osthread->set_pthread_id(::pthread_self());
949 // initialize floating point control register
950 os::Linux::init_thread_fpu_state();
952 // Initial thread state is RUNNABLE
953 osthread->set_state(RUNNABLE);
955 thread->set_osthread(osthread);
957 if (UseNUMA) {
958 int lgrp_id = os::numa_get_group_id();
959 if (lgrp_id != -1) {
960 thread->set_lgrp_id(lgrp_id);
961 }
962 }
964 if (os::Linux::is_initial_thread()) {
965 // If the current thread is the initial thread, its stack is mapped on demand;
966 // see the notes about MAP_GROWSDOWN above. Here we try to force the kernel to map
967 // the entire stack region, to avoid a SEGV during stack banging.
968 // It is also useful for getting around the heap-stack-gap problem on SuSE
969 // kernels (see 4821821 for details). We first expand the stack to the top
970 // of the yellow zone, then enable the stack yellow zone (the order is significant;
971 // enabling the yellow zone first will crash the JVM on SuSE Linux), so there
972 // is no gap between the last two virtual memory regions.
974 JavaThread *jt = (JavaThread *)thread;
975 address addr = jt->stack_yellow_zone_base();
976 assert(addr != NULL, "initialization problem?");
977 assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");
979 osthread->set_expanding_stack();
980 os::Linux::manually_expand_stack(jt, addr);
981 osthread->clear_expanding_stack();
982 }
984 // initialize signal mask for this thread
985 // and save the caller's signal mask
986 os::Linux::hotspot_sigmask(thread);
988 return true;
989 }
991 void os::pd_start_thread(Thread* thread) {
992 OSThread * osthread = thread->osthread();
993 assert(osthread->get_state() != INITIALIZED, "just checking");
994 Monitor* sync_with_child = osthread->startThread_lock();
995 MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
996 sync_with_child->notify();
997 }
999 // Free Linux resources related to the OSThread
1000 void os::free_thread(OSThread* osthread) {
1001 assert(osthread != NULL, "osthread not set");
1003 if (Thread::current()->osthread() == osthread) {
1004 // Restore caller's signal mask
1005 sigset_t sigmask = osthread->caller_sigmask();
1006 pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1007 }
1009 delete osthread;
1010 }
1012 //////////////////////////////////////////////////////////////////////////////
1013 // thread local storage
1015 // Restore the thread pointer if the destructor is called. This is in case
1016 // someone from JNI code sets up a destructor with pthread_key_create to run
1017 // detachCurrentThread on thread death. Unless we restore the thread pointer we
1018 // will hang or crash. When detachCurrentThread is called the key will be set
1019 // to null and we will not be called again. If detachCurrentThread is never
1020 // called we could loop forever depending on the pthread implementation.
1021 static void restore_thread_pointer(void* p) {
1022 Thread* thread = (Thread*) p;
1023 os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
1024 }
1026 int os::allocate_thread_local_storage() {
1027 pthread_key_t key;
1028 int rslt = pthread_key_create(&key, restore_thread_pointer);
1029 assert(rslt == 0, "cannot allocate thread local storage");
1030 return (int)key;
1031 }
1033 // Note: This is currently not used by VM, as we don't destroy TLS key
1034 // on VM exit.
1035 void os::free_thread_local_storage(int index) {
1036 int rslt = pthread_key_delete((pthread_key_t)index);
1037 assert(rslt == 0, "invalid index");
1038 }
1040 void os::thread_local_storage_at_put(int index, void* value) {
1041 int rslt = pthread_setspecific((pthread_key_t)index, value);
1042 assert(rslt == 0, "pthread_setspecific failed");
1043 }
1045 extern "C" Thread* get_thread() {
1046 return ThreadLocalStorage::thread();
1047 }
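// Illustrative sketch (not part of this change): the destructor mechanism that
// restore_thread_pointer() above participates in. A TLS destructor registered
// with pthread_key_create() runs at thread exit with the last non-NULL value;
// if the destructor stores a non-NULL value again, the implementation re-runs
// destructors (bounded by PTHREAD_DESTRUCTOR_ITERATIONS on conforming
// implementations), which is why the comment above warns about looping.
// Names here are hypothetical.
static pthread_key_t example_key;
static void example_destructor(void* p) {
  pthread_setspecific(example_key, p);  // re-store so a later destructor pass still sees it
}
static void example_tls_init() {
  pthread_key_create(&example_key, example_destructor);
}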
1049 //////////////////////////////////////////////////////////////////////////////
1050 // initial thread
1052 // Check if current thread is the initial thread, similar to Solaris thr_main.
1053 bool os::Linux::is_initial_thread(void) {
1054 char dummy;
1055 // If called before init complete, thread stack bottom will be null.
1056 // Can be called if fatal error occurs before initialization.
1057 if (initial_thread_stack_bottom() == NULL) return false;
1058 assert(initial_thread_stack_bottom() != NULL &&
1059 initial_thread_stack_size() != 0,
1060 "os::init did not locate initial thread's stack region");
1061 if ((address)&dummy >= initial_thread_stack_bottom() &&
1062 (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size())
1063 return true;
1064 else return false;
1065 }
1067 // Find the virtual memory area that contains addr
1068 static bool find_vma(address addr, address* vma_low, address* vma_high) {
1069 FILE *fp = fopen("/proc/self/maps", "r");
1070 if (fp) {
1071 address low, high;
1072 while (!feof(fp)) {
1073 if (fscanf(fp, "%p-%p", &low, &high) == 2) {
1074 if (low <= addr && addr < high) {
1075 if (vma_low) *vma_low = low;
1076 if (vma_high) *vma_high = high;
1077 fclose (fp);
1078 return true;
1079 }
1080 }
1081 for (;;) {
1082 int ch = fgetc(fp);
1083 if (ch == EOF || ch == (int)'\n') break;
1084 }
1085 }
1086 fclose(fp);
1087 }
1088 return false;
1089 }
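// Illustrative sketch (not part of this change): using find_vma() above to
// query the memory region containing the current stack pointer. Assumes the
// tty output stream has been initialized; the helper name is hypothetical.
static void print_current_stack_vma() {
  char dummy;
  address low, high;
  if (find_vma((address)&dummy, &low, &high)) {
    tty->print_cr("stack vma: %p - %p", (void*)low, (void*)high);
  }
}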
1091 // Locate the initial thread stack. This special handling of the initial thread stack
1092 // is needed because pthread_getattr_np() on most (all?) Linux distros returns
1093 // a bogus value for the initial thread.
1094 void os::Linux::capture_initial_stack(size_t max_size) {
1095 // stack size is the easy part, get it from RLIMIT_STACK
1096 size_t stack_size;
1097 struct rlimit rlim;
1098 getrlimit(RLIMIT_STACK, &rlim);
1099 stack_size = rlim.rlim_cur;
1101 // 6308388: a bug in ld.so will relocate its own .data section to the
1102 // lower end of primordial stack; reduce ulimit -s value a little bit
1103 // so we won't install guard page on ld.so's data section.
1104 stack_size -= 2 * page_size();
1106 // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat
1107 // 7.1; in both cases we get 2G as the return value.
1108 // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0,
1109 // SuSE 7.2, Debian) cannot handle the alternate signal stack correctly
1110 // for the initial thread if its stack size exceeds 6M. Cap it at 2M,
1111 // in case other parts of glibc still assume a 2M max stack size.
1112 // FIXME: the alt signal stack is gone, maybe we can relax this constraint?
1113 // The problem still exists on RH7.2 (IA64 at least), but 2MB is a little small.
1114 if (stack_size > 2 * K * K IA64_ONLY(*2))
1115 stack_size = 2 * K * K IA64_ONLY(*2);
1116 // Try to figure out where the stack base (top) is. This is harder.
1117 //
1118 // When an application is started, glibc saves the initial stack pointer in
1119 // a global variable "__libc_stack_end", which is then used by system
1120 // libraries. __libc_stack_end should be pretty close to stack top. The
1121 // variable is available since the very early days. However, because it is
1122 // a private interface, it could disappear in the future.
1123 //
1124 // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
1125 // to __libc_stack_end, it is very close to stack top, but isn't the real
1126 // stack top. Note that /proc may not exist if VM is running as a chroot
1127 // program, so reading /proc/<pid>/stat could fail. Also the contents of
1128 // /proc/<pid>/stat could change in the future (though unlikely).
1129 //
1130 // We try __libc_stack_end first. If that doesn't work, look for
1131 // /proc/<pid>/stat. If neither of them works, we use current stack pointer
1132 // as a hint, which should work well in most cases.
1134 uintptr_t stack_start;
1136 // try __libc_stack_end first
1137 uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
1138 if (p && *p) {
1139 stack_start = *p;
1140 } else {
1141 // see if we can get the start_stack field from /proc/self/stat
1142 FILE *fp;
1143 int pid;
1144 char state;
1145 int ppid;
1146 int pgrp;
1147 int session;
1148 int nr;
1149 int tpgrp;
1150 unsigned long flags;
1151 unsigned long minflt;
1152 unsigned long cminflt;
1153 unsigned long majflt;
1154 unsigned long cmajflt;
1155 unsigned long utime;
1156 unsigned long stime;
1157 long cutime;
1158 long cstime;
1159 long prio;
1160 long nice;
1161 long junk;
1162 long it_real;
1163 uintptr_t start;
1164 uintptr_t vsize;
1165 intptr_t rss;
1166 uintptr_t rsslim;
1167 uintptr_t scodes;
1168 uintptr_t ecode;
1169 int i;
1171 // Figure out what the primordial thread stack base is. The code is inspired
1172 // by an email from Hans Boehm. /proc/self/stat begins with the current pid,
1173 // followed by the command name surrounded by parentheses, then state, etc.
1174 char stat[2048];
1175 int statlen;
1177 fp = fopen("/proc/self/stat", "r");
1178 if (fp) {
1179 statlen = fread(stat, 1, 2047, fp);
1180 stat[statlen] = '\0';
1181 fclose(fp);
1183 // Skip pid and the command string. Note that we could be dealing with
1184 // weird command names, e.g. user could decide to rename java launcher
1185 // to "java 1.4.2 :)", then the stat file would look like
1186 // 1234 (java 1.4.2 :)) R ... ...
1187 // We don't really need to know the command string, just find the last
1188 // occurrence of ")" and then start parsing from there. See bug 4726580.
1189 char * s = strrchr(stat, ')');
1191 i = 0;
1192 if (s) {
1193 // Skip blank chars
1194 do s++; while (isspace(*s));
1196 #define _UFM UINTX_FORMAT
1197 #define _DFM INTX_FORMAT
1199 /* 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 */
1200 /* 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 */
1201 i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
1202 &state, /* 3 %c */
1203 &ppid, /* 4 %d */
1204 &pgrp, /* 5 %d */
1205 &session, /* 6 %d */
1206 &nr, /* 7 %d */
1207 &tpgrp, /* 8 %d */
1208 &flags, /* 9 %lu */
1209 &minflt, /* 10 %lu */
1210 &cminflt, /* 11 %lu */
1211 &majflt, /* 12 %lu */
1212 &cmajflt, /* 13 %lu */
1213 &utime, /* 14 %lu */
1214 &stime, /* 15 %lu */
1215 &cutime, /* 16 %ld */
1216 &cstime, /* 17 %ld */
1217 &prio, /* 18 %ld */
1218 &nice, /* 19 %ld */
1219 &junk, /* 20 %ld */
1220 &it_real, /* 21 %ld */
1221 &start, /* 22 UINTX_FORMAT */
1222 &vsize, /* 23 UINTX_FORMAT */
1223 &rss, /* 24 INTX_FORMAT */
1224 &rsslim, /* 25 UINTX_FORMAT */
1225 &scodes, /* 26 UINTX_FORMAT */
1226 &ecode, /* 27 UINTX_FORMAT */
1227 &stack_start); /* 28 UINTX_FORMAT */
1228 }
1230 #undef _UFM
1231 #undef _DFM
1233 if (i != 28 - 2) {
1234 assert(false, "Bad conversion from /proc/self/stat");
1235 // product mode - assume we are the initial thread, good luck in the
1236 // embedded case.
1237 warning("Can't detect initial thread stack location - bad conversion");
1238 stack_start = (uintptr_t) &rlim;
1239 }
1240 } else {
1241 // For some reason we can't open /proc/self/stat (for example, when running on
1242 // FreeBSD with a Linux emulator, or inside a chroot). The fallback below should
1243 // work for most cases, so don't abort:
1244 warning("Can't detect initial thread stack location - no /proc/self/stat");
1245 stack_start = (uintptr_t) &rlim;
1246 }
1247 }
1249 // Now we have a pointer (stack_start) very close to the stack top, the
1250 // next thing to do is to figure out the exact location of stack top. We
1251 // can find out the virtual memory area that contains stack_start by
1252 // reading /proc/self/maps; it should be the last vma in /proc/self/maps,
1253 // and its upper limit is the real stack top. (Again, this would fail if
1254 // running inside a chroot, because /proc may not exist.)
1256 uintptr_t stack_top;
1257 address low, high;
1258 if (find_vma((address)stack_start, &low, &high)) {
1259 // success, "high" is the true stack top. (ignore "low", because initial
1260 // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
1261 stack_top = (uintptr_t)high;
1262 } else {
1263 // failed, likely because /proc/self/maps does not exist
1264 warning("Can't detect initial thread stack location - find_vma failed");
1265 // best effort: stack_start is normally within a few pages below the real
1266 // stack top, use it as stack top, and reduce stack size so we won't put
1267 // guard page outside stack.
1268 stack_top = stack_start;
1269 stack_size -= 16 * page_size();
1270 }
1272 // stack_top could be partially down the page so align it
1273 stack_top = align_size_up(stack_top, page_size());
1275 if (max_size && stack_size > max_size) {
1276 _initial_thread_stack_size = max_size;
1277 } else {
1278 _initial_thread_stack_size = stack_size;
1279 }
1281 _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
1282 _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
1283 }
1285 ////////////////////////////////////////////////////////////////////////////////
1286 // time support
1288 // Time since start-up in seconds to a fine granularity.
1289 // Used by VMSelfDestructTimer and the MemProfiler.
1290 double os::elapsedTime() {
1292 return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution
1293 }
1295 jlong os::elapsed_counter() {
1296 return javaTimeNanos() - initial_time_count;
1297 }
1299 jlong os::elapsed_frequency() {
1300 return NANOSECS_PER_SEC; // nanosecond resolution
1301 }
1303 bool os::supports_vtime() { return true; }
1304 bool os::enable_vtime() { return false; }
1305 bool os::vtime_enabled() { return false; }
1307 double os::elapsedVTime() {
1308 struct rusage usage;
1309 int retval = getrusage(RUSAGE_THREAD, &usage);
1310 if (retval == 0) {
1311 return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
1312 } else {
1313 // better than nothing, but not much
1314 return elapsedTime();
1315 }
1316 }
1318 jlong os::javaTimeMillis() {
1319 timeval time;
1320 int status = gettimeofday(&time, NULL);
1321 assert(status != -1, "linux error");
1322 return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1323 }
1325 #ifndef CLOCK_MONOTONIC
1326 #define CLOCK_MONOTONIC (1)
1327 #endif
1329 void os::Linux::clock_init() {
1330 // We do dlopen's in this particular order due to a bug in the Linux
1331 // dynamic loader (see 6348968) that leads to a crash on exit.
1332 void* handle = dlopen("librt.so.1", RTLD_LAZY);
1333 if (handle == NULL) {
1334 handle = dlopen("librt.so", RTLD_LAZY);
1335 }
1337 if (handle) {
1338 int (*clock_getres_func)(clockid_t, struct timespec*) =
1339 (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
1340 int (*clock_gettime_func)(clockid_t, struct timespec*) =
1341 (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
1342 if (clock_getres_func && clock_gettime_func) {
1343 // See if monotonic clock is supported by the kernel. Note that some
1344 // early implementations simply return kernel jiffies (updated every
1345 // 1/100 or 1/1000 second). It would be bad to use such a low res clock
1346 // for nano time (though the monotonic property is still nice to have).
1347 // It's fixed in newer kernels, however clock_getres() still returns
1348 // 1/HZ. We check if clock_getres() works, but will ignore its reported
1349 // resolution for now. Hopefully as people move to new kernels, this
1350 // won't be a problem.
1351 struct timespec res;
1352 struct timespec tp;
1353 if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
1354 clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
1355 // yes, monotonic clock is supported
1356 _clock_gettime = clock_gettime_func;
1357 return;
1358 } else {
1359 // close librt if there is no monotonic clock
1360 dlclose(handle);
1361 }
1362 }
1363 }
1364 warning("No monotonic clock was available - timed services may " \
1365 "be adversely affected if the time-of-day clock changes");
1366 }
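// Illustrative sketch (not part of this change): once clock_init() above has
// run, callers query the monotonic clock through the cached function pointer.
// The dlopen dance is needed because on older glibc clock_gettime() lives in
// librt rather than libc. The helper name is hypothetical.
static bool read_monotonic_nanos(jlong* out) {
  if (!os::Linux::supports_monotonic_clock()) {
    return false;  // clock_init() found no usable CLOCK_MONOTONIC
  }
  struct timespec tp;
  if (os::Linux::clock_gettime(CLOCK_MONOTONIC, &tp) != 0) {
    return false;
  }
  *out = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
  return true;
}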
1368 #ifndef SYS_clock_getres
1370 #if defined(IA32) || defined(AMD64)
1371 #define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229)
1372 #define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
1373 #else
1374 #warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
1375 #define sys_clock_getres(x,y) -1
1376 #endif
1378 #else
1379 #define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
1380 #endif
1382 void os::Linux::fast_thread_clock_init() {
1383 if (!UseLinuxPosixThreadCPUClocks) {
1384 return;
1385 }
1386 clockid_t clockid;
1387 struct timespec tp;
1388 int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
1389 (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");
1391 // Switch to using fast clocks for thread cpu time if
1392 // sys_clock_getres() returns error code 0.
1393 // Note that some kernels may support the current thread
1394 // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
1395 // returned by pthread_getcpuclockid().
1396 // If the fast POSIX clocks are supported, then sys_clock_getres()
1397 // must return at least tp.tv_sec == 0, which means a resolution
1398 // better than 1 sec. This is an extra check for reliability.
1400 if (pthread_getcpuclockid_func &&
1401 pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
1402 sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
1404 _supports_fast_thread_cpu_time = true;
1405 _pthread_getcpuclockid = pthread_getcpuclockid_func;
1406 }
1407 }
1409 jlong os::javaTimeNanos() {
1410 if (Linux::supports_monotonic_clock()) {
1411 struct timespec tp;
1412 int status = Linux::clock_gettime(CLOCK_MONOTONIC, &tp);
1413 assert(status == 0, "gettime error");
1414 jlong result = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
1415 return result;
1416 } else {
1417 timeval time;
1418 int status = gettimeofday(&time, NULL);
1419 assert(status != -1, "linux error");
1420 jlong usecs = jlong(time.tv_sec) * (1000 * 1000) + jlong(time.tv_usec);
1421 return 1000 * usecs;
1422 }
1423 }
1425 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1426 if (Linux::supports_monotonic_clock()) {
1427 info_ptr->max_value = ALL_64_BITS;
1429 // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
1430 info_ptr->may_skip_backward = false; // not subject to resetting or drifting
1431 info_ptr->may_skip_forward = false; // not subject to resetting or drifting
1432 } else {
1433 // gettimeofday - based on time in seconds since the Epoch, thus it does not wrap
1434 info_ptr->max_value = ALL_64_BITS;
1436 // gettimeofday is a real-time clock, so it may skip forward or backward
1437 info_ptr->may_skip_backward = true;
1438 info_ptr->may_skip_forward = true;
1439 }
1441 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
1442 }
1444 // Return the real, user, and system times in seconds from an
1445 // arbitrary fixed point in the past.
1446 bool os::getTimesSecs(double* process_real_time,
1447 double* process_user_time,
1448 double* process_system_time) {
1449 struct tms ticks;
1450 clock_t real_ticks = times(&ticks);
1452 if (real_ticks == (clock_t) (-1)) {
1453 return false;
1454 } else {
1455 double ticks_per_second = (double) clock_tics_per_sec;
1456 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1457 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1458 *process_real_time = ((double) real_ticks) / ticks_per_second;
1460 return true;
1461 }
1462 }
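// Illustrative sketch (not part of this change): times() reports values in
// clock ticks, and the authoritative ticks-per-second value comes from
// sysconf(_SC_CLK_TCK); the static clock_tics_per_sec default of 100 above is
// only an initial value. The helper name is hypothetical.
static double process_cpu_seconds() {
  struct tms t;
  if (times(&t) == (clock_t)(-1)) {
    return -1.0;  // times() failed
  }
  long tps = sysconf(_SC_CLK_TCK);
  return (double)(t.tms_utime + t.tms_stime) / (double)tps;
}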
1465 char * os::local_time_string(char *buf, size_t buflen) {
1466 struct tm t;
1467 time_t long_time;
1468 time(&long_time);
1469 localtime_r(&long_time, &t);
1470 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1471 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1472 t.tm_hour, t.tm_min, t.tm_sec);
1473 return buf;
1474 }
1476 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1477 return localtime_r(clock, res);
1478 }
1480 ////////////////////////////////////////////////////////////////////////////////
1481 // runtime exit support
1483 // Note: os::shutdown() might be called very early during initialization, or
1484 // called from signal handler. Before adding something to os::shutdown(), make
1485 // sure it is async-safe and can handle partially initialized VM.
1486 void os::shutdown() {
1488 // allow PerfMemory to attempt cleanup of any persistent resources
1489 perfMemory_exit();
1491 // needs to remove object in file system
1492 AttachListener::abort();
1494 // flush buffered output, finish log files
1495 ostream_abort();
1497 // Check for abort hook
1498 abort_hook_t abort_hook = Arguments::abort_hook();
1499 if (abort_hook != NULL) {
1500 abort_hook();
1501 }
1503 }
1505 // Note: os::abort() might be called very early during initialization, or
1506 // called from signal handler. Before adding something to os::abort(), make
1507 // sure it is async-safe and can handle partially initialized VM.
1508 void os::abort(bool dump_core) {
1509 os::shutdown();
1510 if (dump_core) {
1511 #ifndef PRODUCT
1512 fdStream out(defaultStream::output_fd());
1513 out.print_raw("Current thread is ");
1514 char buf[16];
1515 jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1516 out.print_raw_cr(buf);
1517 out.print_raw_cr("Dumping core ...");
1518 #endif
1519 ::abort(); // dump core
1520 }
1522 ::exit(1);
1523 }
1525 // Die immediately, no exit hook, no abort hook, no cleanup.
1526 void os::die() {
1527 // _exit() on LinuxThreads only kills current thread
1528 ::abort();
1529 }
1532 // This method is a copy of JDK's sysGetLastErrorString
1533 // from src/solaris/hpi/src/system_md.c
1535 size_t os::lasterror(char *buf, size_t len) {
1537 if (errno == 0) return 0;
1539 const char *s = ::strerror(errno);
1540 size_t n = ::strlen(s);
1541 if (n >= len) {
1542 n = len - 1;
1543 }
1544 ::strncpy(buf, s, n);
1545 buf[n] = '\0';
1546 return n;
1547 }
1549 intx os::current_thread_id() { return (intx)pthread_self(); }
1550 int os::current_process_id() {
1552 // Under the old Linux thread library, Linux gives each thread
1553 // its own process id. Because of this, each thread would return
1554 // a different pid if this method were to return the result
1555 // of getpid(2). Linux provides no API that returns the pid
1556 // of the launcher thread for the VM. This implementation
1557 // returns a unique pid, the pid of the launcher thread
1558 // that starts the VM 'process'.
1560 // Under the NPTL, getpid() returns the same pid as the
1561 // launcher thread rather than a unique pid per thread.
1562 // Use gettid() if you want the old pre-NPTL behaviour.
1564 // If you are looking for the result of a call to getpid() that
1565 // returns a unique pid for the calling thread, then look at the
1566 // OSThread::thread_id() method in the osThread_linux.hpp file.
1568 return (int)(_initial_pid ? _initial_pid : getpid());
1569 }
1571 // DLL functions
1573 const char* os::dll_file_extension() { return ".so"; }
1575 // This must be hard-coded because it's the system's temporary
1576 // directory, not the Java application's temp directory (a la java.io.tmpdir).
1577 const char* os::get_temp_directory() { return "/tmp"; }
1579 static bool file_exists(const char* filename) {
1580 struct stat statbuf;
1581 if (filename == NULL || strlen(filename) == 0) {
1582 return false;
1583 }
1584 return os::stat(filename, &statbuf) == 0;
1585 }
1587 bool os::dll_build_name(char* buffer, size_t buflen,
1588 const char* pname, const char* fname) {
1589 bool retval = false;
1590 // Copied from libhpi
1591 const size_t pnamelen = pname ? strlen(pname) : 0;
1593 // Return error on buffer overflow.
1594 if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
1595 return retval;
1596 }
1598 if (pnamelen == 0) {
1599 snprintf(buffer, buflen, "lib%s.so", fname);
1600 retval = true;
1601 } else if (strchr(pname, *os::path_separator()) != NULL) {
1602 int n;
1603 char** pelements = split_path(pname, &n);
1604 if (pelements == NULL) {
1605 return false;
1606 }
1607 for (int i = 0 ; i < n ; i++) {
1608 // Really shouldn't be NULL, but check can't hurt
1609 if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
1610 continue; // skip the empty path values
1611 }
1612 snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
1613 if (file_exists(buffer)) {
1614 retval = true;
1615 break;
1616 }
1617 }
1618 // release the storage
1619 for (int i = 0 ; i < n ; i++) {
1620 if (pelements[i] != NULL) {
1621 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
1622 }
1623 }
1624 if (pelements != NULL) {
1625 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
1626 }
1627 } else {
1628 snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
1629 retval = true;
1630 }
1631 return retval;
1632 }
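// Usage sketch for os::dll_build_name() (illustrative only; the buffer size
// and paths are arbitrary example values): an empty path yields a bare
// "lib<name>.so", a separated search path probes each element for an
// existing file, and a single directory is used verbatim.
static void dll_build_name_sketch() {
  char buf[4096];
  if (os::dll_build_name(buf, sizeof(buf), "", "verify")) {
    // buf == "libverify.so"
  }
  if (os::dll_build_name(buf, sizeof(buf), "/usr/lib:/opt/lib", "verify")) {
    // buf == the first of "/usr/lib/libverify.so", "/opt/lib/libverify.so"
    // that actually exists on disk
  }
}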
1634 // check if addr is inside libjvm.so
1635 bool os::address_is_in_vm(address addr) {
1636 static address libjvm_base_addr;
1637 Dl_info dlinfo;
1639 if (libjvm_base_addr == NULL) {
1640 if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1641 libjvm_base_addr = (address)dlinfo.dli_fbase;
1642 }
1643 assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
1644 }
1646 if (dladdr((void *)addr, &dlinfo) != 0) {
1647 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1648 }
1650 return false;
1651 }
1653 bool os::dll_address_to_function_name(address addr, char *buf,
1654 int buflen, int *offset) {
1655 // buf is not optional, but offset is optional
1656 assert(buf != NULL, "sanity check");
1658 Dl_info dlinfo;
1660 if (dladdr((void*)addr, &dlinfo) != 0) {
1661 // see if we have a matching symbol
1662 if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1663 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1664 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1665 }
1666 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1667 return true;
1668 }
1669 // no matching symbol so try for just file info
1670 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1671 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1672 buf, buflen, offset, dlinfo.dli_fname)) {
1673 return true;
1674 }
1675 }
1676 }
1678 buf[0] = '\0';
1679 if (offset != NULL) *offset = -1;
1680 return false;
1681 }
1683 struct _address_to_library_name {
1684 address addr; // input : memory address
1685 size_t buflen; // size of fname
1686 char* fname; // output: library name
1687 address base; // library base addr
1688 };
1690 static int address_to_library_name_callback(struct dl_phdr_info *info,
1691 size_t size, void *data) {
1692 int i;
1693 bool found = false;
1694 address libbase = NULL;
1695 struct _address_to_library_name * d = (struct _address_to_library_name *)data;
1697 // iterate through all loadable segments
1698 for (i = 0; i < info->dlpi_phnum; i++) {
1699 address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
1700 if (info->dlpi_phdr[i].p_type == PT_LOAD) {
1701 // base address of a library is the lowest address of its loaded
1702 // segments.
1703 if (libbase == NULL || libbase > segbase) {
1704 libbase = segbase;
1705 }
1706 // see if 'addr' is within current segment
1707 if (segbase <= d->addr &&
1708 d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
1709 found = true;
1710 }
1711 }
1712 }
1714 // dlpi_name is NULL or empty if the ELF file is the executable; return 0
1715 // so dll_address_to_library_name() can fall through to use dladdr(), which
1716 // can figure out the executable name from argv[0].
1717 if (found && info->dlpi_name && info->dlpi_name[0]) {
1718 d->base = libbase;
1719 if (d->fname) {
1720 jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
1721 }
1722 return 1;
1723 }
1724 return 0;
1725 }
1727 bool os::dll_address_to_library_name(address addr, char* buf,
1728 int buflen, int* offset) {
1729 // buf is not optional, but offset is optional
1730 assert(buf != NULL, "sanity check");
1732 Dl_info dlinfo;
1733 struct _address_to_library_name data;
1735 // There is a bug in the old glibc dladdr() implementation: it can resolve
1736 // to the wrong library name if the .so file has a base address != NULL. Here
1737 // we iterate through the program headers of all loaded libraries to find
1738 // out which library 'addr' really belongs to. This workaround can be
1739 // removed once the minimum requirement for glibc is moved to 2.3.x.
1740 data.addr = addr;
1741 data.fname = buf;
1742 data.buflen = buflen;
1743 data.base = NULL;
1744 int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);
1746 if (rslt) {
1747 // buf already contains library name
1748 if (offset) *offset = addr - data.base;
1749 return true;
1750 }
1751 if (dladdr((void*)addr, &dlinfo) != 0) {
1752 if (dlinfo.dli_fname != NULL) {
1753 jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1754 }
1755 if (dlinfo.dli_fbase != NULL && offset != NULL) {
1756 *offset = addr - (address)dlinfo.dli_fbase;
1757 }
1758 return true;
1759 }
1761 buf[0] = '\0';
1762 if (offset) *offset = -1;
1763 return false;
1764 }
1766 // Loads a .dll/.so and, in case of error, checks whether the
1767 // .dll/.so was built for the same architecture as HotSpot is
1768 // running on.
1771 // Remember the stack's state. The Linux dynamic linker will change
1772 // the stack to 'executable' at most once, so we must safepoint only once.
1773 bool os::Linux::_stack_is_executable = false;
1775 // VM operation that loads a library. This is necessary if stack protection
1776 // of the Java stacks can be lost during loading the library. If we
1777 // do not stop the Java threads, they can stack overflow before the stacks
1778 // are protected again.
1779 class VM_LinuxDllLoad: public VM_Operation {
1780 private:
1781 const char *_filename;
1782 char *_ebuf;
1783 int _ebuflen;
1784 void *_lib;
1785 public:
1786 VM_LinuxDllLoad(const char *fn, char *ebuf, int ebuflen) :
1787 _filename(fn), _ebuf(ebuf), _ebuflen(ebuflen), _lib(NULL) {}
1788 VMOp_Type type() const { return VMOp_LinuxDllLoad; }
1789 void doit() {
1790 _lib = os::Linux::dll_load_in_vmthread(_filename, _ebuf, _ebuflen);
1791 os::Linux::_stack_is_executable = true;
1792 }
1793 void* loaded_library() { return _lib; }
1794 };
1796 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1797 {
1798 void * result = NULL;
1799 bool load_attempted = false;
1801 // Check whether the library to load might change execution rights
1802 // of the stack. If they are changed, the protection of the stack
1803 // guard pages will be lost. We need a safepoint to fix this.
1804 //
1805 // See Linux man page execstack(8) for more info.
1806 if (os::uses_stack_guard_pages() && !os::Linux::_stack_is_executable) {
1807 ElfFile ef(filename);
1808 if (!ef.specifies_noexecstack()) {
1809 if (!is_init_completed()) {
1810 os::Linux::_stack_is_executable = true;
1811 // This is OK - No Java threads have been created yet, and hence no
1812 // stack guard pages to fix.
1813 //
1814 // This should happen only when you are building JDK7 using a very
1815 // old version of JDK6 (e.g., with JPRT) and running test_gamma.
1816 //
1817 // Dynamic loader will make all stacks executable after
1818 // this function returns, and will not do that again.
1819 assert(Threads::first() == NULL, "no Java threads should exist yet.");
1820 } else {
1821 warning("You have loaded library %s which might have disabled stack guard. "
1822 "The VM will try to fix the stack guard now.\n"
1823 "It's highly recommended that you fix the library with "
1824 "'execstack -c <libfile>', or link it with '-z noexecstack'.",
1825 filename);
1827 assert(Thread::current()->is_Java_thread(), "must be Java thread");
1828 JavaThread *jt = JavaThread::current();
1829 if (jt->thread_state() != _thread_in_native) {
1830 // This happens when a compiler thread tries to load a hsdis-<arch>.so file
1831 // that requires ExecStack. Cannot enter safe point. Let's give up.
1832 warning("Unable to fix stack guard. Giving up.");
1833 } else {
1834 if (!LoadExecStackDllInVMThread) {
1835 // This is for the case where the DLL has a static
1836 // constructor function that executes JNI code. We cannot
1837 // load such DLLs in the VMThread.
1838 result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
1839 }
1841 ThreadInVMfromNative tiv(jt);
1842 debug_only(VMNativeEntryWrapper vew;)
1844 VM_LinuxDllLoad op(filename, ebuf, ebuflen);
1845 VMThread::execute(&op);
1846 if (LoadExecStackDllInVMThread) {
1847 result = op.loaded_library();
1848 }
1849 load_attempted = true;
1850 }
1851 }
1852 }
1853 }
1855 if (!load_attempted) {
1856 result = os::Linux::dlopen_helper(filename, ebuf, ebuflen);
1857 }
1859 if (result != NULL) {
1860 // Successful loading
1861 return result;
1862 }
1864 Elf32_Ehdr elf_head;
1865 int diag_msg_max_length = ebuflen - strlen(ebuf);
1866 char* diag_msg_buf = ebuf + strlen(ebuf);
1868 if (diag_msg_max_length == 0) {
1869 // No more space in ebuf for additional diagnostics message
1870 return NULL;
1871 }
1874 int file_descriptor = ::open(filename, O_RDONLY | O_NONBLOCK);
1876 if (file_descriptor < 0) {
1877 // Can't open library, report dlerror() message
1878 return NULL;
1879 }
1881 bool failed_to_read_elf_head =
1882 (sizeof(elf_head) !=
1883 (::read(file_descriptor, &elf_head, sizeof(elf_head))));
1885 ::close(file_descriptor);
1886 if (failed_to_read_elf_head) {
1887 // file i/o error - report dlerror() msg
1888 return NULL;
1889 }
1891 typedef struct {
1892 Elf32_Half code; // Actual value as defined in elf.h
1893 Elf32_Half compat_class; // Compatibility class of the arch, from the VM's point of view
1894 char elf_class; // 32 or 64 bit
1895 char endianess; // MSB or LSB
1896 char* name; // String representation
1897 } arch_t;
1899 #ifndef EM_486
1900 #define EM_486 6 /* Intel 80486 */
1901 #endif
1903 static const arch_t arch_array[]={
1904 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1905 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1906 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1907 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1908 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1909 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1910 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1911 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1912 #if defined(VM_LITTLE_ENDIAN)
1913 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"},
1914 #else
1915 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1916 #endif
1917 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
1918 {EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
1919 {EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
1920 {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
1921 {EM_MIPS, EM_MIPS, ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
1922 {EM_PARISC, EM_PARISC, ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
1923 {EM_68K, EM_68K, ELFCLASS32, ELFDATA2MSB, (char*)"M68k"}
1924 };
1926 #if (defined IA32)
1927 static Elf32_Half running_arch_code=EM_386;
1928 #elif (defined AMD64)
1929 static Elf32_Half running_arch_code=EM_X86_64;
1930 #elif (defined IA64)
1931 static Elf32_Half running_arch_code=EM_IA_64;
1932 #elif (defined __sparc) && (defined _LP64)
1933 static Elf32_Half running_arch_code=EM_SPARCV9;
1934 #elif (defined __sparc) && (!defined _LP64)
1935 static Elf32_Half running_arch_code=EM_SPARC;
1936 #elif (defined __powerpc64__)
1937 static Elf32_Half running_arch_code=EM_PPC64;
1938 #elif (defined __powerpc__)
1939 static Elf32_Half running_arch_code=EM_PPC;
1940 #elif (defined ARM)
1941 static Elf32_Half running_arch_code=EM_ARM;
1942 #elif (defined S390)
1943 static Elf32_Half running_arch_code=EM_S390;
1944 #elif (defined ALPHA)
1945 static Elf32_Half running_arch_code=EM_ALPHA;
1946 #elif (defined MIPSEL)
1947 static Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
1948 #elif (defined PARISC)
1949 static Elf32_Half running_arch_code=EM_PARISC;
1950 #elif (defined MIPS)
1951 static Elf32_Half running_arch_code=EM_MIPS;
1952 #elif (defined M68K)
1953 static Elf32_Half running_arch_code=EM_68K;
1954 #else
1955 #error Method os::dll_load requires that one of following is defined:\
1956 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
1957 #endif
1959 // Identify the compatibility class for the VM's architecture and the library's architecture.
1960 // Obtain string descriptions for the architectures.
1962 arch_t lib_arch = {elf_head.e_machine, 0, elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1963 int running_arch_index = -1;
1965 for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1966 if (running_arch_code == arch_array[i].code) {
1967 running_arch_index = i;
1968 }
1969 if (lib_arch.code == arch_array[i].code) {
1970 lib_arch.compat_class = arch_array[i].compat_class;
1971 lib_arch.name = arch_array[i].name;
1972 }
1973 }
1975 assert(running_arch_index != -1,
1976 "Didn't find running architecture code (running_arch_code) in arch_array");
1977 if (running_arch_index == -1) {
1978 // Even though running architecture detection failed,
1979 // we may still continue with reporting the dlerror() message.
1980 return NULL;
1981 }
1983 if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1984 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1985 return NULL;
1986 }
1988 #ifndef S390
1989 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1990 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1991 return NULL;
1992 }
1993 #endif // !S390
1995 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1996 if (lib_arch.name != NULL) {
1997 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1998 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1999 lib_arch.name, arch_array[running_arch_index].name);
2000 } else {
2001 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
2002 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
2003 lib_arch.code,
2004 arch_array[running_arch_index].name);
2005 }
2006 }
2008 return NULL;
2009 }
2011 void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
2012 void * result = ::dlopen(filename, RTLD_LAZY);
2013 if (result == NULL) {
2014 ::strncpy(ebuf, ::dlerror(), ebuflen - 1);
2015 ebuf[ebuflen-1] = '\0';
2016 }
2017 return result;
2018 }
2020 void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf, int ebuflen) {
2021 void * result = NULL;
2022 if (LoadExecStackDllInVMThread) {
2023 result = dlopen_helper(filename, ebuf, ebuflen);
2024 }
2026 // Since 7019808, libjvm.so is linked with -noexecstack. If the VM loads a
2027 // library that requires an executable stack, or which does not have this
2028 // stack attribute set, dlopen changes the stack attribute to executable. The
2029 // read protection of the guard pages gets lost.
2030 //
2031 // Need to check _stack_is_executable again as multiple VM_LinuxDllLoad
2032 // may have been queued at the same time.
2034 if (!_stack_is_executable) {
2035 JavaThread *jt = Threads::first();
2037 while (jt) {
2038 if (!jt->stack_guard_zone_unused() && // Stack not yet fully initialized
2039 jt->stack_yellow_zone_enabled()) { // No pending stack overflow exceptions
2040 if (!os::guard_memory((char *) jt->stack_red_zone_base() - jt->stack_red_zone_size(),
2041 jt->stack_yellow_zone_size() + jt->stack_red_zone_size())) {
2042 warning("Attempt to reguard stack yellow zone failed.");
2043 }
2044 }
2045 jt = jt->next();
2046 }
2047 }
2049 return result;
2050 }
2052 /*
2053 * glibc-2.0 libdl is not MT safe. If you are building with any glibc,
2054 * chances are you might want to run the generated bits against glibc-2.0
2055 * libdl.so, so always use locking for any version of glibc.
2056 */
2057 void* os::dll_lookup(void* handle, const char* name) {
2058 pthread_mutex_lock(&dl_mutex);
2059 void* res = dlsym(handle, name);
2060 pthread_mutex_unlock(&dl_mutex);
2061 return res;
2062 }
2064 void* os::get_default_process_handle() {
2065 return (void*)::dlopen(NULL, RTLD_LAZY);
2066 }
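// Illustrative sketch combining the two functions above: resolve a symbol
// in the global scope of the running process. The helper name is
// hypothetical; the pattern is dlopen(NULL)'s handle fed to the locked
// dlsym() wrapper.
static void* lookup_in_process_sketch(const char* symbol) {
  void* handle = os::get_default_process_handle();  // dlopen(NULL, RTLD_LAZY)
  if (handle == NULL) {
    return NULL;
  }
  return os::dll_lookup(handle, symbol);            // dlsym under dl_mutex
}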
2068 static bool _print_ascii_file(const char* filename, outputStream* st) {
2069 int fd = ::open(filename, O_RDONLY);
2070 if (fd == -1) {
2071 return false;
2072 }
2074 char buf[32];
2075 int bytes;
2076 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2077 st->print_raw(buf, bytes);
2078 }
2080 ::close(fd);
2082 return true;
2083 }
2085 void os::print_dll_info(outputStream *st) {
2086 st->print_cr("Dynamic libraries:");
2088 char fname[32];
2089 pid_t pid = os::Linux::gettid();
2091 jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
2093 if (!_print_ascii_file(fname, st)) {
2094 st->print("Can not get library information for pid = %d\n", pid);
2095 }
2096 }
2098 void os::print_os_info_brief(outputStream* st) {
2099 os::Linux::print_distro_info(st);
2101 os::Posix::print_uname_info(st);
2103 os::Linux::print_libversion_info(st);
2105 }
2107 void os::print_os_info(outputStream* st) {
2108 st->print("OS:");
2110 os::Linux::print_distro_info(st);
2112 os::Posix::print_uname_info(st);
2114 // Print warning if unsafe chroot environment detected
2115 if (unsafe_chroot_detected) {
2116 st->print("WARNING!! ");
2117 st->print_cr("%s", unstable_chroot_error);
2118 }
2120 os::Linux::print_libversion_info(st);
2122 os::Posix::print_rlimit_info(st);
2124 os::Posix::print_load_average(st);
2126 os::Linux::print_full_memory_info(st);
2127 }
2129 // Try to identify popular distros.
2130 // Most Linux distributions have a /etc/XXX-release file, which contains
2131 // the OS version string. Newer Linux distributions have a /etc/lsb-release
2132 // file that also contains the OS version string. Some have more than one
2133 // /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
2134 // /etc/redhat-release), so the order is important.
2135 // Any Linux that is based on Red Hat (e.g. Oracle, Mandrake, Sun JDS...) has
2136 // its own specific XXX-release file as well as a redhat-release file.
2137 // Because of this, the XXX-release file needs to be searched for before the
2138 // redhat-release file.
2139 // Since Red Hat has an lsb-release file that is not very descriptive, the
2140 // search for redhat-release needs to come before lsb-release.
2141 // Since the lsb-release file is the new standard, it needs to be searched
2142 // before the older-style release files.
2143 // Searching system-release (Red Hat) and os-release (other Linuxes) is the
2144 // next-to-last resort. The os-release file is a new standard that contains
2145 // distribution information and the system-release file seems to be an old
2146 // standard that has been replaced by the lsb-release and os-release files.
2147 // Searching for the debian_version file is the last resort. It contains
2148 // an informative string like "6.0.6" or "wheezy/sid". Because of this
2149 // "Debian " is printed before the contents of the debian_version file.
2150 void os::Linux::print_distro_info(outputStream* st) {
2151 if (!_print_ascii_file("/etc/oracle-release", st) &&
2152 !_print_ascii_file("/etc/mandriva-release", st) &&
2153 !_print_ascii_file("/etc/mandrake-release", st) &&
2154 !_print_ascii_file("/etc/sun-release", st) &&
2155 !_print_ascii_file("/etc/redhat-release", st) &&
2156 !_print_ascii_file("/etc/lsb-release", st) &&
2157 !_print_ascii_file("/etc/SuSE-release", st) &&
2158 !_print_ascii_file("/etc/turbolinux-release", st) &&
2159 !_print_ascii_file("/etc/gentoo-release", st) &&
2160 !_print_ascii_file("/etc/ltib-release", st) &&
2161 !_print_ascii_file("/etc/angstrom-version", st) &&
2162 !_print_ascii_file("/etc/system-release", st) &&
2163 !_print_ascii_file("/etc/os-release", st)) {
2165 if (file_exists("/etc/debian_version")) {
2166 st->print("Debian ");
2167 _print_ascii_file("/etc/debian_version", st);
2168 } else {
2169 st->print("Linux");
2170 }
2171 }
2172 st->cr();
2173 }
2175 void os::Linux::print_libversion_info(outputStream* st) {
2176 // libc, pthread
2177 st->print("libc:");
2178 st->print("%s ", os::Linux::glibc_version());
2179 st->print("%s ", os::Linux::libpthread_version());
2180 if (os::Linux::is_LinuxThreads()) {
2181 st->print("(%s stack)", os::Linux::is_floating_stack() ? "floating" : "fixed");
2182 }
2183 st->cr();
2184 }
2186 void os::Linux::print_full_memory_info(outputStream* st) {
2187 st->print("\n/proc/meminfo:\n");
2188 _print_ascii_file("/proc/meminfo", st);
2189 st->cr();
2190 }
2192 void os::print_memory_info(outputStream* st) {
2194 st->print("Memory:");
2195 st->print(" %dk page", os::vm_page_size()>>10);
2197 // values in struct sysinfo are "unsigned long"
2198 struct sysinfo si;
2199 sysinfo(&si);
2201 st->print(", physical " UINT64_FORMAT "k",
2202 os::physical_memory() >> 10);
2203 st->print("(" UINT64_FORMAT "k free)",
2204 os::available_memory() >> 10);
2205 st->print(", swap " UINT64_FORMAT "k",
2206 ((jlong)si.totalswap * si.mem_unit) >> 10);
2207 st->print("(" UINT64_FORMAT "k free)",
2208 ((jlong)si.freeswap * si.mem_unit) >> 10);
2209 st->cr();
2210 }
2212 void os::pd_print_cpu_info(outputStream* st) {
2213 st->print("\n/proc/cpuinfo:\n");
2214 if (!_print_ascii_file("/proc/cpuinfo", st)) {
2215 st->print(" <Not Available>");
2216 }
2217 st->cr();
2218 }
2220 void os::print_siginfo(outputStream* st, void* siginfo) {
2221 const siginfo_t* si = (const siginfo_t*)siginfo;
2223 os::Posix::print_siginfo_brief(st, si);
2224 #if INCLUDE_CDS
2225 if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
2226 UseSharedSpaces) {
2227 FileMapInfo* mapinfo = FileMapInfo::current_info();
2228 if (mapinfo->is_in_shared_space(si->si_addr)) {
2229 st->print("\n\nError accessing class data sharing archive." \
2230 " Mapped file inaccessible during execution, " \
2231 " possible disk/network problem.");
2232 }
2233 }
2234 #endif
2235 st->cr();
2236 }
2239 static void print_signal_handler(outputStream* st, int sig,
2240 char* buf, size_t buflen);
2242 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
2243 st->print_cr("Signal Handlers:");
2244 print_signal_handler(st, SIGSEGV, buf, buflen);
2245 print_signal_handler(st, SIGBUS , buf, buflen);
2246 print_signal_handler(st, SIGFPE , buf, buflen);
2247 print_signal_handler(st, SIGPIPE, buf, buflen);
2248 print_signal_handler(st, SIGXFSZ, buf, buflen);
2249 print_signal_handler(st, SIGILL , buf, buflen);
2250 print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
2251 print_signal_handler(st, SR_signum, buf, buflen);
2252 print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
2253 print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
2254 print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
2255 print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
2256 #if defined(PPC64)
2257 print_signal_handler(st, SIGTRAP, buf, buflen);
2258 #endif
2259 }
2261 static char saved_jvm_path[MAXPATHLEN] = {0};
2263 // Find the full path to the current module, libjvm.so
2264 void os::jvm_path(char *buf, jint buflen) {
2265 // Error checking.
2266 if (buflen < MAXPATHLEN) {
2267 assert(false, "must use a large-enough buffer");
2268 buf[0] = '\0';
2269 return;
2270 }
2271 // Lazy resolve the path to current module.
2272 if (saved_jvm_path[0] != 0) {
2273 strcpy(buf, saved_jvm_path);
2274 return;
2275 }
2277 char dli_fname[MAXPATHLEN];
2278 bool ret = dll_address_to_library_name(
2279 CAST_FROM_FN_PTR(address, os::jvm_path),
2280 dli_fname, sizeof(dli_fname), NULL);
2281 assert(ret, "cannot locate libjvm");
2282 char *rp = NULL;
2283 if (ret && dli_fname[0] != '\0') {
2284 rp = realpath(dli_fname, buf);
2285 }
2286 if (rp == NULL)
2287 return;
2289 if (Arguments::created_by_gamma_launcher()) {
2290 // Support for the gamma launcher. Typical value for buf is
2291 // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at
2292 // the right place in the string, then assume we are installed in a JDK and
2293 // we're done. Otherwise, check for a JAVA_HOME environment variable and fix
2294 // up the path so it looks like libjvm.so is installed there (append a
2295 // fake suffix hotspot/libjvm.so).
2296 const char *p = buf + strlen(buf) - 1;
2297 for (int count = 0; p > buf && count < 5; ++count) {
2298 for (--p; p > buf && *p != '/'; --p)
2299 /* empty */ ;
2300 }
2302 if (strncmp(p, "/jre/lib/", 9) != 0) {
2303 // Look for JAVA_HOME in the environment.
2304 char* java_home_var = ::getenv("JAVA_HOME");
2305 if (java_home_var != NULL && java_home_var[0] != 0) {
2306 char* jrelib_p;
2307 int len;
2309 // Check the current module name "libjvm.so".
2310 p = strrchr(buf, '/');
2311 assert(strstr(p, "/libjvm") == p, "invalid library name");
2313 rp = realpath(java_home_var, buf);
2314 if (rp == NULL)
2315 return;
2317 // determine if this is a legacy image or a modules image;
2318 // a modules image doesn't have a "jre" subdirectory
2319 len = strlen(buf);
2320 assert(len < buflen, "Ran out of buffer room");
2321 jrelib_p = buf + len;
2322 snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
2323 if (0 != access(buf, F_OK)) {
2324 snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
2325 }
2327 if (0 == access(buf, F_OK)) {
2328 // Use current module name "libjvm.so"
2329 len = strlen(buf);
2330 snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2331 } else {
2332 // Go back to path of .so
2333 rp = realpath(dli_fname, buf);
2334 if (rp == NULL)
2335 return;
2336 }
2337 }
2338 }
2339 }
2341 strncpy(saved_jvm_path, buf, MAXPATHLEN);
2342 }
2344 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2345 // no prefix required, not even "_"
2346 }
2348 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2349 // no suffix required
2350 }
2352 ////////////////////////////////////////////////////////////////////////////////
2353 // sun.misc.Signal support
2355 static volatile jint sigint_count = 0;
2357 static void
2358 UserHandler(int sig, void *siginfo, void *context) {
2359 // 4511530 - sem_post is serialized and handled by the manager thread. When
2360 // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
2361 // don't want to flood the manager thread with sem_post requests.
2362 if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
2363 return;
2365 // If Ctrl-C is pressed during error reporting, it is likely because the
2366 // error handler fails to abort. Let the VM die immediately.
2367 if (sig == SIGINT && is_error_reported()) {
2368 os::die();
2369 }
2371 os::signal_notify(sig);
2372 }
2374 void* os::user_handler() {
2375 return CAST_FROM_FN_PTR(void*, UserHandler);
2376 }
2378 class Semaphore : public StackObj {
2379 public:
2380 Semaphore();
2381 ~Semaphore();
2382 void signal();
2383 void wait();
2384 bool trywait();
2385 bool timedwait(unsigned int sec, int nsec);
2386 private:
2387 sem_t _semaphore;
2388 };
2390 Semaphore::Semaphore() {
2391 sem_init(&_semaphore, 0, 0);
2392 }
2394 Semaphore::~Semaphore() {
2395 sem_destroy(&_semaphore);
2396 }
2398 void Semaphore::signal() {
2399 sem_post(&_semaphore);
2400 }
2402 void Semaphore::wait() {
2403 sem_wait(&_semaphore);
2404 }
2406 bool Semaphore::trywait() {
2407 return sem_trywait(&_semaphore) == 0;
2408 }
2410 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2412 struct timespec ts;
2413 // Semaphores are always associated with CLOCK_REALTIME
2414 os::Linux::clock_gettime(CLOCK_REALTIME, &ts);
2415 // see unpackTime for discussion on overflow checking
2416 if (sec >= MAX_SECS) {
2417 ts.tv_sec += MAX_SECS;
2418 ts.tv_nsec = 0;
2419 } else {
2420 ts.tv_sec += sec;
2421 ts.tv_nsec += nsec;
2422 if (ts.tv_nsec >= NANOSECS_PER_SEC) {
2423 ts.tv_nsec -= NANOSECS_PER_SEC;
2424 ++ts.tv_sec; // note: this must be <= MAX_SECS
2425 }
2426 }
2428 while (1) {
2429 int result = sem_timedwait(&_semaphore, &ts);
2430 if (result == 0) {
2431 return true;
2432 } else if (errno == EINTR) {
2433 continue;
2434 } else if (errno == ETIMEDOUT) {
2435 return false;
2436 } else {
2437 return false;
2438 }
2439 }
2440 }
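// Usage sketch for the Semaphore class above (illustrative only; the
// timeout values are arbitrary example numbers): a producer signals, a
// consumer waits with a bound so it can recover if the signal never comes.
static bool semaphore_usage_sketch(Semaphore* sem) {
  sem->signal();                // sem_post: wake one waiter
  return sem->timedwait(1, 0);  // wait at most 1 second; false on timeout
}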
2442 extern "C" {
2443 typedef void (*sa_handler_t)(int);
2444 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2445 }
2447 void* os::signal(int signal_number, void* handler) {
2448 struct sigaction sigAct, oldSigAct;
2450 sigfillset(&(sigAct.sa_mask));
2451 sigAct.sa_flags = SA_RESTART|SA_SIGINFO;
2452 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2454 if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2455 // -1 means registration failed
2456 return (void *)-1;
2457 }
2459 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2460 }
2462 void os::signal_raise(int signal_number) {
2463 ::raise(signal_number);
2464 }
2466 /*
2467 * The following code was moved from os.cpp to make it platform
2468 * specific, which it is by its very nature.
2469 */
2471 // Will be modified when max signal is changed to be dynamic
2472 int os::sigexitnum_pd() {
2473 return NSIG;
2474 }
2476 // a counter for each possible signal value
2477 static volatile jint pending_signals[NSIG+1] = { 0 };
2479 // Linux (POSIX) specific handshaking semaphore.
2480 static sem_t sig_sem;
2481 static Semaphore sr_semaphore;
2483 void os::signal_init_pd() {
2484 // Initialize signal structures
2485 ::memset((void*)pending_signals, 0, sizeof(pending_signals));
2487 // Initialize signal semaphore
2488 ::sem_init(&sig_sem, 0, 0);
2489 }
2491 void os::signal_notify(int sig) {
2492 Atomic::inc(&pending_signals[sig]);
2493 ::sem_post(&sig_sem);
2494 }
2496 static int check_pending_signals(bool wait) {
2497 Atomic::store(0, &sigint_count);
2498 for (;;) {
2499 for (int i = 0; i < NSIG + 1; i++) {
2500 jint n = pending_signals[i];
2501 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2502 return i;
2503 }
2504 }
2505 if (!wait) {
2506 return -1;
2507 }
2508 JavaThread *thread = JavaThread::current();
2509 ThreadBlockInVM tbivm(thread);
2511 bool threadIsSuspended;
2512 do {
2513 thread->set_suspend_equivalent();
2514 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2515 ::sem_wait(&sig_sem);
2517 // were we externally suspended while we were waiting?
2518 threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2519 if (threadIsSuspended) {
2520 //
2521 // The semaphore has been incremented, but while we were waiting
2522 // another thread suspended us. We don't want to continue running
2523 // while suspended because that would surprise the thread that
2524 // suspended us.
2525 //
2526 ::sem_post(&sig_sem);
2528 thread->java_suspend_self();
2529 }
2530 } while (threadIsSuspended);
2531 }
2532 }
2534 int os::signal_lookup() {
2535 return check_pending_signals(false);
2536 }
2538 int os::signal_wait() {
2539 return check_pending_signals(true);
2540 }
2542 ////////////////////////////////////////////////////////////////////////////////
2543 // Virtual Memory
2545 int os::vm_page_size() {
2546 // Seems redundant as all get out
2547 assert(os::Linux::page_size() != -1, "must call os::init");
2548 return os::Linux::page_size();
2549 }
2551 // Linux allocates memory by pages.
2552 int os::vm_allocation_granularity() {
2553 assert(os::Linux::page_size() != -1, "must call os::init");
2554 return os::Linux::page_size();
2555 }
2557 // Rationale behind this function:
2558 // current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without an executable
2559 // mapping for the address (see lookup_dcookie() in the kernel module), so we cannot get
2560 // samples for JITted code. Here we create a private executable mapping over the code cache
2561 // and then we can use the standard (well, almost, as the mapping can change) way to provide
2562 // info for the reporting script, by storing the timestamp and location of symbols.
2563 void linux_wrap_code(char* base, size_t size) {
2564 static volatile jint cnt = 0;
2566 if (!UseOprofile) {
2567 return;
2568 }
2570 char buf[PATH_MAX+1];
2571 int num = Atomic::add(1, &cnt);
2573 snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
2574 os::get_temp_directory(), os::current_process_id(), num);
2575 unlink(buf);
2577 int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);
2579 if (fd != -1) {
2580 off_t rv = ::lseek(fd, size-2, SEEK_SET);
2581 if (rv != (off_t)-1) {
2582 if (::write(fd, "", 1) == 1) {
2583 mmap(base, size,
2584 PROT_READ|PROT_WRITE|PROT_EXEC,
2585 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
2586 }
2587 }
2588 ::close(fd);
2589 unlink(buf);
2590 }
2591 }
2593 static bool recoverable_mmap_error(int err) {
2594 // See if the error is one we can let the caller handle. This
2595 // list of errno values comes from JBS-6843484. I can't find a
2596 // Linux man page that documents this specific set of errno
2597 // values, so while this list currently matches Solaris, it may
2598 // change as we gain experience with this failure mode.
2599 switch (err) {
2600 case EBADF:
2601 case EINVAL:
2602 case ENOTSUP:
2603 // let the caller deal with these errors
2604 return true;
2606 default:
2607 // Any remaining errors on this OS can cause our reserved mapping
2608 // to be lost. That can cause confusion where different data
2609 // structures think they have the same memory mapped. The worst
2610 // scenario is if both the VM and a library think they have the
2611 // same memory mapped.
2612 return false;
2613 }
2614 }
2616 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2617 int err) {
2618 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2619 ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2620 strerror(err), err);
2621 }
2623 static void warn_fail_commit_memory(char* addr, size_t size,
2624 size_t alignment_hint, bool exec,
2625 int err) {
2626 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2627 ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
2628 alignment_hint, exec, strerror(err), err);
2629 }
2631 // NOTE: Linux kernel does not really reserve the pages for us.
2632 // All it does is check if there are enough free pages
2633 // left at the time of mmap(). This could be a potential
2634 // problem.
2635 int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
2636 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2637 uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
2638 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
2639 if (res != (uintptr_t) MAP_FAILED) {
2640 if (UseNUMAInterleaving) {
2641 numa_make_global(addr, size);
2642 }
2643 return 0;
2644 }
2646 int err = errno; // save errno from mmap() call above
2648 if (!recoverable_mmap_error(err)) {
2649 warn_fail_commit_memory(addr, size, exec, err);
2650 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
2651 }
2653 return err;
2654 }
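// Because of the overcommit behavior described in the NOTE above, a
// successful commit still doesn't guarantee physical backing. A sketch of
// the usual mitigation (the idea behind -XX:+AlwaysPreTouch): touch one
// byte per page so the kernel must back the range now, surfacing failure
// early instead of at some random later access. Illustrative only.
static void pretouch_sketch(char* addr, size_t size) {
  size_t page = (size_t) os::vm_page_size();
  for (char* p = addr; p < addr + size; p += page) {
    *p = 0;  // fault the committed page in
  }
}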
2656 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2657 return os::Linux::commit_memory_impl(addr, size, exec) == 0;
2658 }
2660 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2661 const char* mesg) {
2662 assert(mesg != NULL, "mesg must be specified");
2663 int err = os::Linux::commit_memory_impl(addr, size, exec);
2664 if (err != 0) {
2665 // the caller wants all commit errors to exit with the specified mesg:
2666 warn_fail_commit_memory(addr, size, exec, err);
2667 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2668 }
2669 }
2671 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
2672 #ifndef MAP_HUGETLB
2673 #define MAP_HUGETLB 0x40000
2674 #endif
2676 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2677 #ifndef MADV_HUGEPAGE
2678 #define MADV_HUGEPAGE 14
2679 #endif
2681 int os::Linux::commit_memory_impl(char* addr, size_t size,
2682 size_t alignment_hint, bool exec) {
2683 int err = os::Linux::commit_memory_impl(addr, size, exec);
2684 if (err == 0) {
2685 realign_memory(addr, size, alignment_hint);
2686 }
2687 return err;
2688 }
2690 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2691 bool exec) {
2692 return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2693 }
2695 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2696 size_t alignment_hint, bool exec,
2697 const char* mesg) {
2698 assert(mesg != NULL, "mesg must be specified");
2699 int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
2700 if (err != 0) {
2701 // the caller wants all commit errors to exit with the specified mesg:
2702 warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
2703 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2704 }
2705 }
2707 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2708 if (UseTransparentHugePages && alignment_hint > (size_t)vm_page_size()) {
2709 // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2710 // be supported or the memory may already be backed by huge pages.
2711 ::madvise(addr, bytes, MADV_HUGEPAGE);
2712 }
2713 }
2715 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2716 // This method works by doing an mmap over an existing mmaping and effectively discarding
2717 // the existing pages. However it won't work for SHM-based large pages that cannot be
2718 // uncommitted at all. We don't do anything in this case to avoid creating a segment with
2719 // small pages on top of the SHM segment. This method always works for small pages, so we
2720 // allow that in any case.
2721 if (alignment_hint <= (size_t)os::vm_page_size() || can_commit_large_page_memory()) {
2722 commit_memory(addr, bytes, alignment_hint, !ExecMem);
2723 }
2724 }
2726 void os::numa_make_global(char *addr, size_t bytes) {
2727 Linux::numa_interleave_memory(addr, bytes);
2728 }
2730 // Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
2731 // bind policy to MPOL_PREFERRED for the current thread.
2732 #define USE_MPOL_PREFERRED 0
2734 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2735 // To make NUMA and large pages more robust when both enabled, we need to ease
2736 // the requirements on where the memory should be allocated. MPOL_BIND is the
2737 // default policy and it will force memory to be allocated on the specified
2738 // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
2739 // the specified node, but will not force it. Using this policy will prevent
2740 // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
2741 // free large pages.
2742 Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
2743 Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
2744 }
2746 bool os::numa_topology_changed() { return false; }
2748 size_t os::numa_get_groups_num() {
2749 int max_node = Linux::numa_max_node();
2750 return max_node > 0 ? max_node + 1 : 1;
2751 }
2753 int os::numa_get_group_id() {
2754 int cpu_id = Linux::sched_getcpu();
2755 if (cpu_id != -1) {
2756 int lgrp_id = Linux::get_node_by_cpu(cpu_id);
2757 if (lgrp_id != -1) {
2758 return lgrp_id;
2759 }
2760 }
2761 return 0;
2762 }
2764 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2765 for (size_t i = 0; i < size; i++) {
2766 ids[i] = i;
2767 }
2768 return size;
2769 }
2771 bool os::get_page_info(char *start, page_info* info) {
2772 return false;
2773 }
2775 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2776 return end;
2777 }
2780 int os::Linux::sched_getcpu_syscall(void) {
2781 unsigned int cpu;
2782 int retval = -1;
2784 #if defined(IA32)
2785 # ifndef SYS_getcpu
2786 # define SYS_getcpu 318
2787 # endif
2788 retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
2789 #elif defined(AMD64)
2790 // Unfortunately we have to bring all these macros here from vsyscall.h
2791 // to be able to compile on old linuxes.
2792 # define __NR_vgetcpu 2
2793 # define VSYSCALL_START (-10UL << 20)
2794 # define VSYSCALL_SIZE 1024
2795 # define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
2796 typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
2797 vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
2798 retval = vgetcpu(&cpu, NULL, NULL);
2799 #endif
2801 return (retval == -1) ? retval : cpu;
2802 }
2804 // Something to do with the numa-aware allocator needs these symbols
2805 extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
2806 extern "C" JNIEXPORT void numa_error(char *where) { }
2807 extern "C" JNIEXPORT int fork1() { return fork(); }
2810 // If we are running with libnuma version > 2, then we should
2811 // be trying to use symbols with versions 1.1.
2812 // If we are running with an earlier version, which did not have symbol versions,
2813 // we should use the base version.
2814 void* os::Linux::libnuma_dlsym(void* handle, const char *name) {
2815 void *f = dlvsym(handle, name, "libnuma_1.1");
2816 if (f == NULL) {
2817 f = dlsym(handle, name);
2818 }
2819 return f;
2820 }
2822 bool os::Linux::libnuma_init() {
2823 // sched_getcpu() should be in libc.
2824 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
2825 dlsym(RTLD_DEFAULT, "sched_getcpu")));
2827 // If it's not, try a direct syscall.
2828 if (sched_getcpu() == -1)
2829 set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));
2831 if (sched_getcpu() != -1) { // Does it work?
2832 void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
2833 if (handle != NULL) {
2834 set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
2835 libnuma_dlsym(handle, "numa_node_to_cpus")));
2836 set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
2837 libnuma_dlsym(handle, "numa_max_node")));
2838 set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
2839 libnuma_dlsym(handle, "numa_available")));
2840 set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
2841 libnuma_dlsym(handle, "numa_tonode_memory")));
2842 set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
2843 libnuma_dlsym(handle, "numa_interleave_memory")));
2844 set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
2845 libnuma_dlsym(handle, "numa_set_bind_policy")));
2848 if (numa_available() != -1) {
2849 set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
2850 // Create a cpu -> node mapping
2851 _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
2852 rebuild_cpu_to_node_map();
2853 return true;
2854 }
2855 }
2856 }
2857 return false;
2858 }
2860 // rebuild_cpu_to_node_map() constructs a table mapping cpu id to node id.
2861 // The table is later used in get_node_by_cpu().
2862 void os::Linux::rebuild_cpu_to_node_map() {
2863 const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
2864 // in libnuma (possible values start from 16
2865 // and continue up with every other power of 2, but are less
2866 // than the maximum number of CPUs supported by the kernel), and
2867 // is subject to change (in libnuma version 2 the requirements
2868 // are more reasonable), we'll just hardcode the number they use
2869 // in the library.
2870 const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
2872 size_t cpu_num = os::active_processor_count();
2873 size_t cpu_map_size = NCPUS / BitsPerCLong;
2874 size_t cpu_map_valid_size =
2875 MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
2877 cpu_to_node()->clear();
2878 cpu_to_node()->at_grow(cpu_num - 1);
2879 size_t node_num = numa_get_groups_num();
2881 unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal);
2882 for (size_t i = 0; i < node_num; i++) {
2883 if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
2884 for (size_t j = 0; j < cpu_map_valid_size; j++) {
2885 if (cpu_map[j] != 0) {
2886 for (size_t k = 0; k < BitsPerCLong; k++) {
2887 if (cpu_map[j] & (1UL << k)) {
2888 cpu_to_node()->at_put(j * BitsPerCLong + k, i);
2889 }
2890 }
2891 }
2892 }
2893 }
2894 }
2895 FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal);
2896 }
2898 int os::Linux::get_node_by_cpu(int cpu_id) {
2899 if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
2900 return cpu_to_node()->at(cpu_id);
2901 }
2902 return -1;
2903 }
2905 GrowableArray<int>* os::Linux::_cpu_to_node;
2906 os::Linux::sched_getcpu_func_t os::Linux::_sched_getcpu;
2907 os::Linux::numa_node_to_cpus_func_t os::Linux::_numa_node_to_cpus;
2908 os::Linux::numa_max_node_func_t os::Linux::_numa_max_node;
2909 os::Linux::numa_available_func_t os::Linux::_numa_available;
2910 os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
2911 os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
2912 os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
2913 unsigned long* os::Linux::_numa_all_nodes;
2915 bool os::pd_uncommit_memory(char* addr, size_t size) {
2916 uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
2917 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
2918 return res != (uintptr_t) MAP_FAILED;
2919 }
2921 static
2922 address get_stack_committed_bottom(address bottom, size_t size) {
2923 address nbot = bottom;
2924 address ntop = bottom + size;
2926 size_t page_sz = os::vm_page_size();
2927 unsigned pages = size / page_sz;
2929 unsigned char vec[1];
2930 unsigned imin = 1, imax = pages + 1, imid;
2931 int mincore_return_value = 0;
2933 assert(imin <= imax, "Unexpected page size");
2935 while (imin < imax) {
2936 imid = (imax + imin) / 2;
2937 nbot = ntop - (imid * page_sz);
2939 // Use a trick with mincore to check whether the page is mapped or not.
2940 // mincore sets vec to 1 if the page resides in memory and to 0 if the page
2941 // is swapped out, but if the page we are asking for is unmapped
2942 // it returns -1,ENOMEM.
2943 mincore_return_value = mincore(nbot, page_sz, vec);
2945 if (mincore_return_value == -1) {
2946 // Page is not mapped; go up
2947 // to find the first mapped page.
2948 if (errno != EAGAIN) {
2949 assert(errno == ENOMEM, "Unexpected mincore errno");
2950 imax = imid;
2951 }
2952 } else {
2953 // Page is mapped; go down
2954 // to find the first unmapped page.
2955 imin = imid + 1;
2956 }
2957 }
2959 nbot = nbot + page_sz;
2961 // Adjust the stack bottom up one page if the last checked page is not mapped
2962 if (mincore_return_value == -1) {
2963 nbot = nbot + page_sz;
2964 }
2966 return nbot;
2967 }
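// Standalone sketch of the mincore() trick used above (illustrative only):
// probe a single page and report whether it is mapped at all. A -1/ENOMEM
// result means "unmapped"; success means "mapped" (vec[0]'s low bit then
// distinguishes resident from swapped out).
static bool page_is_mapped_sketch(address page) {
  unsigned char vec[1];
  return ::mincore((void*)page, os::vm_page_size(), vec) != -1;
}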
2970 // Linux uses a growable mapping for the stack, and if the mapping for
2971 // the stack guard pages is not removed when we detach a thread the
2972 // stack cannot grow beyond the pages where the stack guard was
2973 // mapped. If at some point later in the process the stack expands to
2974 // that point, the Linux kernel cannot expand the stack any further
2975 // because the guard pages are in the way, and a segfault occurs.
2976 //
2977 // However, it's essential not to split the stack region by unmapping
2978 // a region (leaving a hole) that's already part of the stack mapping,
2979 // so if the stack mapping has already grown beyond the guard pages at
2980 // the time we create them, we have to truncate the stack mapping.
2981 // So, we need to know the extent of the stack mapping when
2982 // create_stack_guard_pages() is called.
2984 // We only need this for stacks that are growable: at the time of
2985 // writing thread stacks don't use growable mappings (i.e. those
2986 // created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
2987 // only applies to the main thread.
2989 // If the (growable) stack mapping already extends beyond the point
2990 // where we're going to put our guard pages, truncate the mapping at
2991 // that point by munmap()ping it. This ensures that when we later
2992 // munmap() the guard pages we don't leave a hole in the stack
2993 // mapping. This only affects the main/initial thread
2995 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2997 if (os::Linux::is_initial_thread()) {
2998 // As we manually grow the stack up to the bottom inside create_attached_thread(),
2999 // it's likely that os::Linux::initial_thread_stack_bottom is mapped and
3000 // we don't need to do anything special.
3001 // Check it first, before calling the heavy function.
3002 uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
3003 unsigned char vec[1];
3005 if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
3006 // Fallback to slow path on all errors, including EAGAIN
3007 stack_extent = (uintptr_t) get_stack_committed_bottom(
3008 os::Linux::initial_thread_stack_bottom(),
3009 (size_t)addr - stack_extent);
3010 }
3012 if (stack_extent < (uintptr_t)addr) {
3013 ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
3014 }
3015 }
3017 return os::commit_memory(addr, size, !ExecMem);
3018 }
3020 // If this is a growable mapping, remove the guard pages entirely by
3021 // munmap()ping them. If not, just call uncommit_memory(). This only
3022 // affects the main/initial thread, but guard against future OS changes.
3023 // It's safe to always unmap guard pages for the initial thread because we
3024 // always place them right after the end of the mapped region.
3026 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3027 uintptr_t stack_extent, stack_base;
3029 if (os::Linux::is_initial_thread()) {
3030 return ::munmap(addr, size) == 0;
3031 }
3033 return os::uncommit_memory(addr, size);
3034 }
3036 static address _highest_vm_reserved_address = NULL;
3038 // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
3039 // at 'requested_addr'. If there are existing memory mappings at the same
3040 // location, however, they will be overwritten. If 'fixed' is false,
3041 // 'requested_addr' is only treated as a hint, the return value may or
3042 // may not start from the requested address. Unlike Linux mmap(), this
3043 // function returns NULL to indicate failure.
3044 static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
3045 char * addr;
3046 int flags;
3048 flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
3049 if (fixed) {
3050 assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
3051 flags |= MAP_FIXED;
3052 }
3054 // Map reserved/uncommitted pages PROT_NONE so we fail early if we
3055 // touch an uncommitted page. Otherwise, the read/write might
3056 // succeed if we have enough swap space to back the physical page.
3057 addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
3058 flags, -1, 0);
3060 if (addr != MAP_FAILED) {
3061 // anon_mmap() should only get called during VM initialization,
3062 // don't need lock (actually we can skip locking even if it can be called
3063 // from multiple threads, because _highest_vm_reserved_address is just a
3064 // hint about the upper limit of non-stack memory regions.)
3065 if ((address)addr + bytes > _highest_vm_reserved_address) {
3066 _highest_vm_reserved_address = (address)addr + bytes;
3067 }
3068 }
3070 return addr == MAP_FAILED ? NULL : addr;
3071 }
3073 // Don't update _highest_vm_reserved_address, because there might be memory
3074 // regions above addr + size. If so, releasing a memory region only creates
3075 // a hole in the address space, it doesn't help prevent heap-stack collision.
3076 //
3077 static int anon_munmap(char * addr, size_t size) {
3078 return ::munmap(addr, size) == 0;
3079 }
3081 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
3082 size_t alignment_hint) {
3083 return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
3084 }
3086 bool os::pd_release_memory(char* addr, size_t size) {
3087 return anon_munmap(addr, size);
3088 }
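// Lifecycle sketch tying the pieces together (illustrative only, assuming
// the public os::reserve_memory()/commit_memory()/uncommit_memory()/
// release_memory() wrappers from os.hpp): reserve PROT_NONE address space
// via anon_mmap(), commit one page read/write, use it, then uncommit and
// release. Error handling is pared down; the size is an example value.
static void reserve_commit_release_sketch() {
  const size_t size = 4 * M;                          // example reservation
  char* base = os::reserve_memory(size, NULL, 0);
  if (base == NULL) return;
  if (os::commit_memory(base, os::vm_page_size(), !ExecMem)) {
    base[0] = 1;                                      // committed page is usable
    os::uncommit_memory(base, os::vm_page_size());
  }
  os::release_memory(base, size);
}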
3090 static address highest_vm_reserved_address() {
3091 return _highest_vm_reserved_address;
3092 }
3094 static bool linux_mprotect(char* addr, size_t size, int prot) {
3095 // Linux wants the mprotect address argument to be page aligned.
3096 char* bottom = (char*)align_size_down((intptr_t)addr, os::Linux::page_size());
3098 // According to SUSv3, mprotect() should only be used with mappings
3099 // established by mmap(), and mmap() always maps whole pages. Unaligned
3100 // 'addr' likely indicates a problem in the VM (e.g. trying to change
3101 // protection of malloc'ed or statically allocated memory). Check the
3102 // caller if you hit this assert.
3103 assert(addr == bottom, "sanity check");
3105 size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Linux::page_size());
3106 return ::mprotect(bottom, size, prot) == 0;
3107 }
3109 // Set protections specified
3110 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3111 bool is_committed) {
3112 unsigned int p = 0;
3113 switch (prot) {
3114 case MEM_PROT_NONE: p = PROT_NONE; break;
3115 case MEM_PROT_READ: p = PROT_READ; break;
3116 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
3117 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3118 default:
3119 ShouldNotReachHere();
3120 }
3121 // is_committed is unused.
3122 return linux_mprotect(addr, bytes, p);
3123 }
3125 bool os::guard_memory(char* addr, size_t size) {
3126 return linux_mprotect(addr, size, PROT_NONE);
3127 }
3129 bool os::unguard_memory(char* addr, size_t size) {
3130 return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
3131 }
3133 bool os::Linux::transparent_huge_pages_sanity_check(bool warn, size_t page_size) {
3134 bool result = false;
3135 void *p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,
3136 MAP_ANONYMOUS|MAP_PRIVATE,
3137 -1, 0);
3138 if (p != MAP_FAILED) {
3139 void *aligned_p = align_ptr_up(p, page_size);
3141 result = madvise(aligned_p, page_size, MADV_HUGEPAGE) == 0;
3143 munmap(p, page_size * 2);
3144 }
3146 if (warn && !result) {
3147 warning("TransparentHugePages is not supported by the operating system.");
3148 }
3150 return result;
3151 }
3153 bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
3154 bool result = false;
3155 void *p = mmap(NULL, page_size, PROT_READ|PROT_WRITE,
3156 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
3157 -1, 0);
3159 if (p != MAP_FAILED) {
3160 // We don't know if this really is a huge page or not.
3161 FILE *fp = fopen("/proc/self/maps", "r");
3162 if (fp) {
3163 while (!feof(fp)) {
3164 char chars[257];
3165 long x = 0;
3166 if (fgets(chars, sizeof(chars), fp)) {
3167 if (sscanf(chars, "%lx-%*x", &x) == 1
3168 && x == (long)p) {
3169 if (strstr (chars, "hugepage")) {
3170 result = true;
3171 break;
3172 }
3173 }
3174 }
3175 }
3176 fclose(fp);
3177 }
3178 munmap(p, page_size);
3179 }
3181 if (warn && !result) {
3182 warning("HugeTLBFS is not supported by the operating system.");
3183 }
3185 return result;
3186 }
3188 /*
3189 * Set the coredump_filter bits to include largepages in core dump (bit 6)
3190 *
3191 * From the coredump_filter documentation:
3192 *
3193 * - (bit 0) anonymous private memory
3194 * - (bit 1) anonymous shared memory
3195 * - (bit 2) file-backed private memory
3196 * - (bit 3) file-backed shared memory
3197 * - (bit 4) ELF header pages in file-backed private memory areas (it is
3198 * effective only if the bit 2 is cleared)
3199 * - (bit 5) hugetlb private memory
3200 * - (bit 6) hugetlb shared memory
3201 */
3202 static void set_coredump_filter(void) {
3203 FILE *f;
3204 long cdm;
3206 if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
3207 return;
3208 }
3210 if (fscanf(f, "%lx", &cdm) != 1) {
3211 fclose(f);
3212 return;
3213 }
3215 rewind(f);
3217 if ((cdm & LARGEPAGES_BIT) == 0) {
3218 cdm |= LARGEPAGES_BIT;
3219 fprintf(f, "%#lx", cdm);
3220 }
3222 fclose(f);
3223 }
3225 // Large page support
3227 static size_t _large_page_size = 0;
3229 size_t os::Linux::find_large_page_size() {
3230 size_t large_page_size = 0;
3232 // large_page_size on Linux is used to round up the heap size. x86 uses either
3233 // 2M or 4M pages, depending on whether PAE (Physical Address Extensions)
3234 // mode is enabled. AMD64/EM64T uses 2M pages in 64-bit mode. IA64 can use
3235 // pages as large as 256M.
3236 //
3237 // Here we try to figure out page size by parsing /proc/meminfo and looking
3238 // for a line with the following format:
3239 // Hugepagesize: 2048 kB
3240 //
3241 // If we can't determine the value (e.g. /proc is not mounted, or the text
3242 // format has been changed), we'll use the largest page size supported by
3243 // the processor.
3245 #ifndef ZERO
3246 large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
3247 ARM_ONLY(2 * M) PPC_ONLY(4 * M);
3248 #endif // ZERO
3250 FILE *fp = fopen("/proc/meminfo", "r");
3251 if (fp) {
3252 while (!feof(fp)) {
3253 int x = 0;
3254 char buf[16];
3255 if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
3256 if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
3257 large_page_size = x * K;
3258 break;
3259 }
3260 } else {
3261 // skip to next line
3262 for (;;) {
3263 int ch = fgetc(fp);
3264 if (ch == EOF || ch == (int)'\n') break;
3265 }
3266 }
3267 }
3268 fclose(fp);
3269 }
3271 if (!FLAG_IS_DEFAULT(LargePageSizeInBytes) && LargePageSizeInBytes != large_page_size) {
3272 warning("Setting LargePageSizeInBytes has no effect on this OS. Large page size is "
3273 SIZE_FORMAT "%s.", byte_size_in_proper_unit(large_page_size),
3274 proper_unit_for_byte_size(large_page_size));
3275 }
3277 return large_page_size;
3278 }
3280 size_t os::Linux::setup_large_page_size() {
3281 _large_page_size = Linux::find_large_page_size();
3282 const size_t default_page_size = (size_t)Linux::page_size();
3283 if (_large_page_size > default_page_size) {
3284 _page_sizes[0] = _large_page_size;
3285 _page_sizes[1] = default_page_size;
3286 _page_sizes[2] = 0;
3287 }
3289 return _large_page_size;
3290 }
3292 bool os::Linux::setup_large_page_type(size_t page_size) {
3293 if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
3294 FLAG_IS_DEFAULT(UseSHM) &&
3295 FLAG_IS_DEFAULT(UseTransparentHugePages)) {
3297 // The type of large pages has not been specified by the user.
3299 // Try UseHugeTLBFS and then UseSHM.
3300 UseHugeTLBFS = UseSHM = true;
3302 // Don't try UseTransparentHugePages since there are known
3303 // performance issues with it turned on. This might change in the future.
3304 UseTransparentHugePages = false;
3305 }
3307 if (UseTransparentHugePages) {
3308 bool warn_on_failure = !FLAG_IS_DEFAULT(UseTransparentHugePages);
3309 if (transparent_huge_pages_sanity_check(warn_on_failure, page_size)) {
3310 UseHugeTLBFS = false;
3311 UseSHM = false;
3312 return true;
3313 }
3314 UseTransparentHugePages = false;
3315 }
3317 if (UseHugeTLBFS) {
3318 bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
3319 if (hugetlbfs_sanity_check(warn_on_failure, page_size)) {
3320 UseSHM = false;
3321 return true;
3322 }
3323 UseHugeTLBFS = false;
3324 }
3326 return UseSHM;
3327 }
3329 void os::large_page_init() {
3330 if (!UseLargePages &&
3331 !UseTransparentHugePages &&
3332 !UseHugeTLBFS &&
3333 !UseSHM) {
3334 // Not using large pages.
3335 return;
3336 }
3338 if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
3339 // The user explicitly turned off large pages.
3340 // Ignore the rest of the large pages flags.
3341 UseTransparentHugePages = false;
3342 UseHugeTLBFS = false;
3343 UseSHM = false;
3344 return;
3345 }
3347 size_t large_page_size = Linux::setup_large_page_size();
3348 UseLargePages = Linux::setup_large_page_type(large_page_size);
3350 set_coredump_filter();
3351 }
3353 #ifndef SHM_HUGETLB
3354 #define SHM_HUGETLB 04000
3355 #endif
3357 char* os::Linux::reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec) {
3358 // "exec" is passed in but not used. Creating the shared image for
3359 // the code cache doesn't have an SHM_X executable permission to check.
3360 assert(UseLargePages && UseSHM, "only for SHM large pages");
3361 assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
3363 if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3364 return NULL; // Fallback to small pages.
3365 }
3367 key_t key = IPC_PRIVATE;
3368 char *addr;
3370 bool warn_on_failure = UseLargePages &&
3371 (!FLAG_IS_DEFAULT(UseLargePages) ||
3372 !FLAG_IS_DEFAULT(UseSHM) ||
3373 !FLAG_IS_DEFAULT(LargePageSizeInBytes)
3374 );
3375 char msg[128];
3377 // Create a large shared memory region to attach to based on size.
3378 // Currently, size is the total size of the heap
3379 int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
3380 if (shmid == -1) {
3381 // Possible reasons for shmget failure:
3382 // 1. shmmax is too small for Java heap.
3383 // > check shmmax value: cat /proc/sys/kernel/shmmax
3384 // > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
3385 // 2. not enough large page memory.
3386 // > check available large pages: cat /proc/meminfo
3387 // > increase amount of large pages:
3388 // echo new_value > /proc/sys/vm/nr_hugepages
3389 // Note 1: different Linux distributions may use different names for this
3390 // property, e.g. on Redhat AS-3 it is "hugetlb_pool".
3391 // Note 2: it's possible there's enough physical memory available but
3392 // it is so fragmented after a long run that it can't
3393 // coalesce into large pages. Try to reserve large pages when
3394 // the system is still "fresh".
3395 if (warn_on_failure) {
3396 jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
3397 warning("%s", msg);
3398 }
3399 return NULL;
3400 }
3402 // attach to the region
3403 addr = (char*)shmat(shmid, req_addr, 0);
3404 int err = errno;
3406 // Remove shmid. If shmat() is successful, the actual shared memory segment
3407 // will be deleted when it's detached by shmdt() or when the process
3408 // terminates. If shmat() is not successful this will remove the shared
3409 // segment immediately.
3410 shmctl(shmid, IPC_RMID, NULL);
3412 if ((intptr_t)addr == -1) {
3413 if (warn_on_failure) {
3414 jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
3415 warning("%s", msg);
3416 }
3417 return NULL;
3418 }
3420 return addr;
3421 }
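// Distilled sketch of the SysV lifecycle used above (hypothetical helper;
// same shmget/shmat/shmctl calls, minus the HotSpot flag handling and
// diagnostics):
static char* shm_hugepage_alloc_sketch(size_t bytes) {
  int shmid = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
  if (shmid == -1) {
    return NULL;  // pool empty, or shmmax too small for the request
  }
  char* addr = (char*)shmat(shmid, NULL, 0);
  // Mark for removal now; the segment survives until the last detach.
  shmctl(shmid, IPC_RMID, NULL);
  return ((intptr_t)addr == -1) ? NULL : addr;
}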
3423 static void warn_on_large_pages_failure(char* req_addr, size_t bytes, int error) {
3424 assert(error == ENOMEM, "Only expect to fail if no memory is available");
3426 bool warn_on_failure = UseLargePages &&
3427 (!FLAG_IS_DEFAULT(UseLargePages) ||
3428 !FLAG_IS_DEFAULT(UseHugeTLBFS) ||
3429 !FLAG_IS_DEFAULT(LargePageSizeInBytes));
3431 if (warn_on_failure) {
3432 char msg[128];
3433 jio_snprintf(msg, sizeof(msg), "Failed to reserve large pages memory req_addr: "
3434 PTR_FORMAT " bytes: " SIZE_FORMAT " (errno = %d).", req_addr, bytes, error);
3435 warning("%s", msg);
3436 }
3437 }
3439 char* os::Linux::reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec) {
3440 assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
3441 assert(is_size_aligned(bytes, os::large_page_size()), "Unaligned size");
3442 assert(is_ptr_aligned(req_addr, os::large_page_size()), "Unaligned address");
3444 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
3445 char* addr = (char*)::mmap(req_addr, bytes, prot,
3446 MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB,
3447 -1, 0);
3449 if (addr == MAP_FAILED) {
3450 warn_on_large_pages_failure(req_addr, bytes, errno);
3451 return NULL;
3452 }
3454 assert(is_ptr_aligned(addr, os::large_page_size()), "Must be");
3456 return addr;
3457 }
3459 char* os::Linux::reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec) {
3460 size_t large_page_size = os::large_page_size();
3462 assert(bytes >= large_page_size, "Shouldn't allocate large pages for small sizes");
3464 // Allocate small pages.
3466 char* start;
3467 if (req_addr != NULL) {
3468 assert(is_ptr_aligned(req_addr, alignment), "Must be");
3469 assert(is_size_aligned(bytes, alignment), "Must be");
3470 start = os::reserve_memory(bytes, req_addr);
3471 assert(start == NULL || start == req_addr, "Must be");
3472 } else {
3473 start = os::reserve_memory_aligned(bytes, alignment);
3474 }
3476 if (start == NULL) {
3477 return NULL;
3478 }
3480 assert(is_ptr_aligned(start, alignment), "Must be");
3482 if (MemTracker::tracking_level() > NMT_minimal) {
3483 // os::reserve_memory_special will record this memory area.
3484 // Need to release it here to prevent overlapping reservations.
3485 Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
3486 tkr.record((address)start, bytes);
3487 }
3489 char* end = start + bytes;
3491 // Find the regions of the allocated chunk that can be promoted to large pages.
3492 char* lp_start = (char*)align_ptr_up(start, large_page_size);
3493 char* lp_end = (char*)align_ptr_down(end, large_page_size);
3495 size_t lp_bytes = lp_end - lp_start;
3497 assert(is_size_aligned(lp_bytes, large_page_size), "Must be");
3499 if (lp_bytes == 0) {
3500 // The mapped region doesn't even span the start and the end of a large page.
3501 // Fall back to allocate a non-special area.
3502 ::munmap(start, end - start);
3503 return NULL;
3504 }
3506 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
3509 void* result;
3511 if (start != lp_start) {
3512 result = ::mmap(start, lp_start - start, prot,
3513 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
3514 -1, 0);
3515 if (result == MAP_FAILED) {
3516 ::munmap(lp_start, end - lp_start);
3517 return NULL;
3518 }
3519 }
3521 result = ::mmap(lp_start, lp_bytes, prot,
3522 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED|MAP_HUGETLB,
3523 -1, 0);
3524 if (result == MAP_FAILED) {
3525 warn_on_large_pages_failure(req_addr, bytes, errno);
3526 // If the mmap above fails, the large pages region will be unmapped and we
3527 // have regions before and after with small pages. Release these regions.
3528 //
3529 // | mapped | unmapped | mapped |
3530 // ^ ^ ^ ^
3531 // start lp_start lp_end end
3532 //
3533 ::munmap(start, lp_start - start);
3534 ::munmap(lp_end, end - lp_end);
3535 return NULL;
3536 }
3538 if (lp_end != end) {
3539 result = ::mmap(lp_end, end - lp_end, prot,
3540 MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
3541 -1, 0);
3542 if (result == MAP_FAILED) {
3543 ::munmap(start, lp_end - start);
3544 return NULL;
3545 }
3546 }
3548 return start;
3549 }
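// Worked example of the split performed above (all values hypothetical).
// With a 2M large page size, start = 0x7f0000123000 and bytes = 8M:
static void split_for_large_pages_example() {
  const size_t lp = 2 * M;                           // large page size
  char*  start    = (char*)0x7f0000123000;           // from reserve_memory
  size_t bytes    = 8 * M;
  char*  end      = start + bytes;                   // 0x7f0000923000
  char*  lp_start = (char*)(((uintptr_t)start + lp - 1) & ~(lp - 1));
  char*  lp_end   = (char*)((uintptr_t)end & ~(lp - 1));
  // [start, lp_start)  small pages: 0x7f0000123000..0x7f0000200000
  // [lp_start, lp_end) huge pages : 0x7f0000200000..0x7f0000800000
  // [lp_end, end)      small pages: 0x7f0000800000..0x7f0000923000
}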
3551 char* os::Linux::reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec) {
3552 assert(UseLargePages && UseHugeTLBFS, "only for Huge TLBFS large pages");
3553 assert(is_ptr_aligned(req_addr, alignment), "Must be");
3554 assert(is_power_of_2(alignment), "Must be");
3555 assert(is_power_of_2(os::large_page_size()), "Must be");
3556 assert(bytes >= os::large_page_size(), "Shouldn't allocate large pages for small sizes");
3558 if (is_size_aligned(bytes, os::large_page_size()) && alignment <= os::large_page_size()) {
3559 return reserve_memory_special_huge_tlbfs_only(bytes, req_addr, exec);
3560 } else {
3561 return reserve_memory_special_huge_tlbfs_mixed(bytes, alignment, req_addr, exec);
3562 }
3563 }
3565 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
3566 assert(UseLargePages, "only for large pages");
3568 char* addr;
3569 if (UseSHM) {
3570 addr = os::Linux::reserve_memory_special_shm(bytes, alignment, req_addr, exec);
3571 } else {
3572 assert(UseHugeTLBFS, "must be");
3573 addr = os::Linux::reserve_memory_special_huge_tlbfs(bytes, alignment, req_addr, exec);
3574 }
3576 if (addr != NULL) {
3577 if (UseNUMAInterleaving) {
3578 numa_make_global(addr, bytes);
3579 }
3581 // The memory is committed
3582 MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
3583 }
3585 return addr;
3586 }
3588 bool os::Linux::release_memory_special_shm(char* base, size_t bytes) {
3589 // detaching the SHM segment will also delete it, see reserve_memory_special_shm()
3590 return shmdt(base) == 0;
3591 }
3593 bool os::Linux::release_memory_special_huge_tlbfs(char* base, size_t bytes) {
3594 return pd_release_memory(base, bytes);
3595 }
3597 bool os::release_memory_special(char* base, size_t bytes) {
3598 bool res;
3599 if (MemTracker::tracking_level() > NMT_minimal) {
3600 Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
3601 res = os::Linux::release_memory_special_impl(base, bytes);
3602 if (res) {
3603 tkr.record((address)base, bytes);
3604 }
3606 } else {
3607 res = os::Linux::release_memory_special_impl(base, bytes);
3608 }
3609 return res;
3610 }
3612 bool os::Linux::release_memory_special_impl(char* base, size_t bytes) {
3613 assert(UseLargePages, "only for large pages");
3614 bool res;
3616 if (UseSHM) {
3617 res = os::Linux::release_memory_special_shm(base, bytes);
3618 } else {
3619 assert(UseHugeTLBFS, "must be");
3620 res = os::Linux::release_memory_special_huge_tlbfs(base, bytes);
3621 }
3622 return res;
3623 }
3625 size_t os::large_page_size() {
3626 return _large_page_size;
3627 }
3629 // With SysV SHM the entire memory region must be allocated as shared
3630 // memory.
3631 // HugeTLBFS allows an application to commit large page memory on demand.
3632 // However, when committing memory with HugeTLBFS fails, the region
3633 // that was supposed to be committed will lose the old reservation
3634 // and allow other threads to steal that memory region. Because of this
3635 // behavior we can't commit HugeTLBFS memory.
3636 bool os::can_commit_large_page_memory() {
3637 return UseTransparentHugePages;
3638 }
3640 bool os::can_execute_large_page_memory() {
3641 return UseTransparentHugePages || UseHugeTLBFS;
3642 }
3644 // Reserve memory at an arbitrary address, only if that area is
3645 // available (and not reserved for something else).
3647 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3648 const int max_tries = 10;
3649 char* base[max_tries];
3650 size_t size[max_tries];
3651 const size_t gap = 0x000000;
3653 // Assert only that the size is a multiple of the page size, since
3654 // that's all that mmap requires, and since that's all we really know
3655 // about at this low abstraction level. If we need higher alignment,
3656 // we can either pass an alignment to this method or verify alignment
3657 // in one of the methods further up the call chain. See bug 5044738.
3658 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
3660 // Repeatedly allocate blocks until the block is allocated at the
3661 // right spot. Give up after max_tries. Note that reserve_memory() will
3662 // automatically update _highest_vm_reserved_address if the call is
3663 // successful. The variable tracks the highest memory address ever reserved
3664 // by the JVM. It is used to detect heap-stack collision if running with
3665 // fixed-stack LinuxThreads. Because here we may attempt to reserve more
3666 // space than needed, it could confuse the collision detecting code. To
3667 // solve the problem, save current _highest_vm_reserved_address and
3668 // calculate the correct value before return.
3669 address old_highest = _highest_vm_reserved_address;
3671 // Linux mmap allows the caller to pass an address as a hint; give it a try
3672 // first, and if the kernel honors the hint then we can return immediately.
3673 char * addr = anon_mmap(requested_addr, bytes, false);
3674 if (addr == requested_addr) {
3675 return requested_addr;
3676 }
3678 if (addr != NULL) {
3679 // mmap() is successful but it fails to reserve at the requested address
3680 anon_munmap(addr, bytes);
3681 }
3683 int i;
3684 for (i = 0; i < max_tries; ++i) {
3685 base[i] = reserve_memory(bytes);
3687 if (base[i] != NULL) {
3688 // Is this the block we wanted?
3689 if (base[i] == requested_addr) {
3690 size[i] = bytes;
3691 break;
3692 }
3694 // Does this overlap the block we wanted? Give back the overlapped
3695 // parts and try again.
3697 size_t top_overlap = requested_addr + (bytes + gap) - base[i];
3698 if (top_overlap < bytes) { // size_t is unsigned: a negative diff wraps huge and fails this test
3699 unmap_memory(base[i], top_overlap);
3700 base[i] += top_overlap;
3701 size[i] = bytes - top_overlap;
3702 } else {
3703 size_t bottom_overlap = base[i] + bytes - requested_addr;
3704 if (bottom_overlap < bytes) { // ditto: no ">= 0" check is needed for unsigned
3705 unmap_memory(requested_addr, bottom_overlap);
3706 size[i] = bytes - bottom_overlap;
3707 } else {
3708 size[i] = bytes;
3709 }
3710 }
3711 }
3712 }
3714 // Give back the unused reserved pieces.
3716 for (int j = 0; j < i; ++j) {
3717 if (base[j] != NULL) {
3718 unmap_memory(base[j], size[j]);
3719 }
3720 }
3722 if (i < max_tries) {
3723 _highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
3724 return requested_addr;
3725 } else {
3726 _highest_vm_reserved_address = old_highest;
3727 return NULL;
3728 }
3729 }
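// The kernel treats mmap()'s first argument as a hint unless MAP_FIXED is
// passed. A minimal sketch of the "try the hint, verify, back out" pattern
// the function above is built around (hypothetical helper):
static char* try_reserve_at_sketch(char* hint, size_t bytes) {
  void* p = mmap(hint, bytes, PROT_NONE,
                 MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0);
  if (p == MAP_FAILED) {
    return NULL;
  }
  if (p != hint) {
    munmap(p, bytes);  // kernel placed it elsewhere; give it back
    return NULL;
  }
  return (char*)p;
}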
3731 size_t os::read(int fd, void *buf, unsigned int nBytes) {
3732 return ::read(fd, buf, nBytes);
3733 }
3735 // TODO-FIXME: reconcile Solaris' os::sleep with the Linux variation.
3736 // Solaris uses poll(), Linux uses park().
3737 // Poll() is likely a better choice, assuming that Thread.interrupt()
3738 // generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
3739 // SIGSEGV, see 4355769.
3741 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
3742 assert(thread == Thread::current(), "thread consistency check");
3744 ParkEvent * const slp = thread->_SleepEvent;
3745 slp->reset();
3746 OrderAccess::fence();
3748 if (interruptible) {
3749 jlong prevtime = javaTimeNanos();
3751 for (;;) {
3752 if (os::is_interrupted(thread, true)) {
3753 return OS_INTRPT;
3754 }
3756 jlong newtime = javaTimeNanos();
3758 if (newtime - prevtime < 0) {
3759 // time moving backwards, should only happen if no monotonic clock
3760 // not a guarantee() because JVM should not abort on kernel/glibc bugs
3761 assert(!Linux::supports_monotonic_clock(), "time moving backwards");
3762 } else {
3763 millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
3764 }
3766 if (millis <= 0) {
3767 return OS_OK;
3768 }
3770 prevtime = newtime;
3772 {
3773 assert(thread->is_Java_thread(), "sanity check");
3774 JavaThread *jt = (JavaThread *) thread;
3775 ThreadBlockInVM tbivm(jt);
3776 OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
3778 jt->set_suspend_equivalent();
3779 // cleared by handle_special_suspend_equivalent_condition() or
3780 // java_suspend_self() via check_and_wait_while_suspended()
3782 slp->park(millis);
3784 // were we externally suspended while we were waiting?
3785 jt->check_and_wait_while_suspended();
3786 }
3787 }
3788 } else {
3789 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
3790 jlong prevtime = javaTimeNanos();
3792 for (;;) {
3793 // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
3794 // the 1st iteration ...
3795 jlong newtime = javaTimeNanos();
3797 if (newtime - prevtime < 0) {
3798 // time moving backwards, should only happen if no monotonic clock
3799 // not a guarantee() because JVM should not abort on kernel/glibc bugs
3800 assert(!Linux::supports_monotonic_clock(), "time moving backwards");
3801 } else {
3802 millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
3803 }
3805 if (millis <= 0) break;
3807 prevtime = newtime;
3808 slp->park(millis);
3809 }
3810 return OS_OK;
3811 }
3812 }
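// The loops above re-derive the remaining time on every wakeup because
// park() may return early. The bookkeeping in isolation (hypothetical
// helper):
static jlong remaining_millis_sketch(jlong millis, jlong prev_ns, jlong now_ns) {
  if (now_ns - prev_ns < 0) {
    return millis;  // clock went backwards; don't credit elapsed time
  }
  return millis - (now_ns - prev_ns) / NANOSECS_PER_MILLISEC;
}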
3814 //
3815 // Short sleep, direct OS call.
3816 //
3817 // Note: certain versions of the Linux CFS scheduler (since 2.6.23) do not
3818 // guarantee that sched_yield(2) will actually give up the CPU:
3819 //
3820 // * If the thread is alone on its particular CPU, it keeps running.
3821 // * Before the introduction of "skip_buddy" with "compat_yield" disabled
3822 //   (pre 2.6.39).
3823 //
3824 // So calling this with ms == 0 is an alternative to sched_yield().
3825 //
3826 void os::naked_short_sleep(jlong ms) {
3827 struct timespec req;
3829 assert(ms < 1000, "Un-interruptible sleep, short time use only");
3830 req.tv_sec = 0;
3831 if (ms > 0) {
3832 req.tv_nsec = (ms % 1000) * 1000000;
3833 } else {
3835 req.tv_nsec = 1;
3836 }
3838 nanosleep(&req, NULL);
3840 return;
3841 }
3843 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3844 void os::infinite_sleep() {
3845 while (true) { // sleep forever ...
3846 ::sleep(100); // ... 100 seconds at a time
3847 }
3848 }
3850 // Used to convert frequent JVM_Yield() to nops
3851 bool os::dont_yield() {
3852 return DontYieldALot;
3853 }
3855 void os::yield() {
3856 sched_yield();
3857 }
3859 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
3861 void os::yield_all(int attempts) {
3862 // Yields to all threads, including threads with lower priorities
3863 // Threads on Linux are all with same priority. The Solaris style
3864 // os::yield_all() with nanosleep(1ms) is not necessary.
3865 sched_yield();
3866 }
3868 // Called from the tight loops to possibly influence time-sharing heuristics
3869 void os::loop_breaker(int attempts) {
3870 os::yield_all(attempts);
3871 }
3873 ////////////////////////////////////////////////////////////////////////////////
3874 // thread priority support
3876 // Note: Normal Linux applications are run with SCHED_OTHER policy. SCHED_OTHER
3877 // only supports dynamic priority, static priority must be zero. For real-time
3878 // applications, Linux supports SCHED_RR which allows static priority (1-99).
3879 // However, for large multi-threaded applications, SCHED_RR is not only slower
3880 // than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
3881 // of 5 runs - Sep 2005).
3882 //
3883 // The following code actually changes the niceness of the kernel-thread/LWP. It
3884 // assumes that setpriority() only modifies one kernel-thread/LWP,
3885 // not the entire user process, and that user-level threads are mapped 1:1 to
3886 // kernel threads. This has always been the case, but could change in the future.
3887 // For this reason, the code is not used by default (ThreadPriorityPolicy=0).
3888 // It is only used when ThreadPriorityPolicy=1 and requires root privilege.
3890 int os::java_to_os_priority[CriticalPriority + 1] = {
3891 19, // 0 Entry should never be used
3893 4, // 1 MinPriority
3894 3, // 2
3895 2, // 3
3897 1, // 4
3898 0, // 5 NormPriority
3899 -1, // 6
3901 -2, // 7
3902 -3, // 8
3903 -4, // 9 NearMaxPriority
3905 -5, // 10 MaxPriority
3907 -5 // 11 CriticalPriority
3908 };
3910 static int prio_init() {
3911 if (ThreadPriorityPolicy == 1) {
3912 // Only root can raise thread priority. Don't allow ThreadPriorityPolicy=1
3913 // if the effective uid is not root. Perhaps a more elegant way of doing
3914 // this is to test the CAP_SYS_NICE capability, but that would require libcap.so
3915 if (geteuid() != 0) {
3916 if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) {
3917 warning("-XX:ThreadPriorityPolicy requires root privilege on Linux");
3918 }
3919 ThreadPriorityPolicy = 0;
3920 }
3921 }
3922 if (UseCriticalJavaThreadPriority) {
3923 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3924 }
3925 return 0;
3926 }
3928 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3929 if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
3931 int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
3932 return (ret == 0) ? OS_OK : OS_ERR;
3933 }
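// Illustrative sketch of the per-LWP semantics relied on above (hypothetical
// helper; assumes <sys/syscall.h> for SYS_gettid, since older glibc versions
// ship no gettid() wrapper): passing a kernel thread id rather than getpid()
// to setpriority(PRIO_PROCESS, ...) renices just that thread.
static int renice_current_thread_sketch(int nice_value) {
  pid_t tid = (pid_t) syscall(SYS_gettid);
  return setpriority(PRIO_PROCESS, tid, nice_value);
}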
3935 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3936 if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
3937 *priority_ptr = java_to_os_priority[NormPriority];
3938 return OS_OK;
3939 }
3941 errno = 0;
3942 *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
3943 return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
3944 }
3946 // Hint to the underlying OS that a task switch would not be good.
3947 // Void return because it's a hint and can fail.
3948 void os::hint_no_preempt() {}
3950 ////////////////////////////////////////////////////////////////////////////////
3951 // suspend/resume support
3953 // the low-level signal-based suspend/resume support is a remnant from the
3954 // old VM-suspension that used to be for java-suspension, safepoints etc,
3955 // within hotspot. Now there is a single use-case for this:
3956 // - calling get_thread_pc() on the VMThread by the flat-profiler task
3957 // that runs in the watcher thread.
3958 // The remaining code is greatly simplified from the more general suspension
3959 // code that used to be used.
3960 //
3961 // The protocol is quite simple:
3962 // - suspend:
3963 // - sends a signal to the target thread
3964 // - polls the suspend state of the osthread using a yield loop
3965 // - target thread signal handler (SR_handler) sets suspend state
3966 // and blocks in sigsuspend until continued
3967 // - resume:
3968 // - sets target osthread state to continue
3969 // - sends signal to end the sigsuspend loop in the SR_handler
3970 //
3971 // Note that the SR_lock plays no role in this suspend/resume protocol.
3972 //
3974 static void resume_clear_context(OSThread *osthread) {
3975 osthread->set_ucontext(NULL);
3976 osthread->set_siginfo(NULL);
3977 }
3979 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
3980 osthread->set_ucontext(context);
3981 osthread->set_siginfo(siginfo);
3982 }
3984 //
3985 // Handler function invoked when a thread's execution is suspended or
3986 // resumed. We have to be careful that only async-safe functions are
3987 // called here (Note: most pthread functions are not async safe and
3988 // should be avoided.)
3989 //
3990 // Note: sigwait() is a more natural fit than sigsuspend() from an
3991 // interface point of view, but sigwait() prevents the signal handler
3992 // from being run. libpthread would get very confused by not having
3993 // its signal handlers run, which prevents sigwait()'s use with the
3994 // mutex-granting signal.
3995 //
3996 // Currently only ever called on the VMThread and JavaThreads (PC sampling)
3997 //
3998 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
3999 // Save and restore errno to avoid confusing native code with EINTR
4000 // after sigsuspend.
4001 int old_errno = errno;
4003 Thread* thread = Thread::current();
4004 OSThread* osthread = thread->osthread();
4005 assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
4007 os::SuspendResume::State current = osthread->sr.state();
4008 if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
4009 suspend_save_context(osthread, siginfo, context);
4011 // attempt to switch the state, we assume we had a SUSPEND_REQUEST
4012 os::SuspendResume::State state = osthread->sr.suspended();
4013 if (state == os::SuspendResume::SR_SUSPENDED) {
4014 sigset_t suspend_set; // signals for sigsuspend()
4016 // get current set of blocked signals and unblock resume signal
4017 pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
4018 sigdelset(&suspend_set, SR_signum);
4020 sr_semaphore.signal();
4021 // wait here until we are resumed
4022 while (1) {
4023 sigsuspend(&suspend_set);
4025 os::SuspendResume::State result = osthread->sr.running();
4026 if (result == os::SuspendResume::SR_RUNNING) {
4027 sr_semaphore.signal();
4028 break;
4029 }
4030 }
4032 } else if (state == os::SuspendResume::SR_RUNNING) {
4033 // request was cancelled, continue
4034 } else {
4035 ShouldNotReachHere();
4036 }
4038 resume_clear_context(osthread);
4039 } else if (current == os::SuspendResume::SR_RUNNING) {
4040 // request was cancelled, continue
4041 } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
4042 // ignore
4043 } else {
4044 // ignore
4045 }
4047 errno = old_errno;
4048 }
4051 static int SR_initialize() {
4052 struct sigaction act;
4053 char *s;
4054 /* Get signal number to use for suspend/resume */
4055 if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
4056 int sig = ::strtol(s, 0, 10);
4057 if (sig > 0 && sig < _NSIG) {
4058 SR_signum = sig;
4059 }
4060 }
4062 assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
4063 "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
4065 sigemptyset(&SR_sigset);
4066 sigaddset(&SR_sigset, SR_signum);
4068 /* Set up signal handler for suspend/resume */
4069 act.sa_flags = SA_RESTART|SA_SIGINFO;
4070 act.sa_handler = (void (*)(int)) SR_handler;
4072 // SR_signum is blocked by default.
4073 // 4528190 - We also need to block the pthread restart signal (32 on all
4074 // supported Linux platforms). Note that LinuxThreads needs to block
4075 // this signal for all threads to work properly. So we don't have
4076 // to use a hard-coded signal number when setting up the mask.
4077 pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
4079 if (sigaction(SR_signum, &act, 0) == -1) {
4080 return -1;
4081 }
4083 // Save signal flag
4084 os::Linux::set_our_sigflags(SR_signum, act.sa_flags);
4085 return 0;
4086 }
4088 static int sr_notify(OSThread* osthread) {
4089 int status = pthread_kill(osthread->pthread_id(), SR_signum);
4090 assert_status(status == 0, status, "pthread_kill");
4091 return status;
4092 }
4094 // "Randomly" selected value for how long we want to spin
4095 // before bailing out on suspending a thread, also how often
4096 // we send a signal to a thread we want to resume
4097 static const int RANDOMLY_LARGE_INTEGER = 1000000;
4098 static const int RANDOMLY_LARGE_INTEGER2 = 100;
4100 // returns true on success and false on error - really an error is fatal
4101 // but this seems the normal response to library errors
4102 static bool do_suspend(OSThread* osthread) {
4103 assert(osthread->sr.is_running(), "thread should be running");
4104 assert(!sr_semaphore.trywait(), "semaphore has invalid state");
4106 // mark as suspended and send signal
4107 if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
4108 // failed to switch, state wasn't running?
4109 ShouldNotReachHere();
4110 return false;
4111 }
4113 if (sr_notify(osthread) != 0) {
4114 ShouldNotReachHere();
4115 }
4117 // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
4118 while (true) {
4119 if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4120 break;
4121 } else {
4122 // timeout
4123 os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
4124 if (cancelled == os::SuspendResume::SR_RUNNING) {
4125 return false;
4126 } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
4127 // make sure that we consume the signal on the semaphore as well
4128 sr_semaphore.wait();
4129 break;
4130 } else {
4131 ShouldNotReachHere();
4132 return false;
4133 }
4134 }
4135 }
4137 guarantee(osthread->sr.is_suspended(), "Must be suspended");
4138 return true;
4139 }
4141 static void do_resume(OSThread* osthread) {
4142 assert(osthread->sr.is_suspended(), "thread should be suspended");
4143 assert(!sr_semaphore.trywait(), "invalid semaphore state");
4145 if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
4146 // failed to switch to WAKEUP_REQUEST
4147 ShouldNotReachHere();
4148 return;
4149 }
4151 while (true) {
4152 if (sr_notify(osthread) == 0) {
4153 if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
4154 if (osthread->sr.is_running()) {
4155 return;
4156 }
4157 }
4158 } else {
4159 ShouldNotReachHere();
4160 }
4161 }
4163 guarantee(osthread->sr.is_running(), "Must be running!");
4164 }
4166 ////////////////////////////////////////////////////////////////////////////////
4167 // interrupt support
4169 void os::interrupt(Thread* thread) {
4170 assert(Thread::current() == thread || Threads_lock->owned_by_self(),
4171 "possibility of dangling Thread pointer");
4173 OSThread* osthread = thread->osthread();
4175 if (!osthread->interrupted()) {
4176 osthread->set_interrupted(true);
4177 // More than one thread can get here with the same value of osthread,
4178 // resulting in multiple notifications. We do, however, want the store
4179 // to interrupted() to be visible to other threads before we execute unpark().
4180 OrderAccess::fence();
4181 ParkEvent * const slp = thread->_SleepEvent;
4182 if (slp != NULL) slp->unpark();
4183 }
4185 // For JSR166. Unpark even if the interrupt status was already set.
4186 if (thread->is_Java_thread())
4187 ((JavaThread*)thread)->parker()->unpark();
4189 ParkEvent * ev = thread->_ParkEvent;
4190 if (ev != NULL) ev->unpark();
4192 }
4194 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
4195 assert(Thread::current() == thread || Threads_lock->owned_by_self(),
4196 "possibility of dangling Thread pointer");
4198 OSThread* osthread = thread->osthread();
4200 bool interrupted = osthread->interrupted();
4202 if (interrupted && clear_interrupted) {
4203 osthread->set_interrupted(false);
4204 // consider thread->_SleepEvent->reset() ... optional optimization
4205 }
4207 return interrupted;
4208 }
4210 ///////////////////////////////////////////////////////////////////////////////////
4211 // signal handling (except suspend/resume)
4213 // This routine may be used by user applications as a "hook" to catch signals.
4214 // The user-defined signal handler must pass unrecognized signals to this
4215 // routine, and if it returns true (non-zero), then the signal handler must
4216 // return immediately. If the flag "abort_if_unrecognized" is true, then this
4217 // routine will never return false (zero), but instead will execute a VM panic
4218 // routine that kills the process.
4219 //
4220 // If this routine returns false, it is OK to call it again. This allows
4221 // the user-defined signal handler to perform checks either before or after
4222 // the VM performs its own checks. Naturally, the user code would be making
4223 // a serious error if it tried to handle an exception (such as a null check
4224 // or breakpoint) that the VM was generating for its own correct operation.
4225 //
4226 // This routine may recognize any of the following kinds of signals:
4227 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
4228 // It should be consulted by handlers for any of those signals.
4229 //
4230 // The caller of this routine must pass in the three arguments supplied
4231 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4232 // field of the structure passed to sigaction(). This routine assumes that
4233 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4234 //
4235 // Note that the VM will print warnings if it detects conflicting signal
4236 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4237 //
4238 extern "C" JNIEXPORT int
4239 JVM_handle_linux_signal(int signo, siginfo_t* siginfo,
4240 void* ucontext, int abort_if_unrecognized);
4242 void signalHandler(int sig, siginfo_t* info, void* uc) {
4243 assert(info != NULL && uc != NULL, "it must be old kernel");
4244 int orig_errno = errno; // Preserve errno value over signal handler.
4245 JVM_handle_linux_signal(sig, info, uc, true);
4246 errno = orig_errno;
4247 }
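// Sketch of the application-side "hook" usage described above (user code,
// not part of the VM; handler name hypothetical). Passing
// abort_if_unrecognized == 0 lets the application act when the VM declines
// the signal:
static void user_hook_handler_sketch(int sig, siginfo_t* info, void* uc) {
  if (JVM_handle_linux_signal(sig, info, uc, 0)) {
    return;  // the VM recognized and consumed the signal
  }
  // application-specific handling would go here
}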
4250 // This boolean allows users to forward their own non-matching signals
4251 // to JVM_handle_linux_signal, harmlessly.
4252 bool os::Linux::signal_handlers_are_installed = false;
4254 // For signal-chaining
4255 struct sigaction os::Linux::sigact[MAXSIGNUM];
4256 unsigned int os::Linux::sigs = 0;
4257 bool os::Linux::libjsig_is_loaded = false;
4258 typedef struct sigaction *(*get_signal_t)(int);
4259 get_signal_t os::Linux::get_signal_action = NULL;
4261 struct sigaction* os::Linux::get_chained_signal_action(int sig) {
4262 struct sigaction *actp = NULL;
4264 if (libjsig_is_loaded) {
4265 // Retrieve the old signal handler from libjsig
4266 actp = (*get_signal_action)(sig);
4267 }
4268 if (actp == NULL) {
4269 // Retrieve the preinstalled signal handler from jvm
4270 actp = get_preinstalled_handler(sig);
4271 }
4273 return actp;
4274 }
4276 static bool call_chained_handler(struct sigaction *actp, int sig,
4277 siginfo_t *siginfo, void *context) {
4278 // Call the old signal handler
4279 if (actp->sa_handler == SIG_DFL) {
4280 // It's more reasonable to let jvm treat it as an unexpected exception
4281 // instead of taking the default action.
4282 return false;
4283 } else if (actp->sa_handler != SIG_IGN) {
4284 if ((actp->sa_flags & SA_NODEFER) == 0) {
4285 // automatically block the signal
4286 sigaddset(&(actp->sa_mask), sig);
4287 }
4289 sa_handler_t hand;
4290 sa_sigaction_t sa;
4291 bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4292 // retrieve the chained handler
4293 if (siginfo_flag_set) {
4294 sa = actp->sa_sigaction;
4295 } else {
4296 hand = actp->sa_handler;
4297 }
4299 if ((actp->sa_flags & SA_RESETHAND) != 0) {
4300 actp->sa_handler = SIG_DFL;
4301 }
4303 // try to honor the signal mask
4304 sigset_t oset;
4305 pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4307 // call into the chained handler
4308 if (siginfo_flag_set) {
4309 (*sa)(sig, siginfo, context);
4310 } else {
4311 (*hand)(sig);
4312 }
4314 // restore the signal mask
4315 pthread_sigmask(SIG_SETMASK, &oset, 0);
4316 }
4317 // Tell jvm's signal handler the signal is taken care of.
4318 return true;
4319 }
4321 bool os::Linux::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4322 bool chained = false;
4323 // signal-chaining
4324 if (UseSignalChaining) {
4325 struct sigaction *actp = get_chained_signal_action(sig);
4326 if (actp != NULL) {
4327 chained = call_chained_handler(actp, sig, siginfo, context);
4328 }
4329 }
4330 return chained;
4331 }
4333 struct sigaction* os::Linux::get_preinstalled_handler(int sig) {
4334 if ((((unsigned int)1 << sig) & sigs) != 0) {
4335 return &sigact[sig];
4336 }
4337 return NULL;
4338 }
4340 void os::Linux::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4341 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
4342 sigact[sig] = oldAct;
4343 sigs |= (unsigned int)1 << sig;
4344 }
4346 // for diagnostics
4347 int os::Linux::sigflags[MAXSIGNUM];
4349 int os::Linux::get_our_sigflags(int sig) {
4350 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
4351 return sigflags[sig];
4352 }
4354 void os::Linux::set_our_sigflags(int sig, int flags) {
4355 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
4356 sigflags[sig] = flags;
4357 }
4359 void os::Linux::set_signal_handler(int sig, bool set_installed) {
4360 // Check for overwrite.
4361 struct sigaction oldAct;
4362 sigaction(sig, (struct sigaction*)NULL, &oldAct);
4364 void* oldhand = oldAct.sa_sigaction
4365 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4366 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4367 if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
4368 oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
4369 oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)signalHandler)) {
4370 if (AllowUserSignalHandlers || !set_installed) {
4371 // Do not overwrite; user takes responsibility to forward to us.
4372 return;
4373 } else if (UseSignalChaining) {
4374 // save the old handler in jvm
4375 save_preinstalled_handler(sig, oldAct);
4376 // libjsig also interposes the sigaction() call below and saves the
4377 // old sigaction on its own.
4378 } else {
4379 fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
4380 "%#lx for signal %d.", (long)oldhand, sig));
4381 }
4382 }
4384 struct sigaction sigAct;
4385 sigfillset(&(sigAct.sa_mask));
4386 sigAct.sa_handler = SIG_DFL;
4387 if (!set_installed) {
4388 sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
4389 } else {
4390 sigAct.sa_sigaction = signalHandler;
4391 sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
4392 }
4393 // Save flags, which are set by ours
4394 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
4395 sigflags[sig] = sigAct.sa_flags;
4397 int ret = sigaction(sig, &sigAct, &oldAct);
4398 assert(ret == 0, "check");
4400 void* oldhand2 = oldAct.sa_sigaction
4401 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
4402 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
4403 assert(oldhand2 == oldhand, "no concurrent signal handler installation");
4404 }
4406 // install signal handlers for signals that HotSpot needs to
4407 // handle in order to support Java-level exception handling.
4409 void os::Linux::install_signal_handlers() {
4410 if (!signal_handlers_are_installed) {
4411 signal_handlers_are_installed = true;
4413 // signal-chaining
4414 typedef void (*signal_setting_t)();
4415 signal_setting_t begin_signal_setting = NULL;
4416 signal_setting_t end_signal_setting = NULL;
4417 begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4418 dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
4419 if (begin_signal_setting != NULL) {
4420 end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
4421 dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
4422 get_signal_action = CAST_TO_FN_PTR(get_signal_t,
4423 dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
4424 libjsig_is_loaded = true;
4425 assert(UseSignalChaining, "should enable signal-chaining");
4426 }
4427 if (libjsig_is_loaded) {
4428 // Tell libjsig jvm is setting signal handlers
4429 (*begin_signal_setting)();
4430 }
4432 set_signal_handler(SIGSEGV, true);
4433 set_signal_handler(SIGPIPE, true);
4434 set_signal_handler(SIGBUS, true);
4435 set_signal_handler(SIGILL, true);
4436 set_signal_handler(SIGFPE, true);
4437 #if defined(PPC64)
4438 set_signal_handler(SIGTRAP, true);
4439 #endif
4440 set_signal_handler(SIGXFSZ, true);
4442 if (libjsig_is_loaded) {
4443 // Tell libjsig jvm finishes setting signal handlers
4444 (*end_signal_setting)();
4445 }
4447 // We don't activate signal checker if libjsig is in place, we trust ourselves
4448 // and if UserSignalHandler is installed all bets are off.
4449 // Log that signal checking is off only if -verbose:jni is specified.
4450 if (CheckJNICalls) {
4451 if (libjsig_is_loaded) {
4452 if (PrintJNIResolving) {
4453 tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
4454 }
4455 check_signals = false;
4456 }
4457 if (AllowUserSignalHandlers) {
4458 if (PrintJNIResolving) {
4459 tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4460 }
4461 check_signals = false;
4462 }
4463 }
4464 }
4465 }
4467 // This is the fastest way to get thread cpu time on Linux.
4468 // Returns cpu time (user+sys) for any thread, not only for current.
4469 // POSIX-compliant clocks are implemented in kernels 2.6.16+.
4470 // It might work on 2.6.10+ with a special kernel/glibc patch.
4471 // For reference, please see IEEE Std 1003.1-2004:
4472 // http://www.unix.org/single_unix_specification
4474 jlong os::Linux::fast_thread_cpu_time(clockid_t clockid) {
4475 struct timespec tp;
4476 int rc = os::Linux::clock_gettime(clockid, &tp);
4477 assert(rc == 0, "clock_gettime is expected to return 0 code");
4479 return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
4480 }
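// Companion sketch: obtaining a clockid for an arbitrary pthread via
// pthread_getcpuclockid(3) and feeding it to the helper above (hypothetical
// helper; POSIX guarantees the call where _POSIX_THREAD_CPUTIME is defined):
static jlong thread_cpu_time_sketch(pthread_t which) {
  clockid_t cid;
  if (pthread_getcpuclockid(which, &cid) != 0) {
    return -1;  // thread has exited, or per-thread clocks unsupported
  }
  return os::Linux::fast_thread_cpu_time(cid);
}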
4482 /////
4483 // glibc on Linux uses an undocumented flag
4484 // to indicate that a special sort of signal
4485 // trampoline is used.
4486 // We will never set this flag, and we should
4487 // ignore it in our diagnostics.
4488 #ifdef SIGNIFICANT_SIGNAL_MASK
4489 #undef SIGNIFICANT_SIGNAL_MASK
4490 #endif
4491 #define SIGNIFICANT_SIGNAL_MASK (~0x04000000)
4493 static const char* get_signal_handler_name(address handler,
4494 char* buf, int buflen) {
4495 int offset;
4496 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
4497 if (found) {
4498 // skip directory names
4499 const char *p1, *p2;
4500 p1 = buf;
4501 size_t len = strlen(os::file_separator());
4502 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
4503 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
4504 } else {
4505 jio_snprintf(buf, buflen, PTR_FORMAT, handler);
4506 }
4507 return buf;
4508 }
4510 static void print_signal_handler(outputStream* st, int sig,
4511 char* buf, size_t buflen) {
4512 struct sigaction sa;
4514 sigaction(sig, NULL, &sa);
4516 // See comment for SIGNIFICANT_SIGNAL_MASK define
4517 sa.sa_flags &= SIGNIFICANT_SIGNAL_MASK;
4519 st->print("%s: ", os::exception_name(sig, buf, buflen));
4521 address handler = (sa.sa_flags & SA_SIGINFO)
4522 ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
4523 : CAST_FROM_FN_PTR(address, sa.sa_handler);
4525 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
4526 st->print("SIG_DFL");
4527 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
4528 st->print("SIG_IGN");
4529 } else {
4530 st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
4531 }
4533 st->print(", sa_mask[0]=");
4534 os::Posix::print_signal_set_short(st, &sa.sa_mask);
4536 address rh = VMError::get_resetted_sighandler(sig);
4537 // Maybe the handler was reset by VMError?
4538 if (rh != NULL) {
4539 handler = rh;
4540 sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
4541 }
4543 st->print(", sa_flags=");
4544 os::Posix::print_sa_flags(st, sa.sa_flags);
4546 // Check: is it our handler?
4547 if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
4548 handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
4549 // It is our signal handler.
4550 // Check the flags and warn if they were changed from ours.
4551 if ((int)sa.sa_flags != os::Linux::get_our_sigflags(sig)) {
4552 st->print(
4553 ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
4554 os::Linux::get_our_sigflags(sig));
4555 }
4556 }
4557 st->cr();
4558 }
4561 #define DO_SIGNAL_CHECK(sig) \
4562 if (!sigismember(&check_signal_done, sig)) \
4563 os::Linux::check_signal_handler(sig)
4565 // This method is a periodic task to check for misbehaving JNI applications
4566 // under CheckJNI; we can add any periodic checks here.
4568 void os::run_periodic_checks() {
4570 if (check_signals == false) return;
4572 // SEGV and BUS if overridden could potentially prevent
4573 // generation of hs*.log in the event of a crash, debugging
4574 // such a case can be very challenging, so we absolutely
4575 // check the following for good measure:
4576 DO_SIGNAL_CHECK(SIGSEGV);
4577 DO_SIGNAL_CHECK(SIGILL);
4578 DO_SIGNAL_CHECK(SIGFPE);
4579 DO_SIGNAL_CHECK(SIGBUS);
4580 DO_SIGNAL_CHECK(SIGPIPE);
4581 DO_SIGNAL_CHECK(SIGXFSZ);
4582 #if defined(PPC64)
4583 DO_SIGNAL_CHECK(SIGTRAP);
4584 #endif
4586 // ReduceSignalUsage allows the user to override these handlers
4587 // see comments at the very top and jvm_linux.h
4588 if (!ReduceSignalUsage) {
4589 DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
4590 DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
4591 DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
4592 DO_SIGNAL_CHECK(BREAK_SIGNAL);
4593 }
4595 DO_SIGNAL_CHECK(SR_signum);
4596 DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
4597 }
4599 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
4601 static os_sigaction_t os_sigaction = NULL;
4603 void os::Linux::check_signal_handler(int sig) {
4604 char buf[O_BUFLEN];
4605 address jvmHandler = NULL;
4608 struct sigaction act;
4609 if (os_sigaction == NULL) {
4610 // only trust the default sigaction, in case it has been interposed
4611 os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
4612 if (os_sigaction == NULL) return;
4613 }
4615 os_sigaction(sig, (struct sigaction*)NULL, &act);
4618 act.sa_flags &= SIGNIFICANT_SIGNAL_MASK;
4620 address thisHandler = (act.sa_flags & SA_SIGINFO)
4621 ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
4622 : CAST_FROM_FN_PTR(address, act.sa_handler) ;
4625 switch (sig) {
4626 case SIGSEGV:
4627 case SIGBUS:
4628 case SIGFPE:
4629 case SIGPIPE:
4630 case SIGILL:
4631 case SIGXFSZ:
4632 jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler);
4633 break;
4635 case SHUTDOWN1_SIGNAL:
4636 case SHUTDOWN2_SIGNAL:
4637 case SHUTDOWN3_SIGNAL:
4638 case BREAK_SIGNAL:
4639 jvmHandler = (address)user_handler();
4640 break;
4642 case INTERRUPT_SIGNAL:
4643 jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
4644 break;
4646 default:
4647 if (sig == SR_signum) {
4648 jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
4649 } else {
4650 return;
4651 }
4652 break;
4653 }
4655 if (thisHandler != jvmHandler) {
4656 tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
4657 tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
4658 tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
4659 // No need to check this sig any longer
4660 sigaddset(&check_signal_done, sig);
4661 } else if (os::Linux::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Linux::get_our_sigflags(sig)) {
4662 tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
4663 tty->print("expected:" PTR32_FORMAT, os::Linux::get_our_sigflags(sig));
4664 tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
4665 // No need to check this sig any longer
4666 sigaddset(&check_signal_done, sig);
4667 }
4669 // Dump all the signal handlers
4670 if (sigismember(&check_signal_done, sig)) {
4671 print_signal_handlers(tty, buf, O_BUFLEN);
4672 }
4673 }
4675 extern void report_error(char* file_name, int line_no, char* title, char* format, ...);
4677 extern bool signal_name(int signo, char* buf, size_t len);
4679 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4680 if (0 < exception_code && exception_code <= SIGRTMAX) {
4681 // signal
4682 if (!signal_name(exception_code, buf, size)) {
4683 jio_snprintf(buf, size, "SIG%d", exception_code);
4684 }
4685 return buf;
4686 } else {
4687 return NULL;
4688 }
4689 }
4691 // this is called _before_ most of the global arguments have been parsed
4692 void os::init(void) {
4693 char dummy; /* used to get a guess on initial stack address */
4694 // first_hrtime = gethrtime();
4696 // With LinuxThreads the JavaMain thread pid (primordial thread)
4697 // is different than the pid of the java launcher thread.
4698 // So, on Linux, the launcher thread pid is passed to the VM
4699 // via the sun.java.launcher.pid property.
4700 // Use this property instead of getpid() if it was correctly passed.
4701 // See bug 6351349.
4702 pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();
4704 _initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();
4706 clock_tics_per_sec = sysconf(_SC_CLK_TCK);
4708 init_random(1234567);
4710 ThreadCritical::initialize();
4712 Linux::set_page_size(sysconf(_SC_PAGESIZE));
4713 if (Linux::page_size() == -1) {
4714 fatal(err_msg("os_linux.cpp: os::init: sysconf failed (%s)",
4715 strerror(errno)));
4716 }
4717 init_page_sizes((size_t) Linux::page_size());
4719 Linux::initialize_system_info();
4721 // main_thread points to the aboriginal thread
4722 Linux::_main_thread = pthread_self();
4724 Linux::clock_init();
4725 initial_time_count = javaTimeNanos();
4727 // pthread_condattr initialization for monotonic clock
4728 int status;
4729 pthread_condattr_t* _condattr = os::Linux::condAttr();
4730 if ((status = pthread_condattr_init(_condattr)) != 0) {
4731 fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
4732 }
4733 // Only set the clock if CLOCK_MONOTONIC is available
4734 if (Linux::supports_monotonic_clock()) {
4735 if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
4736 if (status == EINVAL) {
4737 warning("Unable to use monotonic clock with relative timed-waits" \
4738 " - changes to the time-of-day clock may have adverse affects");
4739 } else {
4740 fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
4741 }
4742 }
4743 }
4744 // else it defaults to CLOCK_REALTIME
4746 pthread_mutex_init(&dl_mutex, NULL);
4748 // If the pagesize of the VM is greater than 8K determine the appropriate
4749 // number of initial guard pages. The user can change this with the
4750 // command line arguments, if needed.
4751 if (vm_page_size() > (int)Linux::vm_default_page_size()) {
4752 StackYellowPages = 1;
4753 StackRedPages = 1;
4754 StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size();
4755 }
4756 }
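// Sketch of how a wait uses the condattr configured above: the absolute
// deadline must be computed against the same clock the condvar was
// initialized with (hypothetical helper; assumes CLOCK_MONOTONIC was
// accepted and a raw clock_gettime() is available):
static void timed_wait_monotonic_sketch(pthread_cond_t* cv,
                                        pthread_mutex_t* mu,
                                        jlong millis) {
  struct timespec deadline;
  clock_gettime(CLOCK_MONOTONIC, &deadline);
  deadline.tv_sec  += millis / 1000;
  deadline.tv_nsec += (millis % 1000) * 1000000;
  if (deadline.tv_nsec >= 1000000000) {
    deadline.tv_sec  += 1;
    deadline.tv_nsec -= 1000000000;
  }
  pthread_cond_timedwait(cv, mu, &deadline);  // caller holds mu
}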
4758 // To install functions for atexit system call
4759 extern "C" {
4760 static void perfMemory_exit_helper() {
4761 perfMemory_exit();
4762 }
4763 }
4765 // this is called _after_ the global arguments have been parsed
4766 jint os::init_2(void)
4767 {
4768 Linux::fast_thread_clock_init();
4770 // Allocate a single page and mark it as readable for safepoint polling
4771 address polling_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
4772 guarantee(polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page");
4774 os::set_polling_page(polling_page);
4776 #ifndef PRODUCT
4777 if (Verbose && PrintMiscellaneous)
4778 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
4779 #endif
4781 if (!UseMembar) {
4782 address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
4783 guarantee(mem_serialize_page != MAP_FAILED, "mmap failed for memory serialize page");
4784 os::set_memory_serialize_page(mem_serialize_page);
4786 #ifndef PRODUCT
4787 if (Verbose && PrintMiscellaneous)
4788 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
4789 #endif
4790 }
4792 // initialize suspend/resume support - must do this before signal_sets_init()
4793 if (SR_initialize() != 0) {
4794 perror("SR_initialize failed");
4795 return JNI_ERR;
4796 }
4798 Linux::signal_sets_init();
4799 Linux::install_signal_handlers();
4801 // Check minimum allowable stack size for thread creation and to initialize
4802 // the java system classes, including StackOverflowError - depends on page
4803 // size. Add a page for compiler2 recursion in main thread.
4804 // Add in 2*BytesPerWord times page size to account for VM stack during
4805 // class initialization depending on 32 or 64 bit VM.
4806 os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
4807 (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
4808 (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
4810 size_t threadStackSizeInBytes = ThreadStackSize * K;
4811 if (threadStackSizeInBytes != 0 &&
4812 threadStackSizeInBytes < os::Linux::min_stack_allowed) {
4813 tty->print_cr("\nThe stack size specified is too small, "
4814 "Specify at least %dk",
4815 os::Linux::min_stack_allowed/ K);
4816 return JNI_ERR;
4817 }
4819 // Make the stack size a multiple of the page size so that
4820 // the yellow/red zones can be guarded.
4821 JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
4822 vm_page_size()));
4824 Linux::capture_initial_stack(JavaThread::stack_size_at_create());
4826 #if defined(IA32)
4827 workaround_expand_exec_shield_cs_limit();
4828 #endif
4830 Linux::libpthread_init();
4831 if (PrintMiscellaneous && (Verbose || WizardMode)) {
4832 tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
4833 Linux::glibc_version(), Linux::libpthread_version(),
4834 Linux::is_floating_stack() ? "floating stack" : "fixed stack");
4835 }
4837 if (UseNUMA) {
4838 if (!Linux::libnuma_init()) {
4839 UseNUMA = false;
4840 } else {
4841 if (Linux::numa_max_node() < 1) {
4842 // There's only one node (they start from 0); disable NUMA.
4843 UseNUMA = false;
4844 }
4845 }
4846 // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
4847 // we can make the adaptive lgrp chunk resizing work. If the user specified
4848 // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
4849 // disable adaptive resizing.
4850 if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
4851 if (FLAG_IS_DEFAULT(UseNUMA)) {
4852 UseNUMA = false;
4853 } else {
4854 if (FLAG_IS_DEFAULT(UseLargePages) &&
4855 FLAG_IS_DEFAULT(UseSHM) &&
4856 FLAG_IS_DEFAULT(UseHugeTLBFS)) {
4857 UseLargePages = false;
4858 } else {
4859 warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing");
4860 UseAdaptiveSizePolicy = false;
4861 UseAdaptiveNUMAChunkSizing = false;
4862 }
4863 }
4864 }
4865 if (!UseNUMA && ForceNUMA) {
4866 UseNUMA = true;
4867 }
4868 }
4870 if (MaxFDLimit) {
4871 // Set the number of file descriptors to the max. Print an error
4872 // if getrlimit/setrlimit fails, but continue regardless.
4873 struct rlimit nbr_files;
4874 int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4875 if (status != 0) {
4876 if (PrintMiscellaneous && (Verbose || WizardMode))
4877 perror("os::init_2 getrlimit failed");
4878 } else {
4879 nbr_files.rlim_cur = nbr_files.rlim_max;
4880 status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4881 if (status != 0) {
4882 if (PrintMiscellaneous && (Verbose || WizardMode))
4883 perror("os::init_2 setrlimit failed");
4884 }
4885 }
4886 }
4888 // Initialize lock used to serialize thread creation (see os::create_thread)
4889 Linux::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));
4891 // at-exit methods are called in the reverse order of their registration.
4892 // atexit functions are called on return from main or as a result of a
4893 // call to exit(3). There can be only 32 of these functions registered
4894 // and atexit() does not set errno.
4896 if (PerfAllowAtExitRegistration) {
4897 // only register atexit functions if PerfAllowAtExitRegistration is set.
4898 // atexit functions can be delayed until process exit time, which
4899 // can be problematic for embedded VM situations. Embedded VMs should
4900 // call DestroyJavaVM() to ensure that VM resources are released.
4902 // note: perfMemory_exit_helper atexit function may be removed in
4903 // the future if the appropriate cleanup code can be added to the
4904 // VM_Exit VMOperation's doit method.
4905 if (atexit(perfMemory_exit_helper) != 0) {
4906 warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4907 }
4908 }
4910 // initialize thread priority policy
4911 prio_init();
4913 return JNI_OK;
4914 }
4916 // this is called at the end of vm_initialization
4917 void os::init_3(void) {
4918 #ifdef JAVASE_EMBEDDED
4919 // Start the MemNotifyThread
4920 if (LowMemoryProtection) {
4921 MemNotifyThread::start();
4922 }
4923 return;
4924 #endif
4925 }
4927 // Mark the polling page as unreadable
4928 void os::make_polling_page_unreadable(void) {
4929 if( !guard_memory((char*)_polling_page, Linux::page_size()) )
4930 fatal("Could not disable polling page");
4931 };
4933 // Mark the polling page as readable
4934 void os::make_polling_page_readable(void) {
4935 if( !linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
4936 fatal("Could not enable polling page");
4937 }
4938 };
4940 int os::active_processor_count() {
4941 // Linux doesn't yet have an (official) notion of processor sets,
4942 // so just return the number of online processors.
4943 int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
4944 assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
4945 return online_cpus;
4946 }
4948 void os::set_native_thread_name(const char *name) {
4949 // Not yet implemented.
4950 return;
4951 }
4953 bool os::distribute_processes(uint length, uint* distribution) {
4954 // Not yet implemented.
4955 return false;
4956 }
4958 bool os::bind_to_processor(uint processor_id) {
4959 // Not yet implemented.
4960 return false;
4961 }
4963 ///
4965 void os::SuspendedThreadTask::internal_do_task() {
4966 if (do_suspend(_thread->osthread())) {
4967 SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
4968 do_task(context);
4969 do_resume(_thread->osthread());
4970 }
4971 }
4973 class PcFetcher : public os::SuspendedThreadTask {
4974 public:
4975 PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
4976 ExtendedPC result();
4977 protected:
4978 void do_task(const os::SuspendedThreadTaskContext& context);
4979 private:
4980 ExtendedPC _epc;
4981 };
4983 ExtendedPC PcFetcher::result() {
4984 guarantee(is_done(), "task is not done yet.");
4985 return _epc;
4986 }
4988 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
4989 Thread* thread = context.thread();
4990 OSThread* osthread = thread->osthread();
4991 if (osthread->ucontext() != NULL) {
4992 _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
4993 } else {
4994 // NULL context is unexpected, double-check this is the VMThread
4995 guarantee(thread->is_VM_thread(), "can only be called for VMThread");
4996 }
4997 }
4999 // Suspends the target using the signal mechanism and then grabs the PC before
5000 // resuming the target. Used by the flat-profiler only
5001 ExtendedPC os::get_thread_pc(Thread* thread) {
5002 // Make sure that it is called by the watcher for the VMThread
5003 assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
5004 assert(thread->is_VM_thread(), "Can only be called for VMThread");
5006 PcFetcher fetcher(thread);
5007 fetcher.run();
5008 return fetcher.result();
5009 }
5011 int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
5012 {
5013 if (is_NPTL()) {
5014 return pthread_cond_timedwait(_cond, _mutex, _abstime);
5015 } else {
5016 // 6292965: LinuxThreads pthread_cond_timedwait() resets FPU control
5017 // word back to default 64bit precision if condvar is signaled. Java
5018 // wants 53bit precision. Save and restore current value.
5019 int fpu = get_fpu_control_word();
5020 int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
5021 set_fpu_control_word(fpu);
5022 return status;
5023 }
5024 }
5026 ////////////////////////////////////////////////////////////////////////////////
5027 // debug support
5029 bool os::find(address addr, outputStream* st) {
5030 Dl_info dlinfo;
5031 memset(&dlinfo, 0, sizeof(dlinfo));
5032 if (dladdr(addr, &dlinfo) != 0) {
5033 st->print(PTR_FORMAT ": ", addr);
5034 if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
5035 st->print("%s+%#x", dlinfo.dli_sname,
5036 addr - (intptr_t)dlinfo.dli_saddr);
5037 } else if (dlinfo.dli_fbase != NULL) {
5038 st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
5039 } else {
5040 st->print("<absolute address>");
5041 }
5042 if (dlinfo.dli_fname != NULL) {
5043 st->print(" in %s", dlinfo.dli_fname);
5044 }
5045 if (dlinfo.dli_fbase != NULL) {
5046 st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
5047 }
5048 st->cr();
5050 if (Verbose) {
5051 // decode some bytes around the PC
5052 address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
5053 address end = clamp_address_in_page(addr+40, addr, os::vm_page_size());
5054 address lowest = (address) dlinfo.dli_sname;
5055 if (!lowest) lowest = (address) dlinfo.dli_fbase;
5056 if (begin < lowest) begin = lowest;
5057 Dl_info dlinfo2;
5058 if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
5059 && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
5060 end = (address) dlinfo2.dli_saddr;
5061 Disassembler::decode(begin, end, st);
5062 }
5063 return true;
5064 }
5065 return false;
5066 }
5068 ////////////////////////////////////////////////////////////////////////////////
5069 // misc
5071 // This does not do anything on Linux. This is basically a hook for being
5072 // able to use structured exception handling (thread-local exception filters)
5073 // on, e.g., Win32.
5074 void
5075 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
5076 JavaCallArguments* args, Thread* thread) {
5077 f(value, method, args, thread);
5078 }
5080 void os::print_statistics() {
5081 }
5083 int os::message_box(const char* title, const char* message) {
5084 int i;
5085 fdStream err(defaultStream::error_fd());
5086 for (i = 0; i < 78; i++) err.print_raw("=");
5087 err.cr();
5088 err.print_raw_cr(title);
5089 for (i = 0; i < 78; i++) err.print_raw("-");
5090 err.cr();
5091 err.print_raw_cr(message);
5092 for (i = 0; i < 78; i++) err.print_raw("=");
5093 err.cr();
5095 char buf[16];
5096 // Prevent the process from exiting on a read error, without spinning and consuming all CPU
5097 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
5099 return buf[0] == 'y' || buf[0] == 'Y';
5100 }
5102 int os::stat(const char *path, struct stat *sbuf) {
5103 char pathbuf[MAX_PATH];
5104 if (strlen(path) > MAX_PATH - 1) {
5105 errno = ENAMETOOLONG;
5106 return -1;
5107 }
5108 os::native_path(strcpy(pathbuf, path));
5109 return ::stat(pathbuf, sbuf);
5110 }
5112 bool os::check_heap(bool force) {
5113 return true;
5114 }
5116 int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
5117 return ::vsnprintf(buf, count, format, args);
5118 }
5120 // Is a (classpath) directory empty?
5121 bool os::dir_is_empty(const char* path) {
5122 DIR *dir = NULL;
5123 struct dirent *ptr;
5125 dir = opendir(path);
5126 if (dir == NULL) return true;
5128 /* Scan the directory */
5129 bool result = true;
5131 while (result && (ptr = ::readdir(dir)) != NULL) {
5132 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5133 result = false;
5134 }
5135 }
5136 closedir(dir);
5137 return result;
5138 }
5140 // This code originates from JDK's sysOpen and open64_w
5141 // from src/solaris/hpi/src/system_md.c
5143 #ifndef O_DELETE
5144 #define O_DELETE 0x10000
5145 #endif
5147 // Open a file. Unlink the file immediately after open returns
5148 // if the specified oflag has the O_DELETE flag set.
5149 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5151 int os::open(const char *path, int oflag, int mode) {
5153 if (strlen(path) > MAX_PATH - 1) {
5154 errno = ENAMETOOLONG;
5155 return -1;
5156 }
5157 int fd;
5158 int o_delete = (oflag & O_DELETE);
5159 oflag = oflag & ~O_DELETE;
5161 fd = ::open64(path, oflag, mode);
5162 if (fd == -1) return -1;
5164 // If the open succeeded, the file might still be a directory
5165 {
5166 struct stat64 buf64;
5167 int ret = ::fstat64(fd, &buf64);
5168 int st_mode = buf64.st_mode;
5170 if (ret != -1) {
5171 if ((st_mode & S_IFMT) == S_IFDIR) {
5172 errno = EISDIR;
5173 ::close(fd);
5174 return -1;
5175 }
5176 } else {
5177 ::close(fd);
5178 return -1;
5179 }
5180 }
5182 /*
5183 * All file descriptors that are opened in the JVM and not
5184 * specifically destined for a subprocess should have the
5185 * close-on-exec flag set. If we don't set it, then careless 3rd
5186 * party native code might fork and exec without closing all
5187 * appropriate file descriptors (e.g. as we do in closeDescriptors in
5188 * UNIXProcess.c), and this in turn might:
5189 *
5190 * - cause end-of-file to fail to be detected on some file
5191 * descriptors, resulting in mysterious hangs, or
5192 *
5193 * - might cause an fopen in the subprocess to fail on a system
5194 * suffering from bug 1085341.
5195 *
5196 * (Yes, the default setting of the close-on-exec flag is a Unix
5197 * design flaw)
5198 *
5199 * See:
5200 * 1085341: 32-bit stdio routines should support file descriptors >255
5201 * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
5202 * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
5203 */
5204 #ifdef FD_CLOEXEC
5205 {
5206 int flags = ::fcntl(fd, F_GETFD);
5207 if (flags != -1)
5208 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
5209 }
5210 #endif
5212 if (o_delete != 0) {
5213 ::unlink(path);
5214 }
5215 return fd;
5216 }
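// Illustrative (hypothetical) use of the O_DELETE convention above: open a
// scratch file that disappears from the filesystem immediately, but remains
// readable/writable through the descriptor until it is closed.
//
//   int fd = os::open("/tmp/scratch.tmp", O_RDWR | O_CREAT | O_DELETE, 0600);
//   // ... use fd; the name is already unlinked ...
//   ::close(fd);   // last reference gone; the kernel reclaims the inode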
5219 // create binary file, rewriting existing file if required
5220 int os::create_binary_file(const char* path, bool rewrite_existing) {
5221 int oflags = O_WRONLY | O_CREAT;
5222 if (!rewrite_existing) {
5223 oflags |= O_EXCL;
5224 }
5225 return ::open64(path, oflags, S_IREAD | S_IWRITE);
5226 }
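// For instance (hypothetical): with rewrite_existing == false, the O_EXCL
// flag makes the call fail with errno == EEXIST rather than clobber an
// existing file.
//
//   int fd = os::create_binary_file("/tmp/replay.log", false);
//   if (fd < 0 && errno == EEXIST) { /* refuse to overwrite */ }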
5228 // return current position of file pointer
5229 jlong os::current_file_offset(int fd) {
5230 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5231 }
5233 // move file pointer to the specified offset
5234 jlong os::seek_to_file_offset(int fd, jlong offset) {
5235 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5236 }
5238 // This code originates from JDK's sysAvailable
5239 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
5241 int os::available(int fd, jlong *bytes) {
5242 jlong cur, end;
5243 int mode;
5244 struct stat64 buf64;
5246 if (::fstat64(fd, &buf64) >= 0) {
5247 mode = buf64.st_mode;
5248 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5249 /*
5250 * XXX: is the following call interruptible? If so, this might
5251 * need to go through the INTERRUPT_IO() wrapper as for other
5252 * blocking, interruptible calls in this file.
5253 */
5254 int n;
5255 if (::ioctl(fd, FIONREAD, &n) >= 0) {
5256 *bytes = n;
5257 return 1;
5258 }
5259 }
5260 }
5261 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5262 return 0;
5263 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5264 return 0;
5265 } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5266 return 0;
5267 }
5268 *bytes = end - cur;
5269 return 1;
5270 }
5272 int os::socket_available(int fd, jint *pbytes) {
5273 // Linux doc says EINTR not returned, unlike Solaris
5274 int ret = ::ioctl(fd, FIONREAD, pbytes);
5276 // %% Note: ioctl() can return 0 when successful; JVM_SocketAvailable
5277 // is expected to return 0 on failure and 1 on success to the JDK.
5278 return (ret < 0) ? 0 : 1;
5279 }
5281 // Map a block of memory.
5282 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5283 char *addr, size_t bytes, bool read_only,
5284 bool allow_exec) {
5285 int prot;
5286 int flags = MAP_PRIVATE;
5288 if (read_only) {
5289 prot = PROT_READ;
5290 } else {
5291 prot = PROT_READ | PROT_WRITE;
5292 }
5294 if (allow_exec) {
5295 prot |= PROT_EXEC;
5296 }
5298 if (addr != NULL) {
5299 flags |= MAP_FIXED;
5300 }
5302 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5303 fd, file_offset);
5304 if (mapped_address == MAP_FAILED) {
5305 return NULL;
5306 }
5307 return mapped_address;
5308 }
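// A minimal (hypothetical) use of the mapping above: map the first page of
// an open file read-only at an OS-chosen address, then unmap it.
//
//   char* p = os::pd_map_memory(fd, "data.bin", 0, NULL, os::vm_page_size(),
//                               true /* read_only */, false /* allow_exec */);
//   if (p != NULL) {
//     // ... read through p ...
//     os::pd_unmap_memory(p, os::vm_page_size());
//   }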
5311 // Remap a block of memory.
5312 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5313 char *addr, size_t bytes, bool read_only,
5314 bool allow_exec) {
5315 // same as map_memory() on this OS
5316 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5317 allow_exec);
5318 }
5321 // Unmap a block of memory.
5322 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5323 return munmap(addr, bytes) == 0;
5324 }
5326 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
5328 static clockid_t thread_cpu_clockid(Thread* thread) {
5329 pthread_t tid = thread->osthread()->pthread_id();
5330 clockid_t clockid;
5332 // Get thread clockid
5333 int rc = os::Linux::pthread_getcpuclockid(tid, &clockid);
5334 assert(rc == 0, "pthread_getcpuclockid is expected to return 0");
5335 return clockid;
5336 }
5338 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5339 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5340 // of a thread.
5341 //
5342 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
5343 // the fast estimate available on the platform.
5345 jlong os::current_thread_cpu_time() {
5346 if (os::Linux::supports_fast_thread_cpu_time()) {
5347 return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
5348 } else {
5349 // return user + sys since the cost is the same
5350 return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
5351 }
5352 }
5354 jlong os::thread_cpu_time(Thread* thread) {
5355 // consistent with what current_thread_cpu_time() returns
5356 if (os::Linux::supports_fast_thread_cpu_time()) {
5357 return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
5358 } else {
5359 return slow_thread_cpu_time(thread, true /* user + sys */);
5360 }
5361 }
5363 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5364 if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
5365 return os::Linux::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
5366 } else {
5367 return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
5368 }
5369 }
5371 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5372 if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) {
5373 return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread));
5374 } else {
5375 return slow_thread_cpu_time(thread, user_sys_cpu_time);
5376 }
5377 }
5379 //
5380 // Returns the thread's CPU time in nanoseconds, or -1 on error.
5381 //
5383 PRAGMA_DIAG_PUSH
5384 PRAGMA_FORMAT_NONLITERAL_IGNORED
5385 static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5386 static bool proc_task_unchecked = true;
5387 static const char *proc_stat_path = "/proc/%d/stat";
5388 pid_t tid = thread->osthread()->thread_id();
5389 char *s;
5390 char stat[2048];
5391 int statlen;
5392 char proc_name[64];
5393 int count;
5394 long sys_time, user_time;
5395 char cdummy;
5396 int idummy;
5397 long ldummy;
5398 FILE *fp;
5400 // On newer Linux kernels (2.6+, where NPTL is supported), the
5401 // /proc/<tid>/stat file aggregates per-process usage.
5402 // The /proc/self/task/<tid>/stat file still has the per-thread usage.
5403 // See bug 6328462.
5404 // There may be cases where the /proc/self/task directory does not
5405 // exist, so we check for its availability.
5406 if (proc_task_unchecked && os::Linux::is_NPTL()) {
5407 // This is executed only once
5408 proc_task_unchecked = false;
5409 fp = fopen("/proc/self/task", "r");
5410 if (fp != NULL) {
5411 proc_stat_path = "/proc/self/task/%d/stat";
5412 fclose(fp);
5413 }
5414 }
5416 sprintf(proc_name, proc_stat_path, tid);
5417 fp = fopen(proc_name, "r");
5418 if ( fp == NULL ) return -1;
5419 statlen = fread(stat, 1, 2047, fp);
5420 stat[statlen] = '\0';
5421 fclose(fp);
5423 // Skip pid and the command string. Note that we could be dealing with
5424 // weird command names, e.g. user could decide to rename java launcher
5425 // to "java 1.4.2 :)", then the stat file would look like
5426 // 1234 (java 1.4.2 :)) R ... ...
5427 // We don't really need to know the command string, just find the last
5428 // occurrence of ")" and then start parsing from there. See bug 4726580.
5429 s = strrchr(stat, ')');
5430 if (s == NULL ) return -1;
5432 // Skip blank chars
5433 do s++; while (isspace(*s));
5435 count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
5436 &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
5437 &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
5438 &user_time, &sys_time);
5439 if ( count != 13 ) return -1;
5440 if (user_sys_cpu_time) {
5441 return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
5442 } else {
5443 return (jlong)user_time * (1000000000 / clock_tics_per_sec);
5444 }
5445 }
5446 PRAGMA_DIAG_POP
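// A worked example of the conversion above (hypothetical numbers): with the
// common clock_tics_per_sec == 100, one tick is 1000000000 / 100 = 10,000,000 ns.
// A thread that has accumulated user_time == 25 and sys_time == 5 ticks thus
// reports 30 * 10,000,000 = 300,000,000 ns (300 ms) of user+sys CPU time.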
5448 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5449 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
5450 info_ptr->may_skip_backward = false; // elapsed time not wall time
5451 info_ptr->may_skip_forward = false; // elapsed time not wall time
5452 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
5453 }
5455 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5456 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
5457 info_ptr->may_skip_backward = false; // elapsed time not wall time
5458 info_ptr->may_skip_forward = false; // elapsed time not wall time
5459 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
5460 }
5462 bool os::is_thread_cpu_time_supported() {
5463 return true;
5464 }
5466 // System loadavg support. Returns -1 if load average cannot be obtained.
5467 // Linux doesn't yet have an (official) notion of processor sets,
5468 // so just return the system wide load average.
5469 int os::loadavg(double loadavg[], int nelem) {
5470 return ::getloadavg(loadavg, nelem);
5471 }
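// Illustrative (hypothetical) use: fetch the 1-, 5- and 15-minute averages.
//
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     tty->print_cr("load: %.2f %.2f %.2f", avg[0], avg[1], avg[2]);
//   }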
5473 void os::pause() {
5474 char filename[MAX_PATH];
5475 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5476 jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5477 } else {
5478 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5479 }
5481 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5482 if (fd != -1) {
5483 struct stat buf;
5484 ::close(fd);
5485 while (::stat(filename, &buf) == 0) {
5486 (void)::poll(NULL, 0, 100);
5487 }
5488 } else {
5489 jio_fprintf(stderr,
5490 "Could not open pause file '%s', continuing immediately.\n", filename);
5491 }
5492 }
5495 // Refer to the comments in os_solaris.cpp park-unpark.
5496 //
5497 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
5498 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
5499 // For specifics regarding the bug see GLIBC BUGID 261237 :
5500 // http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
5501 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
5502 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
5503 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
5504 // hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
5505 // and monitorenter when we're using 1-0 locking. All those operations may result in
5506 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
5507 // of libpthread avoids the problem, but isn't practical.
5508 //
5509 // Possible remedies:
5510 //
5511 // 1. Establish a minimum relative wait time. 50 to 100 msecs seems to work.
5512 // This is palliative and probabilistic, however. If the thread is preempted
5513 // between the call to compute_abstime() and pthread_cond_timedwait(), more
5514 // than the minimum period may have passed, and the abstime may be stale (in the
5515 // past) resulting in a hang. Using this technique reduces the odds of a hang
5516 // but the JVM is still vulnerable, particularly on heavily loaded systems.
5517 //
5518 // 2. Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
5519 // of the usual flag-condvar-mutex idiom. The write side of the pipe is set
5520 // NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
5521 // reduces to poll()+read(). This works well, but consumes 2 FDs per extant
5522 // thread.
5523 //
5524 // 3. Embargo pthread_cond_timedwait() and implement a native "chron" thread
5525 // that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
5526 // a timeout request to the chron thread and then blocking via pthread_cond_wait().
5527 // This also works well. In fact it avoids kernel-level scalability impediments
5528 // on certain platforms that don't handle lots of active pthread_cond_timedwait()
5529 // timers in a graceful fashion.
5530 //
5531 // 4. When the abstime value is in the past it appears that control returns
5532 // correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
5533 // Subsequent timedwait/wait calls may hang indefinitely. Given that, we
5534 // can avoid the problem by reinitializing the condvar -- by cond_destroy()
5535 // followed by cond_init() -- after all calls to pthread_cond_timedwait().
5536 // It may be possible to avoid reinitialization by checking the return
5537 // value from pthread_cond_timedwait(). In addition to reinitializing the
5538 // condvar we must establish the invariant that cond_signal() is only called
5539 // within critical sections protected by the adjunct mutex. This prevents
5540 // cond_signal() from "seeing" a condvar that's in the midst of being
5541 // reinitialized or that is corrupt. Sadly, this invariant obviates the
5542 // desirable signal-after-unlock optimization that avoids futile context switching.
5543 //
5544 // I'm also concerned that some versions of NPTL might allocate an auxiliary
5545 // structure when a condvar is used or initialized. cond_destroy() would
5546 // release the helper structure. Our reinitialize-after-timedwait fix
5547 // puts excessive stress on malloc/free and the locks protecting the C-heap.
5548 //
5549 // We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
5550 // It may be possible to refine (4) by checking the kernel and NPTL versions
5551 // and only enabling the work-around for vulnerable environments.
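// A minimal sketch of remedy (2) above -- per-thread pipe-pairs -- for
// illustration only. This is not what HotSpot implements; the class name
// PipeParker and its shape are hypothetical.
#if 0
class PipeParker {
  int _fd[2];                      // _fd[0] = read end, _fd[1] = write end
 public:
  PipeParker() {
    int rc = ::pipe(_fd);
    guarantee(rc == 0, "pipe");
    // Set the write side non-blocking so unpark() never blocks.
    ::fcntl(_fd[1], F_SETFL, O_NONBLOCK);
  }
  void park() {                    // park() reduces to read()
    char c;
    while (::read(_fd[0], &c, 1) < 0 && errno == EINTR) /* retry */ ;
  }
  void park(int millis) {          // park(timo) reduces to poll() + read()
    struct pollfd pfd;
    pfd.fd = _fd[0]; pfd.events = POLLIN; pfd.revents = 0;
    if (::poll(&pfd, 1, millis) > 0) {
      char c;
      ::read(_fd[0], &c, 1);
    }
  }
  void unpark() {                  // unpark() reduces to write()
    char c = 0;
    ::write(_fd[1], &c, 1);        // if the pipe is full, a wakeup is already pending
  }
};
#endif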
5553 // utility to compute the abstime argument to timedwait:
5554 // millis is the relative timeout time
5555 // abstime will be the absolute timeout time
5556 // TODO: replace compute_abstime() with unpackTime()
5558 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
5559 if (millis < 0) millis = 0;
5561 jlong seconds = millis / 1000;
5562 millis %= 1000;
5563 if (seconds > 50000000) { // see man cond_timedwait(3T)
5564 seconds = 50000000;
5565 }
5567 if (os::Linux::supports_monotonic_clock()) {
5568 struct timespec now;
5569 int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
5570 assert_status(status == 0, status, "clock_gettime");
5571 abstime->tv_sec = now.tv_sec + seconds;
5572 long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
5573 if (nanos >= NANOSECS_PER_SEC) {
5574 abstime->tv_sec += 1;
5575 nanos -= NANOSECS_PER_SEC;
5576 }
5577 abstime->tv_nsec = nanos;
5578 } else {
5579 struct timeval now;
5580 int status = gettimeofday(&now, NULL);
5581 assert(status == 0, "gettimeofday");
5582 abstime->tv_sec = now.tv_sec + seconds;
5583 long usec = now.tv_usec + millis * 1000;
5584 if (usec >= 1000000) {
5585 abstime->tv_sec += 1;
5586 usec -= 1000000;
5587 }
5588 abstime->tv_nsec = usec * 1000;
5589 }
5590 return abstime;
5591 }
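// A worked example of compute_abstime() (hypothetical clock values): with
// millis == 1500 and now == {tv_sec = 100, tv_nsec = 800000000}, we get
// seconds == 1 with 500 ms left over, so nanos = 800000000 + 500 * 1000000
// = 1300000000 >= NANOSECS_PER_SEC; after the carry, abstime is
// {tv_sec = 102, tv_nsec = 300000000}.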
5594 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5595 // Conceptually TryPark() should be equivalent to park(0).
5597 int os::PlatformEvent::TryPark() {
5598 for (;;) {
5599 const int v = _Event ;
5600 guarantee ((v == 0) || (v == 1), "invariant") ;
5601 if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
5602 }
5603 }
5605 void os::PlatformEvent::park() { // AKA "down()"
5606 // Invariant: Only the thread associated with the Event/PlatformEvent
5607 // may call park().
5608 // TODO: assert that _Assoc != NULL or _Assoc == Self
5609 int v ;
5610 for (;;) {
5611 v = _Event ;
5612 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5613 }
5614 guarantee (v >= 0, "invariant") ;
5615 if (v == 0) {
5616 // Do this the hard way by blocking ...
5617 int status = pthread_mutex_lock(_mutex);
5618 assert_status(status == 0, status, "mutex_lock");
5619 guarantee (_nParked == 0, "invariant") ;
5620 ++ _nParked ;
5621 while (_Event < 0) {
5622 status = pthread_cond_wait(_cond, _mutex);
5623 // For some reason, under Solaris 2.7 lwp_cond_wait() may return ETIME ...
5624 // Treat this the same as if the wait was interrupted.
5625 if (status == ETIME) { status = EINTR; }
5626 assert_status(status == 0 || status == EINTR, status, "cond_wait");
5627 }
5628 -- _nParked ;
5630 _Event = 0 ;
5631 status = pthread_mutex_unlock(_mutex);
5632 assert_status(status == 0, status, "mutex_unlock");
5633 // Paranoia to ensure our locked and lock-free paths interact
5634 // correctly with each other.
5635 OrderAccess::fence();
5636 }
5637 guarantee (_Event >= 0, "invariant") ;
5638 }
5640 int os::PlatformEvent::park(jlong millis) {
5641 guarantee (_nParked == 0, "invariant") ;
5643 int v ;
5644 for (;;) {
5645 v = _Event ;
5646 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5647 }
5648 guarantee (v >= 0, "invariant") ;
5649 if (v != 0) return OS_OK ;
5651 // We do this the hard way, by blocking the thread.
5652 // Consider enforcing a minimum timeout value.
5653 struct timespec abst;
5654 compute_abstime(&abst, millis);
5656 int ret = OS_TIMEOUT;
5657 int status = pthread_mutex_lock(_mutex);
5658 assert_status(status == 0, status, "mutex_lock");
5659 guarantee (_nParked == 0, "invariant") ;
5660 ++_nParked ;
5662 // Object.wait(timo) will return because of
5663 // (a) notification
5664 // (b) timeout
5665 // (c) thread.interrupt
5666 //
5667 // Thread.interrupt and object.notify{All} both call Event::set.
5668 // That is, we treat thread.interrupt as a special case of notification.
5669 // The underlying pthread_cond_timedwait() implementation admits
5670 // spurious/premature wakeups, but the JLS/JVM spec prevents the
5671 // JVM from making those visible to Java code. As such, we must
5672 // filter out spurious wakeups. We assume all ETIME returns are valid.
5673 //
5674 // TODO: properly differentiate simultaneous notify+interrupt.
5675 // In that case, we should propagate the notify to another waiter.
5677 while (_Event < 0) {
5678 status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
5679 if (status != 0 && WorkAroundNPTLTimedWaitHang) {
5680 pthread_cond_destroy (_cond);
5681 pthread_cond_init (_cond, os::Linux::condAttr()) ;
5682 }
5683 assert_status(status == 0 || status == EINTR ||
5684 status == ETIME || status == ETIMEDOUT,
5685 status, "cond_timedwait");
5686 if (!FilterSpuriousWakeups) break ; // previous semantics
5687 if (status == ETIME || status == ETIMEDOUT) break ;
5688 // We consume and ignore EINTR and spurious wakeups.
5689 }
5690 --_nParked ;
5691 if (_Event >= 0) {
5692 ret = OS_OK;
5693 }
5694 _Event = 0 ;
5695 status = pthread_mutex_unlock(_mutex);
5696 assert_status(status == 0, status, "mutex_unlock");
5697 assert (_nParked == 0, "invariant") ;
5698 // Paranoia to ensure our locked and lock-free paths interact
5699 // correctly with each other.
5700 OrderAccess::fence();
5701 return ret;
5702 }
5704 void os::PlatformEvent::unpark() {
5705 // Transitions for _Event:
5706 // 0 :=> 1
5707 // 1 :=> 1
5708 // -1 :=> either 0 or 1; must signal target thread
5709 // That is, we can safely transition _Event from -1 to either
5710 // 0 or 1. Forcing 1 is slightly more efficient for back-to-back
5711 // unpark() calls.
5712 // See also: "Semaphores in Plan 9" by Mullender & Cox
5713 //
5714 // Note: Forcing a transition from "-1" to "1" on an unpark() means
5715 // that it will take two back-to-back park() calls for the owning
5716 // thread to block. This has the benefit of forcing a spurious return
5717 // from the first park() call after an unpark() call which will help
5718 // shake out uses of park() and unpark() without condition variables.
5720 if (Atomic::xchg(1, &_Event) >= 0) return;
5722 // Wait for the thread associated with the event to vacate
5723 int status = pthread_mutex_lock(_mutex);
5724 assert_status(status == 0, status, "mutex_lock");
5725 int AnyWaiters = _nParked;
5726 assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5727 if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
5728 AnyWaiters = 0;
5729 pthread_cond_signal(_cond);
5730 }
5731 status = pthread_mutex_unlock(_mutex);
5732 assert_status(status == 0, status, "mutex_unlock");
5733 if (AnyWaiters != 0) {
5734 status = pthread_cond_signal(_cond);
5735 assert_status(status == 0, status, "cond_signal");
5736 }
5738 // Note that we signal() _after dropping the lock for "immortal" Events.
5739 // This is safe and avoids a common class of futile wakeups. In rare
5740 // circumstances this can cause a thread to return prematurely from
5741 // cond_{timed}wait() but the spurious wakeup is benign and the victim will
5742 // simply re-test the condition and re-park itself.
5743 }
5746 // JSR166
5747 // -------------------------------------------------------
5749 /*
5750 * The Solaris and Linux implementations of park/unpark are fairly
5751 * conservative for now, but can be improved. They currently use a
5752 * mutex/condvar pair, plus a count.
5753 * Park decrements count if > 0, else does a condvar wait. Unpark
5754 * sets count to 1 and signals condvar. Only one thread ever waits
5755 * on the condvar. Contention seen when trying to park implies that someone
5756 * is unparking you, so don't wait. And spurious returns are fine, so there
5757 * is no need to track notifications.
5758 */
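/*
 * For example (hypothetical sequence): an unpark() before the corresponding
 * park() sets count to 1, so the next park() consumes the permit and returns
 * immediately without blocking; a second unpark() before that park() is a
 * no-op, since count saturates at 1 rather than accumulating.
 */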
5760 /*
5761 * This code is common to linux and solaris and will be moved to a
5762 * common place in dolphin.
5763 *
5764 * The passed in time value is either a relative time in nanoseconds
5765 * or an absolute time in milliseconds. Either way it has to be unpacked
5766 * into suitable seconds and nanoseconds components and stored in the
5767 * given timespec structure.
5768 * Given time is a 64-bit value and the time_t used in the timespec is only
5769 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
5770 * overflow if times far in the future are given. Further, on Solaris versions
5771 * prior to 10 there is a restriction (see cond_timedwait) that the specified
5772 * number of seconds, in abstime, is less than current_time + 100,000,000.
5773 * As it will be 28 years before "now + 100000000" will overflow we can
5774 * ignore overflow and just impose a hard-limit on seconds using the value
5775 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
5776 * years from "now".
5777 */
5779 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5780 assert (time > 0, "convertTime");
5781 time_t max_secs = 0;
5783 if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
5784 struct timeval now;
5785 int status = gettimeofday(&now, NULL);
5786 assert(status == 0, "gettimeofday");
5788 max_secs = now.tv_sec + MAX_SECS;
5790 if (isAbsolute) {
5791 jlong secs = time / 1000;
5792 if (secs > max_secs) {
5793 absTime->tv_sec = max_secs;
5794 } else {
5795 absTime->tv_sec = secs;
5796 }
5797 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5798 } else {
5799 jlong secs = time / NANOSECS_PER_SEC;
5800 if (secs >= MAX_SECS) {
5801 absTime->tv_sec = max_secs;
5802 absTime->tv_nsec = 0;
5803 } else {
5804 absTime->tv_sec = now.tv_sec + secs;
5805 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5806 if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5807 absTime->tv_nsec -= NANOSECS_PER_SEC;
5808 ++absTime->tv_sec; // note: this must be <= max_secs
5809 }
5810 }
5811 }
5812 } else {
5813 // must be relative using monotonic clock
5814 struct timespec now;
5815 int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
5816 assert_status(status == 0, status, "clock_gettime");
5817 max_secs = now.tv_sec + MAX_SECS;
5818 jlong secs = time / NANOSECS_PER_SEC;
5819 if (secs >= MAX_SECS) {
5820 absTime->tv_sec = max_secs;
5821 absTime->tv_nsec = 0;
5822 } else {
5823 absTime->tv_sec = now.tv_sec + secs;
5824 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
5825 if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5826 absTime->tv_nsec -= NANOSECS_PER_SEC;
5827 ++absTime->tv_sec; // note: this must be <= max_secs
5828 }
5829 }
5830 }
5831 assert(absTime->tv_sec >= 0, "tv_sec < 0");
5832 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5833 assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5834 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5835 }
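// A worked example of unpackTime() (hypothetical clock values): a relative
// timeout of 2500000000 ns with now == {tv_sec = 100, tv_nsec = 900000000}
// gives secs == 2, so tv_sec = 102 and tv_nsec = 500000000 + 900000000 =
// 1400000000 >= NANOSECS_PER_SEC; after the carry, absTime is
// {tv_sec = 103, tv_nsec = 400000000}.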
5837 void Parker::park(bool isAbsolute, jlong time) {
5838 // Ideally we'd do something useful while spinning, such
5839 // as calling unpackTime().
5841 // Optional fast-path check:
5842 // Return immediately if a permit is available.
5843 // We depend on Atomic::xchg() having full barrier semantics
5844 // since we are doing a lock-free update to _counter.
5845 if (Atomic::xchg(0, &_counter) > 0) return;
5847 Thread* thread = Thread::current();
5848 assert(thread->is_Java_thread(), "Must be JavaThread");
5849 JavaThread *jt = (JavaThread *)thread;
5851 // Optional optimization -- avoid state transitions if there's an interrupt pending.
5852 // Check interrupt before trying to wait
5853 if (Thread::is_interrupted(thread, false)) {
5854 return;
5855 }
5857 // Next, demultiplex/decode time arguments
5858 timespec absTime;
5859 if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
5860 return;
5861 }
5862 if (time > 0) {
5863 unpackTime(&absTime, isAbsolute, time);
5864 }
5867 // Enter safepoint region
5868 // Beware of deadlocks such as 6317397.
5869 // The per-thread Parker:: mutex is a classic leaf-lock.
5870 // In particular a thread must never block on the Threads_lock while
5871 // holding the Parker:: mutex. If safepoints are pending, both the
5872 // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5873 ThreadBlockInVM tbivm(jt);
5875 // Don't wait if we cannot get the lock, since interference arises from
5876 // unblocking. Also, check interrupt before trying to wait.
5877 if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
5878 return;
5879 }
5881 int status ;
5882 if (_counter > 0) { // no wait needed
5883 _counter = 0;
5884 status = pthread_mutex_unlock(_mutex);
5885 assert (status == 0, "invariant") ;
5886 // Paranoia to ensure our locked and lock-free paths interact
5887 // correctly with each other and Java-level accesses.
5888 OrderAccess::fence();
5889 return;
5890 }
5892 #ifdef ASSERT
5893 // Don't catch signals while blocked; let the running threads have the signals.
5894 // (This allows a debugger to break into the running thread.)
5895 sigset_t oldsigs;
5896 sigset_t* allowdebug_blocked = os::Linux::allowdebug_blocked_signals();
5897 pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5898 #endif
5900 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5901 jt->set_suspend_equivalent();
5902 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5904 assert(_cur_index == -1, "invariant");
5905 if (time == 0) {
5906 _cur_index = REL_INDEX; // arbitrary choice when not timed
5907 status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
5908 } else {
5909 _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
5910 status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
5911 if (status != 0 && WorkAroundNPTLTimedWaitHang) {
5912 pthread_cond_destroy (&_cond[_cur_index]) ;
5913 pthread_cond_init (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
5914 }
5915 }
5916 _cur_index = -1;
5917 assert_status(status == 0 || status == EINTR ||
5918 status == ETIME || status == ETIMEDOUT,
5919 status, "cond_timedwait");
5921 #ifdef ASSERT
5922 pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
5923 #endif
5925 _counter = 0 ;
5926 status = pthread_mutex_unlock(_mutex) ;
5927 assert_status(status == 0, status, "invariant") ;
5928 // Paranoia to ensure our locked and lock-free paths interact
5929 // correctly with each other and Java-level accesses.
5930 OrderAccess::fence();
5932 // If externally suspended while waiting, re-suspend
5933 if (jt->handle_special_suspend_equivalent_condition()) {
5934 jt->java_suspend_self();
5935 }
5936 }
5938 void Parker::unpark() {
5939 int s, status ;
5940 status = pthread_mutex_lock(_mutex);
5941 assert (status == 0, "invariant") ;
5942 s = _counter;
5943 _counter = 1;
5944 if (s < 1) {
5945 // thread might be parked
5946 if (_cur_index != -1) {
5947 // thread is definitely parked
5948 if (WorkAroundNPTLTimedWaitHang) {
5949 status = pthread_cond_signal (&_cond[_cur_index]);
5950 assert (status == 0, "invariant");
5951 status = pthread_mutex_unlock(_mutex);
5952 assert (status == 0, "invariant");
5953 } else {
5954 status = pthread_mutex_unlock(_mutex);
5955 assert (status == 0, "invariant");
5956 status = pthread_cond_signal (&_cond[_cur_index]);
5957 assert (status == 0, "invariant");
5958 }
5959 } else {
5960 status = pthread_mutex_unlock(_mutex);
5961 assert (status == 0, "invariant");
5962 }
5963 } else {
5964 status = pthread_mutex_unlock(_mutex);
5965 assert (status == 0, "invariant");
5966 }
5967 }
5970 extern char** environ;
5972 #ifndef __NR_fork
5973 #define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
5974 #endif
5976 #ifndef __NR_execve
5977 #define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
5978 #endif
5980 // Run the specified command in a separate process. Return its exit value,
5981 // or -1 on failure (e.g. can't fork a new process).
5982 // Unlike system(), this function can be called from a signal handler. It
5983 // doesn't block SIGINT et al.
5984 int os::fork_and_exec(char* cmd) {
5985 const char * argv[4] = {"sh", "-c", cmd, NULL};
5987 // fork() in LinuxThreads/NPTL is not async-safe. It needs to run
5988 // pthread_atfork handlers and reset the pthread library. All we need is a
5989 // separate process to execve. Make a direct syscall to fork the process.
5990 // On IA64 there's no fork syscall; we have to use fork() and hope for
5991 // the best...
5992 pid_t pid = NOT_IA64(syscall(__NR_fork);)
5993 IA64_ONLY(fork();)
5995 if (pid < 0) {
5996 // fork failed
5997 return -1;
5999 } else if (pid == 0) {
6000 // child process
6002 // execve() in LinuxThreads will call pthread_kill_other_threads_np()
6003 // first to kill every thread on the thread list. Because this list is
6004 // not reset by fork() (see notes above), execve() will instead kill
6005 // every thread in the parent process. We know this is the only thread
6006 // in the new process, so make a system call directly.
6007 // IA64 should use normal execve() from glibc to match the glibc fork()
6008 // above.
6009 NOT_IA64(syscall(__NR_execve, "/bin/sh", argv, environ);)
6010 IA64_ONLY(execve("/bin/sh", (char* const*)argv, environ);)
6012 // execve failed
6013 _exit(-1);
6015 } else {
6016 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6017 // care about the actual exit code, for now.
6019 int status;
6021 // Wait for the child process to exit. This returns immediately if
6022 // the child has already exited.
6023 while (waitpid(pid, &status, 0) < 0) {
6024 switch (errno) {
6025 case ECHILD: return 0;
6026 case EINTR: break;
6027 default: return -1;
6028 }
6029 }
6031 if (WIFEXITED(status)) {
6032 // The child exited normally; get its exit code.
6033 return WEXITSTATUS(status);
6034 } else if (WIFSIGNALED(status)) {
6035 // The child exited because of a signal
6036 // The best value to return is 0x80 + signal number,
6037 // because that is what all Unix shells do, and because
6038 // it allows callers to distinguish between process exit and
6039 // process death by signal.
6040 return 0x80 + WTERMSIG(status);
6041 } else {
6042 // Unknown exit code; pass it through
6043 return status;
6044 }
6045 }
6046 }
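// Illustrative (hypothetical) use of fork_and_exec(): run a shell command
// and decode the result per the convention above.
//
//   int rc = os::fork_and_exec((char*)"ls /tmp > /dev/null");
//   if (rc == -1)        { /* could not fork/exec */ }
//   else if (rc >= 0x80) { /* died on signal rc - 0x80, e.g. 0x89 = SIGKILL */ }
//   else                 { /* normal exit status, 0 on success */ }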
6048 // is_headless_jre()
6049 //
6050 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6051 // in order to report whether we are running in a headless JRE.
6052 //
6053 // Since JDK8, xawt/libmawt.so has been moved into the same directory
6054 // as libawt.so and renamed libawt_xawt.so.
6055 //
6056 bool os::is_headless_jre() {
6057 struct stat statbuf;
6058 char buf[MAXPATHLEN];
6059 char libmawtpath[MAXPATHLEN];
6060 const char *xawtstr = "/xawt/libmawt.so";
6061 const char *new_xawtstr = "/libawt_xawt.so";
6062 char *p;
6064 // Get path to libjvm.so
6065 os::jvm_path(buf, sizeof(buf));
6067 // Get rid of libjvm.so
6068 p = strrchr(buf, '/');
6069 if (p == NULL) return false;
6070 else *p = '\0';
6072 // Get rid of client or server
6073 p = strrchr(buf, '/');
6074 if (p == NULL) return false;
6075 else *p = '\0';
6077 // check xawt/libmawt.so
6078 strcpy(libmawtpath, buf);
6079 strcat(libmawtpath, xawtstr);
6080 if (::stat(libmawtpath, &statbuf) == 0) return false;
6082 // check libawt_xawt.so
6083 strcpy(libmawtpath, buf);
6084 strcat(libmawtpath, new_xawtstr);
6085 if (::stat(libmawtpath, &statbuf) == 0) return false;
6087 return true;
6088 }
6090 // Get the default path to the core file
6091 // Returns the length of the string
6092 int os::get_core_path(char* buffer, size_t bufferSize) {
6093 const char* p = get_current_directory(buffer, bufferSize);
6095 if (p == NULL) {
6096 assert(p != NULL, "failed to get current directory");
6097 return 0;
6098 }
6100 return strlen(buffer);
6101 }
6103 #ifdef JAVASE_EMBEDDED
6104 //
6105 // A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
6106 //
6107 MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
6109 // ctor
6110 //
6111 MemNotifyThread::MemNotifyThread(int fd): Thread() {
6112 assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
6113 _fd = fd;
6115 if (os::create_thread(this, os::os_thread)) {
6116 _memnotify_thread = this;
6117 os::set_priority(this, NearMaxPriority);
6118 os::start_thread(this);
6119 }
6120 }
6122 // Where all the work gets done
6123 //
6124 void MemNotifyThread::run() {
6125 assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
6127 // Set up the select arguments
6128 fd_set rfds;
6129 if (_fd != -1) {
6130 FD_ZERO(&rfds);
6131 FD_SET(_fd, &rfds);
6132 }
6134 // Now wait for the mem_notify device to wake up
6135 while (1) {
6136 // Wait for the mem_notify device to signal us..
6137 int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
6138 if (rc == -1) {
6139 perror("select!\n");
6140 break;
6141 } else if (rc) {
6142 //ssize_t free_before = os::available_memory();
6143 //tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
6145 // The kernel is telling us there is not much memory left...
6146 // try to do something about that
6148 // If we are not already in a GC, try one.
6149 if (!Universe::heap()->is_gc_active()) {
6150 Universe::heap()->collect(GCCause::_allocation_failure);
6152 //ssize_t free_after = os::available_memory();
6153 //tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
6154 //tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
6155 }
6156 // We might want to do something like the following if we find the GC's are not helping...
6157 // Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
6158 }
6159 }
6160 }
6162 //
6163 // See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
6164 //
6165 void MemNotifyThread::start() {
6166 int fd;
6167 fd = open ("/dev/mem_notify", O_RDONLY, 0);
6168 if (fd < 0) {
6169 return;
6170 }
6172 if (memnotify_thread() == NULL) {
6173 new MemNotifyThread(fd);
6174 }
6175 }
6177 #endif // JAVASE_EMBEDDED
6180 /////////////// Unit tests ///////////////
6182 #ifndef PRODUCT
6184 #define test_log(...) \
6185 do {\
6186 if (VerboseInternalVMTests) { \
6187 tty->print_cr(__VA_ARGS__); \
6188 tty->flush(); \
6189 }\
6190 } while (false)
6192 class TestReserveMemorySpecial : AllStatic {
6193 public:
6194 static void small_page_write(void* addr, size_t size) {
6195 size_t page_size = os::vm_page_size();
6197 char* end = (char*)addr + size;
6198 for (char* p = (char*)addr; p < end; p += page_size) {
6199 *p = 1;
6200 }
6201 }
6203 static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
6204 if (!UseHugeTLBFS) {
6205 return;
6206 }
6208 test_log("test_reserve_memory_special_huge_tlbfs_only(" SIZE_FORMAT ")", size);
6210 char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);
6212 if (addr != NULL) {
6213 small_page_write(addr, size);
6215 os::Linux::release_memory_special_huge_tlbfs(addr, size);
6216 }
6217 }
6219 static void test_reserve_memory_special_huge_tlbfs_only() {
6220 if (!UseHugeTLBFS) {
6221 return;
6222 }
6224 size_t lp = os::large_page_size();
6226 for (size_t size = lp; size <= lp * 10; size += lp) {
6227 test_reserve_memory_special_huge_tlbfs_only(size);
6228 }
6229 }
6231 static void test_reserve_memory_special_huge_tlbfs_mixed(size_t size, size_t alignment) {
6232 if (!UseHugeTLBFS) {
6233 return;
6234 }
6236 test_log("test_reserve_memory_special_huge_tlbfs_mixed(" SIZE_FORMAT ", " SIZE_FORMAT ")",
6237 size, alignment);
6239 assert(size >= os::large_page_size(), "Incorrect input to test");
6241 char* addr = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
6243 if (addr != NULL) {
6244 small_page_write(addr, size);
6246 os::Linux::release_memory_special_huge_tlbfs(addr, size);
6247 }
6248 }
6250 static void test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(size_t size) {
6252 size_t ag = os::vm_allocation_granularity();
6254 for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
6255 test_reserve_memory_special_huge_tlbfs_mixed(size, alignment);
6256 }
6257 }
6259 static void test_reserve_memory_special_huge_tlbfs_mixed() {
6260 size_t lp = os::large_page_size();
6261 size_t ag = os::vm_allocation_granularity();
6263 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp);
6264 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + ag);
6265 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp + lp / 2);
6266 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2);
6267 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + ag);
6268 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 - ag);
6269 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 2 + lp / 2);
6270 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10);
6271 test_reserve_memory_special_huge_tlbfs_mixed_all_alignments(lp * 10 + lp / 2);
6272 }
6274 static void test_reserve_memory_special_huge_tlbfs() {
6275 if (!UseHugeTLBFS) {
6276 return;
6277 }
6279 test_reserve_memory_special_huge_tlbfs_only();
6280 test_reserve_memory_special_huge_tlbfs_mixed();
6281 }
6283 static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
6284 if (!UseSHM) {
6285 return;
6286 }
6288 test_log("test_reserve_memory_special_shm(" SIZE_FORMAT ", " SIZE_FORMAT ")", size, alignment);
6290 char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);
6292 if (addr != NULL) {
6293 assert(is_ptr_aligned(addr, alignment), "Check");
6294 assert(is_ptr_aligned(addr, os::large_page_size()), "Check");
6296 small_page_write(addr, size);
6298 os::Linux::release_memory_special_shm(addr, size);
6299 }
6300 }
6302 static void test_reserve_memory_special_shm() {
6303 size_t lp = os::large_page_size();
6304 size_t ag = os::vm_allocation_granularity();
6306 for (size_t size = ag; size < lp * 3; size += ag) {
6307 for (size_t alignment = ag; is_size_aligned(size, alignment); alignment *= 2) {
6308 test_reserve_memory_special_shm(size, alignment);
6309 }
6310 }
6311 }
6313 static void test() {
6314 test_reserve_memory_special_huge_tlbfs();
6315 test_reserve_memory_special_shm();
6316 }
6317 };
6319 void TestReserveMemorySpecial_test() {
6320 TestReserveMemorySpecial::test();
6321 }
6323 #endif