/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_bsd.h"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_bsd.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_bsd.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
// NOTE(review): the original header names were stripped by the extraction
// that produced this file; they are restored below from the canonical
// upstream layout of this platform file — verify against the repo history.
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <sys/select.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/times.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <time.h>
# include <pwd.h>
# include <poll.h>
# include <semaphore.h>
# include <fcntl.h>
# include <string.h>
# include <sys/param.h>
# include <sys/sysctl.h>
# include <sys/ipc.h>
# include <sys/shm.h>
#ifndef __APPLE__
# include <link.h>
#endif
# include <stdint.h>
# include <inttypes.h>
# include <sys/ioctl.h>
# include <sys/syscall.h>

#if defined(__FreeBSD__) || defined(__NetBSD__)
# include <elf.h>
#endif

#ifdef __APPLE__
# include <mach/mach.h> // semaphore_* API
# include <mach-o/dyld.h>
# include <sys/proc_info.h>
# include <objc/objc-auto.h>
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#define MAX_PATH    (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

#define LARGEPAGES_BIT (1 << 6)

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

////////////////////////////////////////////////////////////////////////////////
// global variables
julong os::Bsd::_physical_memory = 0;

#ifdef __APPLE__
mach_timebase_info_data_t os::Bsd::_timebase_info = {0, 0};
volatile uint64_t         os::Bsd::_max_abstime   = 0;
#else
int (*os::Bsd::_clock_gettime)(clockid_t, struct timespec *) = NULL;
#endif
pthread_t os::Bsd::_main_thread;
int os::Bsd::_page_size = -1;

static jlong initial_time_count = 0;

static int clock_tics_per_sec = 100;

// For diagnostics to print a message once.
see run_periodic_checks aoqi@0: static sigset_t check_signal_done; aoqi@0: static bool check_signals = true; aoqi@0: aoqi@0: static pid_t _initial_pid = 0; aoqi@0: aoqi@0: /* Signal number used to suspend/resume a thread */ aoqi@0: aoqi@0: /* do not use any signal number less than SIGSEGV, see 4355769 */ aoqi@0: static int SR_signum = SIGUSR2; aoqi@0: sigset_t SR_sigset; aoqi@0: aoqi@0: aoqi@0: //////////////////////////////////////////////////////////////////////////////// aoqi@0: // utility functions aoqi@0: aoqi@0: static int SR_initialize(); aoqi@0: static void unpackTime(timespec* absTime, bool isAbsolute, jlong time); aoqi@0: aoqi@0: julong os::available_memory() { aoqi@0: return Bsd::available_memory(); aoqi@0: } aoqi@0: aoqi@0: // available here means free aoqi@0: julong os::Bsd::available_memory() { aoqi@0: uint64_t available = physical_memory() >> 2; aoqi@0: #ifdef __APPLE__ aoqi@0: mach_msg_type_number_t count = HOST_VM_INFO64_COUNT; aoqi@0: vm_statistics64_data_t vmstat; aoqi@0: kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64, aoqi@0: (host_info64_t)&vmstat, &count); aoqi@0: assert(kerr == KERN_SUCCESS, aoqi@0: "host_statistics64 failed - check mach_host_self() and count"); aoqi@0: if (kerr == KERN_SUCCESS) { aoqi@0: available = vmstat.free_count * os::vm_page_size(); aoqi@0: } aoqi@0: #endif aoqi@0: return available; aoqi@0: } aoqi@0: aoqi@0: julong os::physical_memory() { aoqi@0: return Bsd::physical_memory(); aoqi@0: } aoqi@0: aoqi@0: //////////////////////////////////////////////////////////////////////////////// aoqi@0: // environment support aoqi@0: aoqi@0: bool os::getenv(const char* name, char* buf, int len) { aoqi@0: const char* val = ::getenv(name); aoqi@0: if (val != NULL && strlen(val) < (size_t)len) { aoqi@0: strcpy(buf, val); aoqi@0: return true; aoqi@0: } aoqi@0: if (len > 0) buf[0] = 0; // return a null string aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: // Return true if user is running as root. 
// Returns true when real and effective ids differ, i.e. the process is
// running set-uid/set-gid. Computed once and cached (single-threaded init).
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}



// Cpu architecture string
#if   defined(ZERO)
static char cpu_arch[] = ZERO_LIBARCH;
#elif defined(IA64)
static char cpu_arch[] = "ia64";
#elif defined(IA32)
static char cpu_arch[] = "i386";
#elif defined(AMD64)
static char cpu_arch[] = "amd64";
#elif defined(ARM)
static char cpu_arch[] = "arm";
#elif defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(SPARC)
#  ifdef _LP64
static char cpu_arch[] = "sparcv9";
#  else
static char cpu_arch[] = "sparc";
#  endif
#else
#error Add appropriate cpu_arch setting
#endif

// Compiler variant
#ifdef COMPILER2
#define COMPILER_VARIANT "server"
#else
#define COMPILER_VARIANT "client"
#endif


// Query processor count and physical memory size via sysctl(3) and cache
// them in os::Bsd statics. Falls back to 1 cpu / 256MB when sysctl fails.
void os::Bsd::initialize_system_info() {
  int mib[2];
  size_t len;
  int cpu_val;
  julong mem_val;

  /* get processors count via hw.ncpus sysctl */
  mib[0] = CTL_HW;
  mib[1] = HW_NCPU;
  len = sizeof(cpu_val);
  if (sysctl(mib, 2, &cpu_val, &len, NULL, 0) != -1 && cpu_val >= 1) {
    assert(len == sizeof(cpu_val), "unexpected data size");
    set_processor_count(cpu_val);
  }
  else {
    set_processor_count(1);   // fallback
  }

  /* get physical memory via hw.memsize sysctl (hw.memsize is used
   * since it returns a 64 bit value)
   */
  mib[0] = CTL_HW;

#if defined (HW_MEMSIZE) // Apple
  mib[1] = HW_MEMSIZE;
#elif defined(HW_PHYSMEM) // Most of BSD
  mib[1] = HW_PHYSMEM;
#elif defined(HW_REALMEM) // Old FreeBSD
  mib[1] = HW_REALMEM;
#else
#error No ways to get physmem
#endif

  len = sizeof(mem_val);
  if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
    assert(len == sizeof(mem_val), "unexpected data size");
    _physical_memory = mem_val;
  } else {
    _physical_memory = 256*1024*1024;       // fallback (XXXBSD?)
  }

#ifdef __OpenBSD__
  {
    // limit _physical_memory memory view on OpenBSD since
    // datasize rlimit restricts us anyway.
    struct rlimit limits;
    getrlimit(RLIMIT_DATA, &limits);
    _physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
  }
#endif
}

#ifdef __APPLE__
// Home directory of the effective user: $HOME if set and non-empty,
// otherwise the passwd entry for the effective uid.
static const char *get_home() {
  const char *home_dir = ::getenv("HOME");
  if ((home_dir == NULL) || (*home_dir == '\0')) {
    struct passwd *passwd_info = getpwuid(geteuid());
    if (passwd_info != NULL) {
      home_dir = passwd_info->pw_dir;
    }
  }

  return home_dir;
}
#endif

void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // See ld(1):
  //      The linker uses the following search paths to locate required
  //      shared libraries:
  //        1: ...
  //        ...
  //        7: The default directories, normally /lib and /usr/lib.
#ifndef DEFAULT_LIBPATH
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif

// Base path of extensions installed on the system.
#define SYS_EXT_DIR     "/usr/java/packages"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

#ifndef __APPLE__

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accomodate linking restrictions
  // on legacy Bsd implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  // Eventually, all the library path setting will be done here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepended it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    const char *v = ::getenv("LD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }
    // That's +1 for the colon and +1 for the trailing '\0'.
    char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
                                                     strlen(v) + 1 +
                                                     sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
                                                     mtInternal);
    sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#else // __APPLE__

#define SYS_EXTENSIONS_DIR   "/Library/Java/Extensions"
#define SYS_EXTENSIONS_DIRS  SYS_EXTENSIONS_DIR ":/Network" SYS_EXTENSIONS_DIR ":/System" SYS_EXTENSIONS_DIR ":/usr/lib/java"

  const char *user_home_dir = get_home();
  // The null in SYS_EXTENSIONS_DIRS counts for the size of the colon after user_home_dir.
  size_t system_ext_size = strlen(user_home_dir) + sizeof(SYS_EXTENSIONS_DIR) +
    sizeof(SYS_EXTENSIONS_DIRS);

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // for dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + system_ext_size, // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accomodate linking restrictions
  // on legacy Bsd implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  // Eventually, all the library path setting will be done here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepended it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    // Prepend the default path with the JAVA_LIBRARY_PATH so that the app launcher code
    // can specify a directory inside an app wrapper
    const char *l = ::getenv("JAVA_LIBRARY_PATH");
    const char *l_colon = ":";
    if (l == NULL) { l = ""; l_colon = ""; }

    const char *v = ::getenv("DYLD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }

    // Apple's Java6 has "." at the beginning of java.library.path.
    // OpenJDK on Windows has "." at the end of java.library.path.
    // OpenJDK on Linux and Solaris don't have "." in java.library.path
    // at all. To ease the transition from Apple's Java6 to OpenJDK7,
    // "." is appended to the end of java.library.path. Yes, this
    // could cause a change in behavior, but Apple's Java6 behavior
    // can be achieved by putting "." at the beginning of the
    // JAVA_LIBRARY_PATH environment variable.
    char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char,
                                                     strlen(v) + 1 + strlen(l) + 1 +
                                                     system_ext_size + 3,
                                                     mtInternal);
    sprintf(ld_library_path, "%s%s%s%s%s" SYS_EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS ":.",
            v, v_colon, l, l_colon, user_home_dir);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);
  }

  // Extensions directories.
  //
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator (so actually one byte more
  // than necessary is allocated).
  sprintf(buf, "%s" SYS_EXTENSIONS_DIR ":%s" EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS,
          user_home_dir, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef SYS_EXTENSIONS_DIR
#undef SYS_EXTENSIONS_DIRS

#endif // __APPLE__

#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

// True when the current disposition of 'sig' is SIG_IGN.
bool os::Bsd::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

void os::Bsd::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);

}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
aoqi@0: sigset_t* os::Bsd::unblocked_signals() { aoqi@0: assert(signal_sets_initialized, "Not initialized"); aoqi@0: return &unblocked_sigs; aoqi@0: } aoqi@0: aoqi@0: // These are the signals that are blocked while a (non-VM) thread is aoqi@0: // running Java. Only the VM thread handles these signals. aoqi@0: sigset_t* os::Bsd::vm_signals() { aoqi@0: assert(signal_sets_initialized, "Not initialized"); aoqi@0: return &vm_sigs; aoqi@0: } aoqi@0: aoqi@0: // These are signals that are blocked during cond_wait to allow debugger in aoqi@0: sigset_t* os::Bsd::allowdebug_blocked_signals() { aoqi@0: assert(signal_sets_initialized, "Not initialized"); aoqi@0: return &allowdebug_blocked_sigs; aoqi@0: } aoqi@0: aoqi@0: void os::Bsd::hotspot_sigmask(Thread* thread) { aoqi@0: aoqi@0: //Save caller's signal mask before setting VM signal mask aoqi@0: sigset_t caller_sigmask; aoqi@0: pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask); aoqi@0: aoqi@0: OSThread* osthread = thread->osthread(); aoqi@0: osthread->set_caller_sigmask(caller_sigmask); aoqi@0: aoqi@0: pthread_sigmask(SIG_UNBLOCK, os::Bsd::unblocked_signals(), NULL); aoqi@0: aoqi@0: if (!ReduceSignalUsage) { aoqi@0: if (thread->is_VM_thread()) { aoqi@0: // Only the VM thread handles BREAK_SIGNAL ... aoqi@0: pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL); aoqi@0: } else { aoqi@0: // ... 
all other threads block BREAK_SIGNAL aoqi@0: pthread_sigmask(SIG_BLOCK, vm_signals(), NULL); aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: aoqi@0: ////////////////////////////////////////////////////////////////////////////// aoqi@0: // create new thread aoqi@0: aoqi@0: // check if it's safe to start a new thread aoqi@0: static bool _thread_safety_check(Thread* thread) { aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: // library handle for calling objc_registerThreadWithCollector() aoqi@0: // without static linking to the libobjc library aoqi@0: #define OBJC_LIB "/usr/lib/libobjc.dylib" aoqi@0: #define OBJC_GCREGISTER "objc_registerThreadWithCollector" aoqi@0: typedef void (*objc_registerThreadWithCollector_t)(); aoqi@0: extern "C" objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction; aoqi@0: objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction = NULL; aoqi@0: #endif aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: static uint64_t locate_unique_thread_id(mach_port_t mach_thread_port) { aoqi@0: // Additional thread_id used to correlate threads in SA aoqi@0: thread_identifier_info_data_t m_ident_info; aoqi@0: mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT; aoqi@0: aoqi@0: thread_info(mach_thread_port, THREAD_IDENTIFIER_INFO, aoqi@0: (thread_info_t) &m_ident_info, &count); aoqi@0: aoqi@0: return m_ident_info.thread_id; aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: // Thread start routine for all newly created threads aoqi@0: static void *java_start(Thread *thread) { aoqi@0: // Try to randomize the cache line index of hot stack frames. aoqi@0: // This helps when threads of the same stack traces evict each other's aoqi@0: // cache lines. The threads can be either from the same JVM instance, or aoqi@0: // from different JVM instances. The benefit is especially true for aoqi@0: // processors with hyperthreading technology. 
aoqi@0: static int counter = 0; aoqi@0: int pid = os::current_process_id(); aoqi@0: alloca(((pid ^ counter++) & 7) * 128); aoqi@0: aoqi@0: ThreadLocalStorage::set_thread(thread); aoqi@0: aoqi@0: OSThread* osthread = thread->osthread(); aoqi@0: Monitor* sync = osthread->startThread_lock(); aoqi@0: aoqi@0: // non floating stack BsdThreads needs extra check, see above aoqi@0: if (!_thread_safety_check(thread)) { aoqi@0: // notify parent thread aoqi@0: MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag); aoqi@0: osthread->set_state(ZOMBIE); aoqi@0: sync->notify_all(); aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: osthread->set_thread_id(os::Bsd::gettid()); aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id()); aoqi@0: guarantee(unique_thread_id != 0, "unique thread id was not found"); aoqi@0: osthread->set_unique_thread_id(unique_thread_id); aoqi@0: #endif aoqi@0: // initialize signal mask for this thread aoqi@0: os::Bsd::hotspot_sigmask(thread); aoqi@0: aoqi@0: // initialize floating point control register aoqi@0: os::Bsd::init_thread_fpu_state(); aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: // register thread with objc gc aoqi@0: if (objc_registerThreadWithCollectorFunction != NULL) { aoqi@0: objc_registerThreadWithCollectorFunction(); aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: // handshaking with parent thread aoqi@0: { aoqi@0: MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag); aoqi@0: aoqi@0: // notify parent thread aoqi@0: osthread->set_state(INITIALIZED); aoqi@0: sync->notify_all(); aoqi@0: aoqi@0: // wait until os::start_thread() aoqi@0: while (osthread->get_state() == INITIALIZED) { aoqi@0: sync->wait(Mutex::_no_safepoint_check_flag); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // call one more level start routine aoqi@0: thread->run(); aoqi@0: aoqi@0: return 0; aoqi@0: } aoqi@0: aoqi@0: bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { aoqi@0: assert(thread->osthread() == 
NULL, "caller responsible"); aoqi@0: aoqi@0: // Allocate the OSThread object aoqi@0: OSThread* osthread = new OSThread(NULL, NULL); aoqi@0: if (osthread == NULL) { aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: // set the correct thread state aoqi@0: osthread->set_thread_type(thr_type); aoqi@0: aoqi@0: // Initial state is ALLOCATED but not INITIALIZED aoqi@0: osthread->set_state(ALLOCATED); aoqi@0: aoqi@0: thread->set_osthread(osthread); aoqi@0: aoqi@0: // init thread attributes aoqi@0: pthread_attr_t attr; aoqi@0: pthread_attr_init(&attr); aoqi@0: pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); aoqi@0: aoqi@0: // stack size aoqi@0: if (os::Bsd::supports_variable_stack_size()) { aoqi@0: // calculate stack size if it's not specified by caller aoqi@0: if (stack_size == 0) { aoqi@0: stack_size = os::Bsd::default_stack_size(thr_type); aoqi@0: aoqi@0: switch (thr_type) { aoqi@0: case os::java_thread: aoqi@0: // Java threads use ThreadStackSize which default value can be aoqi@0: // changed with the flag -Xss aoqi@0: assert (JavaThread::stack_size_at_create() > 0, "this should be set"); aoqi@0: stack_size = JavaThread::stack_size_at_create(); aoqi@0: break; aoqi@0: case os::compiler_thread: aoqi@0: if (CompilerThreadStackSize > 0) { aoqi@0: stack_size = (size_t)(CompilerThreadStackSize * K); aoqi@0: break; aoqi@0: } // else fall through: aoqi@0: // use VMThreadStackSize if CompilerThreadStackSize is not defined aoqi@0: case os::vm_thread: aoqi@0: case os::pgc_thread: aoqi@0: case os::cgc_thread: aoqi@0: case os::watcher_thread: aoqi@0: if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); aoqi@0: break; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: stack_size = MAX2(stack_size, os::Bsd::min_stack_allowed); aoqi@0: pthread_attr_setstacksize(&attr, stack_size); aoqi@0: } else { aoqi@0: // let pthread_create() pick the default value. 
aoqi@0: } aoqi@0: aoqi@0: ThreadState state; aoqi@0: aoqi@0: { aoqi@0: pthread_t tid; aoqi@0: int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread); aoqi@0: aoqi@0: pthread_attr_destroy(&attr); aoqi@0: aoqi@0: if (ret != 0) { aoqi@0: if (PrintMiscellaneous && (Verbose || WizardMode)) { aoqi@0: perror("pthread_create()"); aoqi@0: } aoqi@0: // Need to clean up stuff we've allocated so far aoqi@0: thread->set_osthread(NULL); aoqi@0: delete osthread; aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: // Store pthread info into the OSThread aoqi@0: osthread->set_pthread_id(tid); aoqi@0: aoqi@0: // Wait until child thread is either initialized or aborted aoqi@0: { aoqi@0: Monitor* sync_with_child = osthread->startThread_lock(); aoqi@0: MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag); aoqi@0: while ((state = osthread->get_state()) == ALLOCATED) { aoqi@0: sync_with_child->wait(Mutex::_no_safepoint_check_flag); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: } aoqi@0: aoqi@0: // Aborted due to thread limit being reached aoqi@0: if (state == ZOMBIE) { aoqi@0: thread->set_osthread(NULL); aoqi@0: delete osthread; aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: // The thread is returned suspended (in state INITIALIZED), aoqi@0: // and is started higher up in the call chain aoqi@0: assert(state == INITIALIZED, "race condition"); aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: ///////////////////////////////////////////////////////////////////////////// aoqi@0: // attach existing thread aoqi@0: aoqi@0: // bootstrap the main thread aoqi@0: bool os::create_main_thread(JavaThread* thread) { aoqi@0: assert(os::Bsd::_main_thread == pthread_self(), "should be called inside main thread"); aoqi@0: return create_attached_thread(thread); aoqi@0: } aoqi@0: aoqi@0: bool os::create_attached_thread(JavaThread* thread) { aoqi@0: #ifdef ASSERT aoqi@0: thread->verify_not_published(); aoqi@0: #endif aoqi@0: aoqi@0: // Allocate the OSThread object aoqi@0: OSThread* 
osthread = new OSThread(NULL, NULL); aoqi@0: aoqi@0: if (osthread == NULL) { aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: osthread->set_thread_id(os::Bsd::gettid()); aoqi@0: aoqi@0: // Store pthread info into the OSThread aoqi@0: #ifdef __APPLE__ aoqi@0: uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id()); aoqi@0: guarantee(unique_thread_id != 0, "just checking"); aoqi@0: osthread->set_unique_thread_id(unique_thread_id); aoqi@0: #endif aoqi@0: osthread->set_pthread_id(::pthread_self()); aoqi@0: aoqi@0: // initialize floating point control register aoqi@0: os::Bsd::init_thread_fpu_state(); aoqi@0: aoqi@0: // Initial thread state is RUNNABLE aoqi@0: osthread->set_state(RUNNABLE); aoqi@0: aoqi@0: thread->set_osthread(osthread); aoqi@0: aoqi@0: // initialize signal mask for this thread aoqi@0: // and save the caller's signal mask aoqi@0: os::Bsd::hotspot_sigmask(thread); aoqi@0: aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: void os::pd_start_thread(Thread* thread) { aoqi@0: OSThread * osthread = thread->osthread(); aoqi@0: assert(osthread->get_state() != INITIALIZED, "just checking"); aoqi@0: Monitor* sync_with_child = osthread->startThread_lock(); aoqi@0: MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag); aoqi@0: sync_with_child->notify(); aoqi@0: } aoqi@0: aoqi@0: // Free Bsd resources related to the OSThread aoqi@0: void os::free_thread(OSThread* osthread) { aoqi@0: assert(osthread != NULL, "osthread not set"); aoqi@0: aoqi@0: if (Thread::current()->osthread() == osthread) { aoqi@0: // Restore caller's signal mask aoqi@0: sigset_t sigmask = osthread->caller_sigmask(); aoqi@0: pthread_sigmask(SIG_SETMASK, &sigmask, NULL); aoqi@0: } aoqi@0: aoqi@0: delete osthread; aoqi@0: } aoqi@0: aoqi@0: ////////////////////////////////////////////////////////////////////////////// aoqi@0: // thread local storage aoqi@0: aoqi@0: // Restore the thread pointer if the destructor is called. 
This is in case aoqi@0: // someone from JNI code sets up a destructor with pthread_key_create to run aoqi@0: // detachCurrentThread on thread death. Unless we restore the thread pointer we aoqi@0: // will hang or crash. When detachCurrentThread is called the key will be set aoqi@0: // to null and we will not be called again. If detachCurrentThread is never aoqi@0: // called we could loop forever depending on the pthread implementation. aoqi@0: static void restore_thread_pointer(void* p) { aoqi@0: Thread* thread = (Thread*) p; aoqi@0: os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread); aoqi@0: } aoqi@0: aoqi@0: int os::allocate_thread_local_storage() { aoqi@0: pthread_key_t key; aoqi@0: int rslt = pthread_key_create(&key, restore_thread_pointer); aoqi@0: assert(rslt == 0, "cannot allocate thread local storage"); aoqi@0: return (int)key; aoqi@0: } aoqi@0: aoqi@0: // Note: This is currently not used by VM, as we don't destroy TLS key aoqi@0: // on VM exit. aoqi@0: void os::free_thread_local_storage(int index) { aoqi@0: int rslt = pthread_key_delete((pthread_key_t)index); aoqi@0: assert(rslt == 0, "invalid index"); aoqi@0: } aoqi@0: aoqi@0: void os::thread_local_storage_at_put(int index, void* value) { aoqi@0: int rslt = pthread_setspecific((pthread_key_t)index, value); aoqi@0: assert(rslt == 0, "pthread_setspecific failed"); aoqi@0: } aoqi@0: aoqi@0: extern "C" Thread* get_thread() { aoqi@0: return ThreadLocalStorage::thread(); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: //////////////////////////////////////////////////////////////////////////////// aoqi@0: // time support aoqi@0: aoqi@0: // Time since start-up in seconds to a fine granularity. aoqi@0: // Used by VMSelfDestructTimer and the MemProfiler. 
aoqi@0: double os::elapsedTime() { aoqi@0: aoqi@0: return ((double)os::elapsed_counter()) / os::elapsed_frequency(); aoqi@0: } aoqi@0: aoqi@0: jlong os::elapsed_counter() { aoqi@0: return javaTimeNanos() - initial_time_count; aoqi@0: } aoqi@0: aoqi@0: jlong os::elapsed_frequency() { aoqi@0: return NANOSECS_PER_SEC; // nanosecond resolution aoqi@0: } aoqi@0: aoqi@0: bool os::supports_vtime() { return true; } aoqi@0: bool os::enable_vtime() { return false; } aoqi@0: bool os::vtime_enabled() { return false; } aoqi@0: aoqi@0: double os::elapsedVTime() { aoqi@0: // better than nothing, but not much aoqi@0: return elapsedTime(); aoqi@0: } aoqi@0: aoqi@0: jlong os::javaTimeMillis() { aoqi@0: timeval time; aoqi@0: int status = gettimeofday(&time, NULL); aoqi@0: assert(status != -1, "bsd error"); aoqi@0: return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000); aoqi@0: } aoqi@0: aoqi@0: #ifndef __APPLE__ aoqi@0: #ifndef CLOCK_MONOTONIC aoqi@0: #define CLOCK_MONOTONIC (1) aoqi@0: #endif aoqi@0: #endif aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: void os::Bsd::clock_init() { aoqi@0: mach_timebase_info(&_timebase_info); aoqi@0: } aoqi@0: #else aoqi@0: void os::Bsd::clock_init() { aoqi@0: struct timespec res; aoqi@0: struct timespec tp; aoqi@0: if (::clock_getres(CLOCK_MONOTONIC, &res) == 0 && aoqi@0: ::clock_gettime(CLOCK_MONOTONIC, &tp) == 0) { aoqi@0: // yes, monotonic clock is supported aoqi@0: _clock_gettime = ::clock_gettime; aoqi@0: } aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: aoqi@0: jlong os::javaTimeNanos() { aoqi@0: const uint64_t tm = mach_absolute_time(); aoqi@0: const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom; aoqi@0: const uint64_t prev = Bsd::_max_abstime; aoqi@0: if (now <= prev) { aoqi@0: return prev; // same or retrograde time; aoqi@0: } aoqi@0: const uint64_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&Bsd::_max_abstime, prev); aoqi@0: assert(obsv >= prev, "invariant"); // Monotonicity 
aoqi@0: // If the CAS succeeded then we're done and return "now". aoqi@0: // If the CAS failed and the observed value "obsv" is >= now then aoqi@0: // we should return "obsv". If the CAS failed and now > obsv > prv then aoqi@0: // some other thread raced this thread and installed a new value, in which case aoqi@0: // we could either (a) retry the entire operation, (b) retry trying to install now aoqi@0: // or (c) just return obsv. We use (c). No loop is required although in some cases aoqi@0: // we might discard a higher "now" value in deference to a slightly lower but freshly aoqi@0: // installed obsv value. That's entirely benign -- it admits no new orderings compared aoqi@0: // to (a) or (b) -- and greatly reduces coherence traffic. aoqi@0: // We might also condition (c) on the magnitude of the delta between obsv and now. aoqi@0: // Avoiding excessive CAS operations to hot RW locations is critical. aoqi@0: // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate aoqi@0: return (prev == obsv) ? 
now : obsv; aoqi@0: } aoqi@0: aoqi@0: #else // __APPLE__ aoqi@0: aoqi@0: jlong os::javaTimeNanos() { aoqi@0: if (Bsd::supports_monotonic_clock()) { aoqi@0: struct timespec tp; aoqi@0: int status = Bsd::_clock_gettime(CLOCK_MONOTONIC, &tp); aoqi@0: assert(status == 0, "gettime error"); aoqi@0: jlong result = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec); aoqi@0: return result; aoqi@0: } else { aoqi@0: timeval time; aoqi@0: int status = gettimeofday(&time, NULL); aoqi@0: assert(status != -1, "bsd error"); aoqi@0: jlong usecs = jlong(time.tv_sec) * (1000 * 1000) + jlong(time.tv_usec); aoqi@0: return 1000 * usecs; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: #endif // __APPLE__ aoqi@0: aoqi@0: void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { aoqi@0: if (Bsd::supports_monotonic_clock()) { aoqi@0: info_ptr->max_value = ALL_64_BITS; aoqi@0: aoqi@0: // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past aoqi@0: info_ptr->may_skip_backward = false; // not subject to resetting or drifting aoqi@0: info_ptr->may_skip_forward = false; // not subject to resetting or drifting aoqi@0: } else { aoqi@0: // gettimeofday - based on time in seconds since the Epoch thus does not wrap aoqi@0: info_ptr->max_value = ALL_64_BITS; aoqi@0: aoqi@0: // gettimeofday is a real time clock so it skips aoqi@0: info_ptr->may_skip_backward = true; aoqi@0: info_ptr->may_skip_forward = true; aoqi@0: } aoqi@0: aoqi@0: info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time aoqi@0: } aoqi@0: aoqi@0: // Return the real, user, and system times in seconds from an aoqi@0: // arbitrary fixed point in the past. 
aoqi@0: bool os::getTimesSecs(double* process_real_time, aoqi@0: double* process_user_time, aoqi@0: double* process_system_time) { aoqi@0: struct tms ticks; aoqi@0: clock_t real_ticks = times(&ticks); aoqi@0: aoqi@0: if (real_ticks == (clock_t) (-1)) { aoqi@0: return false; aoqi@0: } else { aoqi@0: double ticks_per_second = (double) clock_tics_per_sec; aoqi@0: *process_user_time = ((double) ticks.tms_utime) / ticks_per_second; aoqi@0: *process_system_time = ((double) ticks.tms_stime) / ticks_per_second; aoqi@0: *process_real_time = ((double) real_ticks) / ticks_per_second; aoqi@0: aoqi@0: return true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: aoqi@0: char * os::local_time_string(char *buf, size_t buflen) { aoqi@0: struct tm t; aoqi@0: time_t long_time; aoqi@0: time(&long_time); aoqi@0: localtime_r(&long_time, &t); aoqi@0: jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", aoqi@0: t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, aoqi@0: t.tm_hour, t.tm_min, t.tm_sec); aoqi@0: return buf; aoqi@0: } aoqi@0: aoqi@0: struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { aoqi@0: return localtime_r(clock, res); aoqi@0: } aoqi@0: aoqi@0: //////////////////////////////////////////////////////////////////////////////// aoqi@0: // runtime exit support aoqi@0: aoqi@0: // Note: os::shutdown() might be called very early during initialization, or aoqi@0: // called from signal handler. Before adding something to os::shutdown(), make aoqi@0: // sure it is async-safe and can handle partially initialized VM. 
aoqi@0: void os::shutdown() { aoqi@0: aoqi@0: // allow PerfMemory to attempt cleanup of any persistent resources aoqi@0: perfMemory_exit(); aoqi@0: aoqi@0: // needs to remove object in file system aoqi@0: AttachListener::abort(); aoqi@0: aoqi@0: // flush buffered output, finish log files aoqi@0: ostream_abort(); aoqi@0: aoqi@0: // Check for abort hook aoqi@0: abort_hook_t abort_hook = Arguments::abort_hook(); aoqi@0: if (abort_hook != NULL) { aoqi@0: abort_hook(); aoqi@0: } aoqi@0: aoqi@0: } aoqi@0: aoqi@0: // Note: os::abort() might be called very early during initialization, or aoqi@0: // called from signal handler. Before adding something to os::abort(), make aoqi@0: // sure it is async-safe and can handle partially initialized VM. aoqi@0: void os::abort(bool dump_core) { aoqi@0: os::shutdown(); aoqi@0: if (dump_core) { aoqi@0: #ifndef PRODUCT aoqi@0: fdStream out(defaultStream::output_fd()); aoqi@0: out.print_raw("Current thread is "); aoqi@0: char buf[16]; aoqi@0: jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id()); aoqi@0: out.print_raw_cr(buf); aoqi@0: out.print_raw_cr("Dumping core ..."); aoqi@0: #endif aoqi@0: ::abort(); // dump core aoqi@0: } aoqi@0: aoqi@0: ::exit(1); aoqi@0: } aoqi@0: aoqi@0: // Die immediately, no exit hook, no abort hook, no cleanup. 
aoqi@0: void os::die() { aoqi@0: // _exit() on BsdThreads only kills current thread aoqi@0: ::abort(); aoqi@0: } aoqi@0: aoqi@0: // This method is a copy of JDK's sysGetLastErrorString aoqi@0: // from src/solaris/hpi/src/system_md.c aoqi@0: aoqi@0: size_t os::lasterror(char *buf, size_t len) { aoqi@0: aoqi@0: if (errno == 0) return 0; aoqi@0: aoqi@0: const char *s = ::strerror(errno); aoqi@0: size_t n = ::strlen(s); aoqi@0: if (n >= len) { aoqi@0: n = len - 1; aoqi@0: } aoqi@0: ::strncpy(buf, s, n); aoqi@0: buf[n] = '\0'; aoqi@0: return n; aoqi@0: } aoqi@0: aoqi@0: // Information of current thread in variety of formats aoqi@0: pid_t os::Bsd::gettid() { aoqi@0: int retval = -1; aoqi@0: aoqi@0: #ifdef __APPLE__ //XNU kernel aoqi@0: // despite the fact mach port is actually not a thread id use it aoqi@0: // instead of syscall(SYS_thread_selfid) as it certainly fits to u4 aoqi@0: retval = ::pthread_mach_thread_np(::pthread_self()); aoqi@0: guarantee(retval != 0, "just checking"); aoqi@0: return retval; aoqi@0: dholmes@7808: #else dholmes@7808: #ifdef __FreeBSD__ aoqi@0: retval = syscall(SYS_thr_self); dholmes@7808: #else dholmes@7808: #ifdef __OpenBSD__ aoqi@0: retval = syscall(SYS_getthrid); dholmes@7808: #else dholmes@7808: #ifdef __NetBSD__ aoqi@0: retval = (pid_t) syscall(SYS__lwp_self); dholmes@7808: #endif dholmes@7808: #endif dholmes@7808: #endif aoqi@0: #endif aoqi@0: aoqi@0: if (retval == -1) { aoqi@0: return getpid(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: intx os::current_thread_id() { aoqi@0: #ifdef __APPLE__ aoqi@0: return (intx)::pthread_mach_thread_np(::pthread_self()); aoqi@0: #else aoqi@0: return (intx)::pthread_self(); aoqi@0: #endif aoqi@0: } aoqi@0: aoqi@0: int os::current_process_id() { aoqi@0: aoqi@0: // Under the old bsd thread library, bsd gives each thread aoqi@0: // its own process id. Because of this each thread will return aoqi@0: // a different pid if this method were to return the result aoqi@0: // of getpid(2). 
Bsd provides no api that returns the pid aoqi@0: // of the launcher thread for the vm. This implementation aoqi@0: // returns a unique pid, the pid of the launcher thread aoqi@0: // that starts the vm 'process'. aoqi@0: aoqi@0: // Under the NPTL, getpid() returns the same pid as the aoqi@0: // launcher thread rather than a unique pid per thread. aoqi@0: // Use gettid() if you want the old pre NPTL behaviour. aoqi@0: aoqi@0: // if you are looking for the result of a call to getpid() that aoqi@0: // returns a unique pid for the calling thread, then look at the aoqi@0: // OSThread::thread_id() method in osThread_bsd.hpp file aoqi@0: aoqi@0: return (int)(_initial_pid ? _initial_pid : getpid()); aoqi@0: } aoqi@0: aoqi@0: // DLL functions aoqi@0: aoqi@0: #define JNI_LIB_PREFIX "lib" aoqi@0: #ifdef __APPLE__ aoqi@0: #define JNI_LIB_SUFFIX ".dylib" aoqi@0: #else aoqi@0: #define JNI_LIB_SUFFIX ".so" aoqi@0: #endif aoqi@0: aoqi@0: const char* os::dll_file_extension() { return JNI_LIB_SUFFIX; } aoqi@0: aoqi@0: // This must be hard coded because it's the system's temporary aoqi@0: // directory not the java application's temp directory, ala java.io.tmpdir. 
aoqi@0: #ifdef __APPLE__ aoqi@0: // macosx has a secure per-user temporary directory aoqi@0: char temp_path_storage[PATH_MAX]; aoqi@0: const char* os::get_temp_directory() { aoqi@0: static char *temp_path = NULL; aoqi@0: if (temp_path == NULL) { aoqi@0: int pathSize = confstr(_CS_DARWIN_USER_TEMP_DIR, temp_path_storage, PATH_MAX); aoqi@0: if (pathSize == 0 || pathSize > PATH_MAX) { aoqi@0: strlcpy(temp_path_storage, "/tmp/", sizeof(temp_path_storage)); aoqi@0: } aoqi@0: temp_path = temp_path_storage; aoqi@0: } aoqi@0: return temp_path; aoqi@0: } aoqi@0: #else /* __APPLE__ */ aoqi@0: const char* os::get_temp_directory() { return "/tmp"; } aoqi@0: #endif /* __APPLE__ */ aoqi@0: aoqi@0: static bool file_exists(const char* filename) { aoqi@0: struct stat statbuf; aoqi@0: if (filename == NULL || strlen(filename) == 0) { aoqi@0: return false; aoqi@0: } aoqi@0: return os::stat(filename, &statbuf) == 0; aoqi@0: } aoqi@0: aoqi@0: bool os::dll_build_name(char* buffer, size_t buflen, aoqi@0: const char* pname, const char* fname) { aoqi@0: bool retval = false; aoqi@0: // Copied from libhpi aoqi@0: const size_t pnamelen = pname ? strlen(pname) : 0; aoqi@0: aoqi@0: // Return error on buffer overflow. 
aoqi@0: if (pnamelen + strlen(fname) + strlen(JNI_LIB_PREFIX) + strlen(JNI_LIB_SUFFIX) + 2 > buflen) { aoqi@0: return retval; aoqi@0: } aoqi@0: aoqi@0: if (pnamelen == 0) { aoqi@0: snprintf(buffer, buflen, JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, fname); aoqi@0: retval = true; aoqi@0: } else if (strchr(pname, *os::path_separator()) != NULL) { aoqi@0: int n; aoqi@0: char** pelements = split_path(pname, &n); aoqi@0: if (pelements == NULL) { aoqi@0: return false; aoqi@0: } aoqi@0: for (int i = 0 ; i < n ; i++) { aoqi@0: // Really shouldn't be NULL, but check can't hurt aoqi@0: if (pelements[i] == NULL || strlen(pelements[i]) == 0) { aoqi@0: continue; // skip the empty path values aoqi@0: } aoqi@0: snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, aoqi@0: pelements[i], fname); aoqi@0: if (file_exists(buffer)) { aoqi@0: retval = true; aoqi@0: break; aoqi@0: } aoqi@0: } aoqi@0: // release the storage aoqi@0: for (int i = 0 ; i < n ; i++) { aoqi@0: if (pelements[i] != NULL) { aoqi@0: FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); aoqi@0: } aoqi@0: } aoqi@0: if (pelements != NULL) { aoqi@0: FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); aoqi@0: } aoqi@0: } else { aoqi@0: snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, pname, fname); aoqi@0: retval = true; aoqi@0: } aoqi@0: return retval; aoqi@0: } aoqi@0: aoqi@0: // check if addr is inside libjvm.so aoqi@0: bool os::address_is_in_vm(address addr) { aoqi@0: static address libjvm_base_addr; aoqi@0: Dl_info dlinfo; aoqi@0: aoqi@0: if (libjvm_base_addr == NULL) { aoqi@0: if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) { aoqi@0: libjvm_base_addr = (address)dlinfo.dli_fbase; aoqi@0: } aoqi@0: assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm"); aoqi@0: } aoqi@0: aoqi@0: if (dladdr((void *)addr, &dlinfo) != 0) { aoqi@0: if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true; aoqi@0: } aoqi@0: aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: 
aoqi@0: #define MACH_MAXSYMLEN 256 aoqi@0: aoqi@0: bool os::dll_address_to_function_name(address addr, char *buf, aoqi@0: int buflen, int *offset) { aoqi@0: // buf is not optional, but offset is optional aoqi@0: assert(buf != NULL, "sanity check"); aoqi@0: aoqi@0: Dl_info dlinfo; aoqi@0: char localbuf[MACH_MAXSYMLEN]; aoqi@0: aoqi@0: if (dladdr((void*)addr, &dlinfo) != 0) { aoqi@0: // see if we have a matching symbol aoqi@0: if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) { aoqi@0: if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) { aoqi@0: jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname); aoqi@0: } aoqi@0: if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr; aoqi@0: return true; aoqi@0: } aoqi@0: // no matching symbol so try for just file info aoqi@0: if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) { aoqi@0: if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase), aoqi@0: buf, buflen, offset, dlinfo.dli_fname)) { aoqi@0: return true; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // Handle non-dynamic manually: aoqi@0: if (dlinfo.dli_fbase != NULL && aoqi@0: Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, aoqi@0: dlinfo.dli_fbase)) { aoqi@0: if (!Decoder::demangle(localbuf, buf, buflen)) { aoqi@0: jio_snprintf(buf, buflen, "%s", localbuf); aoqi@0: } aoqi@0: return true; aoqi@0: } aoqi@0: } aoqi@0: buf[0] = '\0'; aoqi@0: if (offset != NULL) *offset = -1; aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: // ported from solaris version aoqi@0: bool os::dll_address_to_library_name(address addr, char* buf, aoqi@0: int buflen, int* offset) { aoqi@0: // buf is not optional, but offset is optional aoqi@0: assert(buf != NULL, "sanity check"); aoqi@0: aoqi@0: Dl_info dlinfo; aoqi@0: aoqi@0: if (dladdr((void*)addr, &dlinfo) != 0) { aoqi@0: if (dlinfo.dli_fname != NULL) { aoqi@0: jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname); aoqi@0: } aoqi@0: if (dlinfo.dli_fbase != NULL && offset != NULL) { aoqi@0: *offset = addr - 
(address)dlinfo.dli_fbase; aoqi@0: } aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: buf[0] = '\0'; aoqi@0: if (offset) *offset = -1; aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: // Loads .dll/.so and aoqi@0: // in case of error it checks if .dll/.so was built for the aoqi@0: // same architecture as Hotspot is running on aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: void * os::dll_load(const char *filename, char *ebuf, int ebuflen) { aoqi@0: void * result= ::dlopen(filename, RTLD_LAZY); aoqi@0: if (result != NULL) { aoqi@0: // Successful loading aoqi@0: return result; aoqi@0: } aoqi@0: aoqi@0: // Read system error message into ebuf aoqi@0: ::strncpy(ebuf, ::dlerror(), ebuflen-1); aoqi@0: ebuf[ebuflen-1]='\0'; aoqi@0: aoqi@0: return NULL; aoqi@0: } aoqi@0: #else aoqi@0: void * os::dll_load(const char *filename, char *ebuf, int ebuflen) aoqi@0: { aoqi@0: void * result= ::dlopen(filename, RTLD_LAZY); aoqi@0: if (result != NULL) { aoqi@0: // Successful loading aoqi@0: return result; aoqi@0: } aoqi@0: aoqi@0: Elf32_Ehdr elf_head; aoqi@0: aoqi@0: // Read system error message into ebuf aoqi@0: // It may or may not be overwritten below aoqi@0: ::strncpy(ebuf, ::dlerror(), ebuflen-1); aoqi@0: ebuf[ebuflen-1]='\0'; aoqi@0: int diag_msg_max_length=ebuflen-strlen(ebuf); aoqi@0: char* diag_msg_buf=ebuf+strlen(ebuf); aoqi@0: aoqi@0: if (diag_msg_max_length==0) { aoqi@0: // No more space in ebuf for additional diagnostics message aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK); aoqi@0: aoqi@0: if (file_descriptor < 0) { aoqi@0: // Can't open library, report dlerror() message aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: bool failed_to_read_elf_head= aoqi@0: (sizeof(elf_head)!= aoqi@0: (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ; aoqi@0: aoqi@0: ::close(file_descriptor); aoqi@0: if (failed_to_read_elf_head) { aoqi@0: // file i/o error - report dlerror() msg aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: 
typedef struct { aoqi@0: Elf32_Half code; // Actual value as defined in elf.h aoqi@0: Elf32_Half compat_class; // Compatibility of archs at VM's sense aoqi@0: char elf_class; // 32 or 64 bit aoqi@0: char endianess; // MSB or LSB aoqi@0: char* name; // String representation aoqi@0: } arch_t; aoqi@0: aoqi@0: #ifndef EM_486 aoqi@0: #define EM_486 6 /* Intel 80486 */ aoqi@0: #endif aoqi@0: aoqi@0: #ifndef EM_MIPS_RS3_LE aoqi@0: #define EM_MIPS_RS3_LE 10 /* MIPS */ aoqi@0: #endif aoqi@0: aoqi@0: #ifndef EM_PPC64 aoqi@0: #define EM_PPC64 21 /* PowerPC64 */ aoqi@0: #endif aoqi@0: aoqi@0: #ifndef EM_S390 aoqi@0: #define EM_S390 22 /* IBM System/390 */ aoqi@0: #endif aoqi@0: aoqi@0: #ifndef EM_IA_64 aoqi@0: #define EM_IA_64 50 /* HP/Intel IA-64 */ aoqi@0: #endif aoqi@0: aoqi@0: #ifndef EM_X86_64 aoqi@0: #define EM_X86_64 62 /* AMD x86-64 */ aoqi@0: #endif aoqi@0: aoqi@0: static const arch_t arch_array[]={ aoqi@0: {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, aoqi@0: {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"}, aoqi@0: {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"}, aoqi@0: {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"}, aoqi@0: {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, aoqi@0: {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"}, aoqi@0: {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"}, aoqi@0: {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"}, aoqi@0: {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, aoqi@0: {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"}, aoqi@0: {EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"}, aoqi@0: {EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"}, aoqi@0: {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"}, aoqi@0: {EM_MIPS, EM_MIPS, ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"}, aoqi@0: {EM_PARISC, EM_PARISC, ELFCLASS32, ELFDATA2MSB, 
(char*)"PARISC"}, aoqi@0: {EM_68K, EM_68K, ELFCLASS32, ELFDATA2MSB, (char*)"M68k"} aoqi@0: }; aoqi@0: aoqi@0: #if (defined IA32) aoqi@0: static Elf32_Half running_arch_code=EM_386; aoqi@0: #elif (defined AMD64) aoqi@0: static Elf32_Half running_arch_code=EM_X86_64; aoqi@0: #elif (defined IA64) aoqi@0: static Elf32_Half running_arch_code=EM_IA_64; aoqi@0: #elif (defined __sparc) && (defined _LP64) aoqi@0: static Elf32_Half running_arch_code=EM_SPARCV9; aoqi@0: #elif (defined __sparc) && (!defined _LP64) aoqi@0: static Elf32_Half running_arch_code=EM_SPARC; aoqi@0: #elif (defined __powerpc64__) aoqi@0: static Elf32_Half running_arch_code=EM_PPC64; aoqi@0: #elif (defined __powerpc__) aoqi@0: static Elf32_Half running_arch_code=EM_PPC; aoqi@0: #elif (defined ARM) aoqi@0: static Elf32_Half running_arch_code=EM_ARM; aoqi@0: #elif (defined S390) aoqi@0: static Elf32_Half running_arch_code=EM_S390; aoqi@0: #elif (defined ALPHA) aoqi@0: static Elf32_Half running_arch_code=EM_ALPHA; aoqi@0: #elif (defined MIPSEL) aoqi@0: static Elf32_Half running_arch_code=EM_MIPS_RS3_LE; aoqi@0: #elif (defined PARISC) aoqi@0: static Elf32_Half running_arch_code=EM_PARISC; aoqi@0: #elif (defined MIPS) aoqi@0: static Elf32_Half running_arch_code=EM_MIPS; aoqi@0: #elif (defined M68K) aoqi@0: static Elf32_Half running_arch_code=EM_68K; aoqi@0: #else aoqi@0: #error Method os::dll_load requires that one of following is defined:\ aoqi@0: IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K aoqi@0: #endif aoqi@0: aoqi@0: // Identify compatability class for VM's architecture and library's architecture aoqi@0: // Obtain string descriptions for architectures aoqi@0: aoqi@0: arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL}; aoqi@0: int running_arch_index=-1; aoqi@0: aoqi@0: for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) { aoqi@0: if (running_arch_code == arch_array[i].code) { aoqi@0: running_arch_index = i; 
aoqi@0: } aoqi@0: if (lib_arch.code == arch_array[i].code) { aoqi@0: lib_arch.compat_class = arch_array[i].compat_class; aoqi@0: lib_arch.name = arch_array[i].name; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: assert(running_arch_index != -1, aoqi@0: "Didn't find running architecture code (running_arch_code) in arch_array"); aoqi@0: if (running_arch_index == -1) { aoqi@0: // Even though running architecture detection failed aoqi@0: // we may still continue with reporting dlerror() message aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: if (lib_arch.endianess != arch_array[running_arch_index].endianess) { aoqi@0: ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)"); aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: #ifndef S390 aoqi@0: if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) { aoqi@0: ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)"); aoqi@0: return NULL; aoqi@0: } aoqi@0: #endif // !S390 aoqi@0: aoqi@0: if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) { aoqi@0: if ( lib_arch.name!=NULL ) { aoqi@0: ::snprintf(diag_msg_buf, diag_msg_max_length-1, aoqi@0: " (Possible cause: can't load %s-bit .so on a %s-bit platform)", aoqi@0: lib_arch.name, arch_array[running_arch_index].name); aoqi@0: } else { aoqi@0: ::snprintf(diag_msg_buf, diag_msg_max_length-1, aoqi@0: " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)", aoqi@0: lib_arch.code, aoqi@0: arch_array[running_arch_index].name); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: return NULL; aoqi@0: } aoqi@0: #endif /* !__APPLE__ */ aoqi@0: aoqi@0: void* os::get_default_process_handle() { aoqi@0: #ifdef __APPLE__ aoqi@0: // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY aoqi@0: // to avoid finding unexpected symbols on second (or later) aoqi@0: // loads of a library. 
aoqi@0: return (void*)::dlopen(NULL, RTLD_FIRST); aoqi@0: #else aoqi@0: return (void*)::dlopen(NULL, RTLD_LAZY); aoqi@0: #endif aoqi@0: } aoqi@0: aoqi@0: // XXX: Do we need a lock around this as per Linux? aoqi@0: void* os::dll_lookup(void* handle, const char* name) { aoqi@0: return dlsym(handle, name); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: static bool _print_ascii_file(const char* filename, outputStream* st) { aoqi@0: int fd = ::open(filename, O_RDONLY); aoqi@0: if (fd == -1) { aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: char buf[32]; aoqi@0: int bytes; aoqi@0: while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) { aoqi@0: st->print_raw(buf, bytes); aoqi@0: } aoqi@0: aoqi@0: ::close(fd); aoqi@0: aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: void os::print_dll_info(outputStream *st) { aoqi@0: st->print_cr("Dynamic libraries:"); aoqi@0: #ifdef RTLD_DI_LINKMAP aoqi@0: Dl_info dli; aoqi@0: void *handle; aoqi@0: Link_map *map; aoqi@0: Link_map *p; aoqi@0: aoqi@0: if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 || aoqi@0: dli.dli_fname == NULL) { aoqi@0: st->print_cr("Error: Cannot print dynamic libraries."); aoqi@0: return; aoqi@0: } aoqi@0: handle = dlopen(dli.dli_fname, RTLD_LAZY); aoqi@0: if (handle == NULL) { aoqi@0: st->print_cr("Error: Cannot print dynamic libraries."); aoqi@0: return; aoqi@0: } aoqi@0: dlinfo(handle, RTLD_DI_LINKMAP, &map); aoqi@0: if (map == NULL) { aoqi@0: st->print_cr("Error: Cannot print dynamic libraries."); aoqi@0: return; aoqi@0: } aoqi@0: aoqi@0: while (map->l_prev != NULL) aoqi@0: map = map->l_prev; aoqi@0: aoqi@0: while (map != NULL) { aoqi@0: st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name); aoqi@0: map = map->l_next; aoqi@0: } aoqi@0: aoqi@0: dlclose(handle); aoqi@0: #elif defined(__APPLE__) aoqi@0: uint32_t count; aoqi@0: uint32_t i; aoqi@0: aoqi@0: count = _dyld_image_count(); aoqi@0: for (i = 1; i < count; i++) { aoqi@0: const char *name = _dyld_get_image_name(i); aoqi@0: intptr_t slide = 
_dyld_get_image_vmaddr_slide(i); aoqi@0: st->print_cr(PTR_FORMAT " \t%s", slide, name); aoqi@0: } aoqi@0: #else aoqi@0: st->print_cr("Error: Cannot print dynamic libraries."); aoqi@0: #endif aoqi@0: } aoqi@0: aoqi@0: void os::print_os_info_brief(outputStream* st) { aoqi@0: st->print("Bsd"); aoqi@0: aoqi@0: os::Posix::print_uname_info(st); aoqi@0: } aoqi@0: aoqi@0: void os::print_os_info(outputStream* st) { aoqi@0: st->print("OS:"); aoqi@0: st->print("Bsd"); aoqi@0: aoqi@0: os::Posix::print_uname_info(st); aoqi@0: aoqi@0: os::Posix::print_rlimit_info(st); aoqi@0: aoqi@0: os::Posix::print_load_average(st); aoqi@0: } aoqi@0: aoqi@0: void os::pd_print_cpu_info(outputStream* st) { aoqi@0: // Nothing to do for now. aoqi@0: } aoqi@0: aoqi@0: void os::print_memory_info(outputStream* st) { aoqi@0: aoqi@0: st->print("Memory:"); aoqi@0: st->print(" %dk page", os::vm_page_size()>>10); aoqi@0: aoqi@0: st->print(", physical " UINT64_FORMAT "k", aoqi@0: os::physical_memory() >> 10); aoqi@0: st->print("(" UINT64_FORMAT "k free)", aoqi@0: os::available_memory() >> 10); aoqi@0: st->cr(); aoqi@0: aoqi@0: // meminfo aoqi@0: st->print("\n/proc/meminfo:\n"); aoqi@0: _print_ascii_file("/proc/meminfo", st); aoqi@0: st->cr(); aoqi@0: } aoqi@0: aoqi@0: void os::print_siginfo(outputStream* st, void* siginfo) { aoqi@0: const siginfo_t* si = (const siginfo_t*)siginfo; aoqi@0: aoqi@0: os::Posix::print_siginfo_brief(st, si); aoqi@0: aoqi@0: if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && aoqi@0: UseSharedSpaces) { aoqi@0: FileMapInfo* mapinfo = FileMapInfo::current_info(); aoqi@0: if (mapinfo->is_in_shared_space(si->si_addr)) { aoqi@0: st->print("\n\nError accessing class data sharing archive." 
\ aoqi@0: " Mapped file inaccessible during execution, " \ aoqi@0: " possible disk/network problem."); aoqi@0: } aoqi@0: } aoqi@0: st->cr(); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: static void print_signal_handler(outputStream* st, int sig, aoqi@0: char* buf, size_t buflen); aoqi@0: aoqi@0: void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { aoqi@0: st->print_cr("Signal Handlers:"); aoqi@0: print_signal_handler(st, SIGSEGV, buf, buflen); aoqi@0: print_signal_handler(st, SIGBUS , buf, buflen); aoqi@0: print_signal_handler(st, SIGFPE , buf, buflen); aoqi@0: print_signal_handler(st, SIGPIPE, buf, buflen); aoqi@0: print_signal_handler(st, SIGXFSZ, buf, buflen); aoqi@0: print_signal_handler(st, SIGILL , buf, buflen); aoqi@0: print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen); aoqi@0: print_signal_handler(st, SR_signum, buf, buflen); aoqi@0: print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen); aoqi@0: print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen); aoqi@0: print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen); aoqi@0: print_signal_handler(st, BREAK_SIGNAL, buf, buflen); aoqi@0: } aoqi@0: aoqi@0: static char saved_jvm_path[MAXPATHLEN] = {0}; aoqi@0: aoqi@0: // Find the full path to the current module, libjvm aoqi@0: void os::jvm_path(char *buf, jint buflen) { aoqi@0: // Error checking. aoqi@0: if (buflen < MAXPATHLEN) { aoqi@0: assert(false, "must use a large-enough buffer"); aoqi@0: buf[0] = '\0'; aoqi@0: return; aoqi@0: } aoqi@0: // Lazy resolve the path to current module. 
aoqi@0: if (saved_jvm_path[0] != 0) { aoqi@0: strcpy(buf, saved_jvm_path); aoqi@0: return; aoqi@0: } aoqi@0: aoqi@0: char dli_fname[MAXPATHLEN]; aoqi@0: bool ret = dll_address_to_library_name( aoqi@0: CAST_FROM_FN_PTR(address, os::jvm_path), aoqi@0: dli_fname, sizeof(dli_fname), NULL); aoqi@0: assert(ret, "cannot locate libjvm"); aoqi@0: char *rp = NULL; aoqi@0: if (ret && dli_fname[0] != '\0') { aoqi@0: rp = realpath(dli_fname, buf); aoqi@0: } aoqi@0: if (rp == NULL) aoqi@0: return; aoqi@0: aoqi@0: if (Arguments::created_by_gamma_launcher()) { aoqi@0: // Support for the gamma launcher. Typical value for buf is aoqi@0: // "/jre/lib///libjvm". If "/jre/lib/" appears at aoqi@0: // the right place in the string, then assume we are installed in a JDK and aoqi@0: // we're done. Otherwise, check for a JAVA_HOME environment variable and aoqi@0: // construct a path to the JVM being overridden. aoqi@0: aoqi@0: const char *p = buf + strlen(buf) - 1; aoqi@0: for (int count = 0; p > buf && count < 5; ++count) { aoqi@0: for (--p; p > buf && *p != '/'; --p) aoqi@0: /* empty */ ; aoqi@0: } aoqi@0: aoqi@0: if (strncmp(p, "/jre/lib/", 9) != 0) { aoqi@0: // Look for JAVA_HOME in the environment. 
aoqi@0: char* java_home_var = ::getenv("JAVA_HOME"); aoqi@0: if (java_home_var != NULL && java_home_var[0] != 0) { aoqi@0: char* jrelib_p; aoqi@0: int len; aoqi@0: aoqi@0: // Check the current module name "libjvm" aoqi@0: p = strrchr(buf, '/'); aoqi@0: assert(strstr(p, "/libjvm") == p, "invalid library name"); aoqi@0: aoqi@0: rp = realpath(java_home_var, buf); aoqi@0: if (rp == NULL) aoqi@0: return; aoqi@0: aoqi@0: // determine if this is a legacy image or modules image aoqi@0: // modules image doesn't have "jre" subdirectory aoqi@0: len = strlen(buf); aoqi@0: assert(len < buflen, "Ran out of buffer space"); aoqi@0: jrelib_p = buf + len; aoqi@0: aoqi@0: // Add the appropriate library subdir aoqi@0: snprintf(jrelib_p, buflen-len, "/jre/lib"); aoqi@0: if (0 != access(buf, F_OK)) { aoqi@0: snprintf(jrelib_p, buflen-len, "/lib"); aoqi@0: } aoqi@0: aoqi@0: // Add the appropriate client or server subdir aoqi@0: len = strlen(buf); aoqi@0: jrelib_p = buf + len; aoqi@0: snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT); aoqi@0: if (0 != access(buf, F_OK)) { aoqi@0: snprintf(jrelib_p, buflen-len, ""); aoqi@0: } aoqi@0: aoqi@0: // If the path exists within JAVA_HOME, add the JVM library name aoqi@0: // to complete the path to JVM being overridden. Otherwise fallback aoqi@0: // to the path to the current library. 
aoqi@0: if (0 == access(buf, F_OK)) { aoqi@0: // Use current module name "libjvm" aoqi@0: len = strlen(buf); aoqi@0: snprintf(buf + len, buflen-len, "/libjvm%s", JNI_LIB_SUFFIX); aoqi@0: } else { aoqi@0: // Fall back to path of current library aoqi@0: rp = realpath(dli_fname, buf); aoqi@0: if (rp == NULL) aoqi@0: return; aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: strncpy(saved_jvm_path, buf, MAXPATHLEN); aoqi@0: } aoqi@0: aoqi@0: void os::print_jni_name_prefix_on(outputStream* st, int args_size) { aoqi@0: // no prefix required, not even "_" aoqi@0: } aoqi@0: aoqi@0: void os::print_jni_name_suffix_on(outputStream* st, int args_size) { aoqi@0: // no suffix required aoqi@0: } aoqi@0: aoqi@0: //////////////////////////////////////////////////////////////////////////////// aoqi@0: // sun.misc.Signal support aoqi@0: aoqi@0: static volatile jint sigint_count = 0; aoqi@0: aoqi@0: static void aoqi@0: UserHandler(int sig, void *siginfo, void *context) { aoqi@0: // 4511530 - sem_post is serialized and handled by the manager thread. When aoqi@0: // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We aoqi@0: // don't want to flood the manager thread with sem_post requests. aoqi@0: if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1) aoqi@0: return; aoqi@0: aoqi@0: // Ctrl-C is pressed during error reporting, likely because the error aoqi@0: // handler fails to abort. Let VM die immediately. 
aoqi@0: if (sig == SIGINT && is_error_reported()) { aoqi@0: os::die(); aoqi@0: } aoqi@0: aoqi@0: os::signal_notify(sig); aoqi@0: } aoqi@0: aoqi@0: void* os::user_handler() { aoqi@0: return CAST_FROM_FN_PTR(void*, UserHandler); aoqi@0: } aoqi@0: aoqi@0: extern "C" { aoqi@0: typedef void (*sa_handler_t)(int); aoqi@0: typedef void (*sa_sigaction_t)(int, siginfo_t *, void *); aoqi@0: } aoqi@0: aoqi@0: void* os::signal(int signal_number, void* handler) { aoqi@0: struct sigaction sigAct, oldSigAct; aoqi@0: aoqi@0: sigfillset(&(sigAct.sa_mask)); aoqi@0: sigAct.sa_flags = SA_RESTART|SA_SIGINFO; aoqi@0: sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler); aoqi@0: aoqi@0: if (sigaction(signal_number, &sigAct, &oldSigAct)) { aoqi@0: // -1 means registration failed aoqi@0: return (void *)-1; aoqi@0: } aoqi@0: aoqi@0: return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler); aoqi@0: } aoqi@0: aoqi@0: void os::signal_raise(int signal_number) { aoqi@0: ::raise(signal_number); aoqi@0: } aoqi@0: aoqi@0: /* aoqi@0: * The following code is moved from os.cpp for making this aoqi@0: * code platform specific, which it is by its very nature. aoqi@0: */ aoqi@0: aoqi@0: // Will be modified when max signal is changed to be dynamic aoqi@0: int os::sigexitnum_pd() { aoqi@0: return NSIG; aoqi@0: } aoqi@0: aoqi@0: // a counter for each possible signal value aoqi@0: static volatile jint pending_signals[NSIG+1] = { 0 }; aoqi@0: aoqi@0: // Bsd(POSIX) specific hand shaking semaphore. 
// Platform-specific counting-semaphore primitive: Mach semaphores on Darwin,
// POSIX sem_t everywhere else. The SEM_* macros normalize the two APIs so the
// Semaphore class and the sig_sem code below can be written once.
#ifdef __APPLE__
typedef semaphore_t os_semaphore_t;
#define SEM_INIT(sem, value)    semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value)
#define SEM_WAIT(sem)           semaphore_wait(sem)
#define SEM_POST(sem)           semaphore_signal(sem)
#define SEM_DESTROY(sem)        semaphore_destroy(mach_task_self(), sem)
#else
typedef sem_t os_semaphore_t;
#define SEM_INIT(sem, value)    sem_init(&sem, 0, value)
#define SEM_WAIT(sem)           sem_wait(&sem)
#define SEM_POST(sem)           sem_post(&sem)
#define SEM_DESTROY(sem)        sem_destroy(&sem)
#endif

// Thin RAII-ish wrapper over os_semaphore_t (created with count 0, destroyed
// in the destructor). timedwait() takes a relative (sec, nsec) timeout.
class Semaphore : public StackObj {
  public:
    Semaphore();
    ~Semaphore();
    void signal();
    void wait();
    bool trywait();
    bool timedwait(unsigned int sec, int nsec);
  private:
    jlong currenttime() const;
    os_semaphore_t _semaphore;
};

Semaphore::Semaphore() : _semaphore(0) {
  SEM_INIT(_semaphore, 0);
}

Semaphore::~Semaphore() {
  SEM_DESTROY(_semaphore);
}

// Increment the semaphore count, releasing one waiter if any.
void Semaphore::signal() {
  SEM_POST(_semaphore);
}

// Block until the semaphore count is positive, then decrement it.
void Semaphore::wait() {
  SEM_WAIT(_semaphore);
}

// Wall-clock time in nanoseconds, built from gettimeofday() (tv_usec * 1000
// converts microseconds to nanoseconds). Used only to track elapsed time in
// the Darwin timedwait() retry loop below; not a monotonic clock, so a system
// clock step can shorten or lengthen the effective timeout.
jlong Semaphore::currenttime() const {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
}

#ifdef __APPLE__
// Non-blocking acquire: a timedwait with a zero timeout.
bool Semaphore::trywait() {
  return timedwait(0, 0);
}

// Darwin: semaphore_timedwait() takes a *relative* mach_timespec_t and can
// return KERN_ABORTED when interrupted. On each abort we recompute the
// remaining wait from wall-clock elapsed time and retry; once the budget is
// spent we retry one final time with a zero timeout so a concurrent signal()
// can still win. Returns true only on a successful acquire (KERN_SUCCESS).
bool Semaphore::timedwait(unsigned int sec, int nsec) {
  kern_return_t kr = KERN_ABORTED;
  mach_timespec_t waitspec;
  waitspec.tv_sec = sec;
  waitspec.tv_nsec = nsec;

  jlong starttime = currenttime();

  kr = semaphore_timedwait(_semaphore, waitspec);
  while (kr == KERN_ABORTED) {
    // total requested wait, in nanoseconds
    jlong totalwait = (sec * NANOSECS_PER_SEC) + nsec;

    jlong current = currenttime();
    jlong passedtime = current - starttime;

    if (passedtime >= totalwait) {
      waitspec.tv_sec = 0;
      waitspec.tv_nsec = 0;
    } else {
      jlong waittime = totalwait - (current - starttime);
      waitspec.tv_sec = waittime / NANOSECS_PER_SEC;
      waitspec.tv_nsec = waittime % NANOSECS_PER_SEC;
    }

    kr = semaphore_timedwait(_semaphore, waitspec);
  }

  return kr == KERN_SUCCESS;
}

#else

// POSIX: non-blocking acquire via sem_trywait.
bool Semaphore::trywait() {
  return sem_trywait(&_semaphore) == 0;
}

// POSIX: sem_timedwait() takes an *absolute* deadline; unpackTime() converts
// the relative (sec, nsec) budget into one before the loop, so EINTR retries
// do not extend the total wait. Any error other than EINTR (including
// ETIMEDOUT) ends the wait with false.
bool Semaphore::timedwait(unsigned int sec, int nsec) {
  struct timespec ts;
  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);

  while (1) {
    int result = sem_timedwait(&_semaphore, &ts);
    if (result == 0) {
      return true;
    } else if (errno == EINTR) {
      continue;
    } else if (errno == ETIMEDOUT) {
      return false;
    } else {
      return false;
    }
  }
}

#endif // __APPLE__

// Semaphore counting pending sun.misc.Signal notifications (posted by
// os::signal_notify, consumed by check_pending_signals).
static os_semaphore_t sig_sem;
// Semaphore used elsewhere for suspend/resume handshaking
// (NOTE(review): no uses visible in this chunk).
static Semaphore sr_semaphore;

// Platform part of signal dispatch initialization: clear the per-signal
// pending counters and create the handshake semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  ::SEM_INIT(sig_sem, 0);
}

// Record one occurrence of 'sig' and wake the signal-dispatcher thread.
// Called from UserHandler (signal context), so it is limited to an atomic
// increment plus a semaphore post.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::SEM_POST(sig_sem);
}

// Scan pending_signals for a signal with a nonzero count, atomically claim
// one occurrence (cmpxchg decrement), and return its number. If none is
// pending: return -1 when !wait, otherwise block on sig_sem and rescan.
// The blocking path runs the standard suspend-equivalent protocol so an
// external suspend during the wait is honored; the consumed semaphore count
// is re-posted before self-suspending so no wakeup is lost.
static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ::SEM_WAIT(sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ::SEM_POST(sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking poll for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending and return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

int os::vm_page_size() {
  // Seems redundant as all get out
  assert(os::Bsd::page_size() != -1, "must call os::init");
  return os::Bsd::page_size();
}

// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(os::Bsd::page_size() != -1, "must call os::init");
  return os::Bsd::page_size();
}

// Rationale behind this function:
//  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
//  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
//  samples for JITted code. Here we create private executable mapping over the code cache
//  and then we can use standard (well, almost, as mapping can change) way to provide
//  info for the reporting script by storing timestamp and location of symbol
void bsd_wrap_code(char* base, size_t size) {
  static volatile jint cnt = 0;

  if (!UseOprofile) {
    return;
  }

  char buf[PATH_MAX + 1];
  int num = Atomic::add(1, &cnt);

  // Unique temp-file name per process and per call.
  snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
           os::get_temp_directory(), os::current_process_id(), num);
  unlink(buf);

  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    // Extend the file to 'size' bytes (seek + 1-byte write) so the
    // file-backed mapping below covers the whole code cache.
    off_t rv = ::lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (::write(fd, "", 1) == 1) {
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    ::close(fd);
    // File can be unlinked immediately; the mapping keeps it alive.
    unlink(buf);
  }
}

// Emit a diagnostic for a failed commit_memory call; 'err' is the errno
// captured at the failure point.
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          strerror(err), err);
}

// NOTE: Bsd kernel does not really reserve the pages for us.
//       All it does is to check if there are enough free pages
//       left at the time of mmap(). This could be a potential
//       problem.
// Commit (make accessible) a previously reserved range by remapping it with
// read/write (and exec, if requested) protection. Returns false on failure.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
#ifdef __OpenBSD__
  // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
  if (::mprotect(addr, size, prot) == 0) {
    return true;
  }
#else
  uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  if (res != (uintptr_t) MAP_FAILED) {
    return true;
  }
#endif

  // Warn about any commit errors we see in non-product builds just
  // in case mmap() doesn't work as described on the man page.
  NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)

  return false;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or abort the VM with an OOM error carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

// No-op on BSD: realignment of committed memory is not supported/needed here.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

// Advise the kernel the range is no longer needed; pages may be reclaimed.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  ::madvise(addr, bytes, MADV_DONTNEED);
}

// NUMA stubs: this port models a single memory group (no NUMA awareness).
void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

bool os::numa_topology_changed()   { return false; }

size_t os::numa_get_groups_num() {
  return 1;
}

int os::numa_get_group_id() {
  return 0;
}

// Report the single leaf group (id 0) if the caller's buffer has room.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}


// Uncommit a range by remapping it PROT_NONE (reserved-but-inaccessible),
// mirroring the reserve-time mapping in anon_mmap().
bool os::pd_uncommit_memory(char* addr, size_t size) {
#ifdef __OpenBSD__
  // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
  return ::mprotect(addr, size, PROT_NONE) == 0;
#else
  uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                     MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
  return res  != (uintptr_t) MAP_FAILED;
#endif
}

// Stack guard pages are ordinary committed (non-executable) memory here;
// the guard protection itself is applied elsewhere via os::guard_memory().
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them.  If not, just call uncommit_memory().
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Highest address ever handed out by anon_mmap(); a hint used by
// heap-stack collision detection (see pd_attempt_reserve_memory_at).
static address _highest_vm_reserved_address = NULL;

// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
// 'requested_addr' is only treated as a hint, the return value may or
// may not start from the requested address. Unlike Bsd mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
  char * addr;
  int flags;

  flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
  if (fixed) {
    assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
    flags |= MAP_FIXED;
  }

  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    // anon_mmap() should only get called during VM initialization,
    // don't need lock (actually we can skip locking even it can be called
    // from multiple threads, because _highest_vm_reserved_address is just a
    // hint about the upper limit of non-stack memory regions.)
    if ((address)addr + bytes > _highest_vm_reserved_address) {
      _highest_vm_reserved_address = (address)addr + bytes;
    }
  }

  return addr == MAP_FAILED ? NULL : addr;
}

// Don't update _highest_vm_reserved_address, because there might be memory
// regions above addr + size. If so, releasing a memory region only creates
// a hole in the address space, it doesn't help prevent heap-stack collision.
//
static int anon_munmap(char * addr, size_t size) {
  return ::munmap(addr, size) == 0;
}

// Reserve (but do not commit) 'bytes' of address space; requested_addr,
// when non-NULL, is passed as a MAP_FIXED request (see anon_mmap).
char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
                            size_t alignment_hint) {
  return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
}

bool os::pd_release_memory(char* addr, size_t size) {
  return anon_munmap(addr, size);
}

// mprotect() the page-aligned range covering [addr, addr+size); asserts that
// 'addr' is already page aligned (unaligned callers indicate a VM bug).
static bool bsd_mprotect(char* addr, size_t size, int prot) {
  // Bsd wants the mprotect address argument to be page aligned.
  char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());

  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. Unaligned
  // 'addr' likely indicates problem in the VM (e.g. trying to change
  // protection of malloc'ed or statically allocated memory). Check the
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");

  size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
  return ::mprotect(bottom, size, prot) == 0;
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return bsd_mprotect(addr, bytes, p);
}

// Guard a range by making it inaccessible; any touch faults immediately.
bool os::guard_memory(char* addr, size_t size) {
  return bsd_mprotect(addr, size, PROT_NONE);
}

// Undo guard_memory(): restore read/write access.
bool os::unguard_memory(char* addr, size_t size) {
  return bsd_mprotect(addr, size, PROT_READ|PROT_WRITE);
}

// hugetlbfs is a Linux facility; never available on BSD.
bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
  return false;
}

// Large page support

static size_t _large_page_size = 0;

// Large pages are not configured on this port; large_page_size() stays 0.
void os::large_page_init() {
}


// SysV-SHM-based large page reservation. Dead code: the leading fatal()
// aborts before any of the SHM logic runs; kept for reference only.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  fatal("This code is not used or maintained.");

  // "exec" is passed in but not used.  Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseSHM, "only for SHM large pages");

  key_t key = IPC_PRIVATE;
  char *addr;

  bool warn_on_failure = UseLargePages &&
                        (!FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                        );

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap
  int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
  if (shmid == -1) {
     // Possible reasons for shmget failure:
     // 1. shmmax is too small for Java heap.
     //    > check shmmax value: cat /proc/sys/kernel/shmmax
     //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
     // 2. not enough large page memory.
     //    > check available large pages: cat /proc/meminfo
     //    > increase amount of large pages:
     //          echo new_value > /proc/sys/vm/nr_hugepages
     //      Note 1: different Bsd may use different name for this property,
     //            e.g. on Redhat AS-3 it is "hugetlb_pool".
     //      Note 2: it's possible there's enough physical memory available but
     //            they are so fragmented after a long run that they can't
     //            coalesce into large pages. Try to reserve large pages when
     //            the system is still "fresh".
     if (warn_on_failure) {
       warning("Failed to reserve shared memory (errno = %d).", errno);
     }
     return NULL;
  }

  // attach to the region
  addr = (char*)shmat(shmid, req_addr, 0);
  int err = errno;

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  if ((intptr_t)addr == -1) {
     if (warn_on_failure) {
       warning("Failed to attach shared memory (errno = %d).", err);
     }
     return NULL;
  }

  // The memory is committed
  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);

  return addr;
}

// Detach (and thereby delete, see reserve_memory_special) the SHM segment,
// recording the release with NMT when tracking is active.
bool os::release_memory_special(char* base, size_t bytes) {
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    // detaching the SHM segment will also delete it, see reserve_memory_special()
    int rslt = shmdt(base);
    if (rslt == 0) {
      tkr.record((address)base, bytes);
      return true;
    } else {
      return false;
    }
  } else {
    return shmdt(base) == 0;
  }
}

size_t os::large_page_size() {
  return _large_page_size;
}

// HugeTLBFS allows application to commit large page memory on demand;
// with SysV SHM the entire memory region must be allocated as shared
// memory.
bool os::can_commit_large_page_memory() {
  return UseHugeTLBFS;
}

bool os::can_execute_large_page_memory() {
  return UseHugeTLBFS;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];
  const size_t gap = 0x000000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level.  If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain.  See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries. Note that reserve_memory() will
  // automatically update _highest_vm_reserved_address if the call is
  // successful. The variable tracks the highest memory address every reserved
  // by JVM. It is used to detect heap-stack collision if running with
  // fixed-stack BsdThreads. Because here we may attempt to reserve more
  // space than needed, it could confuse the collision detecting code. To
  // solve the problem, save current _highest_vm_reserved_address and
  // calculate the correct value before return.
  address old_highest = _highest_vm_reserved_address;

  // Bsd mmap allows caller to pass an address as hint; give it a try first,
  // if kernel honors the hint then we can return immediately.
  char * addr = anon_mmap(requested_addr, bytes, false);
  if (addr == requested_addr) {
     return requested_addr;
  }

  if (addr != NULL) {
     // mmap() is successful but it fails to reserve at the requested address
     anon_munmap(addr, bytes);
  }

  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // NOTE(review): top_overlap/bottom_overlap are size_t, so the ">= 0"
      // tests are tautologically true; the code relies on unsigned wraparound
      // making a "negative" overlap a huge value that fails the "< bytes"
      // check instead. Works, but worth cleaning up (compilers warn on it).

      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  if (i < max_tries) {
    _highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
    return requested_addr;
  } else {
    _highest_vm_reserved_address = old_highest;
    return NULL;
  }
}

// read() that restarts automatically on EINTR.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes));
}

// TODO-FIXME: reconcile Solaris' os::sleep with the bsd variation.
// Solaris uses poll(), bsd uses park().
// Poll() is likely a better choice, assuming that Thread.interrupt()
// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
// SIGSEGV, see 4355769.

// Sleep for 'millis' ms on the thread's _SleepEvent ParkEvent. The
// interruptible path checks Thread.interrupt() (returning OS_INTRPT) and
// runs the suspend-equivalent protocol around each park; the
// non-interruptible path just re-parks until the budget is spent.
// A backwards jump of javaTimeNanos() is tolerated (no decrement that round).
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(),  "thread consistency check");

  ParkEvent * const slp = thread->_SleepEvent ;
  slp->reset() ;
  OrderAccess::fence() ;

  if (interruptible) {
    jlong prevtime = javaTimeNanos();

    for (;;) {
      if (os::is_interrupted(thread, true)) {
        return OS_INTRPT;
      }

      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if(millis <= 0) {
        return OS_OK;
      }

      prevtime = newtime;

      {
        assert(thread->is_Java_thread(), "sanity check");
        JavaThread *jt = (JavaThread *) thread;
        ThreadBlockInVM tbivm(jt);
        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

        jt->set_suspend_equivalent();
        // cleared by handle_special_suspend_equivalent_condition() or
        // java_suspend_self() via check_and_wait_while_suspended()

        slp->park(millis);

        // were we externally suspended while we were waiting?
        jt->check_and_wait_while_suspended();
      }
    }
  } else {
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jlong prevtime = javaTimeNanos();

    for (;;) {
      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
      // the 1st iteration ...
      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if(millis <= 0) break ;

      prevtime = newtime;
      slp->park(millis);
    }
    return OS_OK ;
  }
}

// Uninterruptible sub-second sleep via nanosleep(); ms == 0 sleeps 1 ns so
// the call still yields the CPU.
void os::naked_short_sleep(jlong ms) {
  struct timespec req;

  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  req.tv_sec = 0;
  if (ms > 0) {
    req.tv_nsec = (ms % 1000) * 1000000;
  }
  else {
    req.tv_nsec = 1;
  }

  nanosleep(&req, NULL);

  return;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  return DontYieldALot;
}

void os::yield() {
  sched_yield();
}

os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  // Threads on Bsd are all with same priority. The Solaris style
  // os::yield_all() with nanosleep(1ms) is not necessary.
  sched_yield();
}

// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}

////////////////////////////////////////////////////////////////////////////////
// thread priority support

// Note: Normal Bsd applications are run with SCHED_OTHER policy. SCHED_OTHER
// only supports dynamic priority, static priority must be zero. For real-time
// applications, Bsd supports SCHED_RR which allows static priority (1-99).
// However, for large multi-threaded applications, SCHED_RR is not only slower
// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
// of 5 runs - Sep 2005).
//
// The following code actually changes the niceness of kernel-thread/LWP. It
// has an assumption that setpriority() only modifies one kernel-thread/LWP,
// not the entire user process, and user level threads are 1:1 mapped to kernel
// threads. It has always been the case, but could change in the future. For
// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
// It is only used when ThreadPriorityPolicy=1 and requires root privilege.
aoqi@0: aoqi@0: #if !defined(__APPLE__) aoqi@0: int os::java_to_os_priority[CriticalPriority + 1] = { aoqi@0: 19, // 0 Entry should never be used aoqi@0: aoqi@0: 0, // 1 MinPriority aoqi@0: 3, // 2 aoqi@0: 6, // 3 aoqi@0: aoqi@0: 10, // 4 aoqi@0: 15, // 5 NormPriority aoqi@0: 18, // 6 aoqi@0: aoqi@0: 21, // 7 aoqi@0: 25, // 8 aoqi@0: 28, // 9 NearMaxPriority aoqi@0: aoqi@0: 31, // 10 MaxPriority aoqi@0: aoqi@0: 31 // 11 CriticalPriority aoqi@0: }; aoqi@0: #else aoqi@0: /* Using Mach high-level priority assignments */ aoqi@0: int os::java_to_os_priority[CriticalPriority + 1] = { aoqi@0: 0, // 0 Entry should never be used (MINPRI_USER) aoqi@0: aoqi@0: 27, // 1 MinPriority aoqi@0: 28, // 2 aoqi@0: 29, // 3 aoqi@0: aoqi@0: 30, // 4 aoqi@0: 31, // 5 NormPriority (BASEPRI_DEFAULT) aoqi@0: 32, // 6 aoqi@0: aoqi@0: 33, // 7 aoqi@0: 34, // 8 aoqi@0: 35, // 9 NearMaxPriority aoqi@0: aoqi@0: 36, // 10 MaxPriority aoqi@0: aoqi@0: 36 // 11 CriticalPriority aoqi@0: }; aoqi@0: #endif aoqi@0: aoqi@0: static int prio_init() { aoqi@0: if (ThreadPriorityPolicy == 1) { aoqi@0: // Only root can raise thread priority. Don't allow ThreadPriorityPolicy=1 aoqi@0: // if effective uid is not root. 
Perhaps, a more elegant way of doing aoqi@0: // this is to test CAP_SYS_NICE capability, but that will require libcap.so aoqi@0: if (geteuid() != 0) { aoqi@0: if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) { aoqi@0: warning("-XX:ThreadPriorityPolicy requires root privilege on Bsd"); aoqi@0: } aoqi@0: ThreadPriorityPolicy = 0; aoqi@0: } aoqi@0: } aoqi@0: if (UseCriticalJavaThreadPriority) { aoqi@0: os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; aoqi@0: } aoqi@0: return 0; aoqi@0: } aoqi@0: aoqi@0: OSReturn os::set_native_priority(Thread* thread, int newpri) { aoqi@0: if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) return OS_OK; aoqi@0: aoqi@0: #ifdef __OpenBSD__ aoqi@0: // OpenBSD pthread_setprio starves low priority threads aoqi@0: return OS_OK; aoqi@0: #elif defined(__FreeBSD__) aoqi@0: int ret = pthread_setprio(thread->osthread()->pthread_id(), newpri); aoqi@0: #elif defined(__APPLE__) || defined(__NetBSD__) aoqi@0: struct sched_param sp; aoqi@0: int policy; aoqi@0: pthread_t self = pthread_self(); aoqi@0: aoqi@0: if (pthread_getschedparam(self, &policy, &sp) != 0) aoqi@0: return OS_ERR; aoqi@0: aoqi@0: sp.sched_priority = newpri; aoqi@0: if (pthread_setschedparam(self, policy, &sp) != 0) aoqi@0: return OS_ERR; aoqi@0: aoqi@0: return OS_OK; aoqi@0: #else aoqi@0: int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri); aoqi@0: return (ret == 0) ? 
OS_OK : OS_ERR; aoqi@0: #endif aoqi@0: } aoqi@0: aoqi@0: OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) { aoqi@0: if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) { aoqi@0: *priority_ptr = java_to_os_priority[NormPriority]; aoqi@0: return OS_OK; aoqi@0: } aoqi@0: aoqi@0: errno = 0; aoqi@0: #if defined(__OpenBSD__) || defined(__FreeBSD__) aoqi@0: *priority_ptr = pthread_getprio(thread->osthread()->pthread_id()); aoqi@0: #elif defined(__APPLE__) || defined(__NetBSD__) aoqi@0: int policy; aoqi@0: struct sched_param sp; aoqi@0: aoqi@0: pthread_getschedparam(pthread_self(), &policy, &sp); aoqi@0: *priority_ptr = sp.sched_priority; aoqi@0: #else aoqi@0: *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id()); aoqi@0: #endif aoqi@0: return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR); aoqi@0: } aoqi@0: aoqi@0: // Hint to the underlying OS that a task switch would not be good. aoqi@0: // Void return because it's a hint and can fail. aoqi@0: void os::hint_no_preempt() {} aoqi@0: aoqi@0: //////////////////////////////////////////////////////////////////////////////// aoqi@0: // suspend/resume support aoqi@0: aoqi@0: // the low-level signal-based suspend/resume support is a remnant from the aoqi@0: // old VM-suspension that used to be for java-suspension, safepoints etc, aoqi@0: // within hotspot. Now there is a single use-case for this: aoqi@0: // - calling get_thread_pc() on the VMThread by the flat-profiler task aoqi@0: // that runs in the watcher thread. aoqi@0: // The remaining code is greatly simplified from the more general suspension aoqi@0: // code that used to be used. 
aoqi@0: // aoqi@0: // The protocol is quite simple: aoqi@0: // - suspend: aoqi@0: // - sends a signal to the target thread aoqi@0: // - polls the suspend state of the osthread using a yield loop aoqi@0: // - target thread signal handler (SR_handler) sets suspend state aoqi@0: // and blocks in sigsuspend until continued aoqi@0: // - resume: aoqi@0: // - sets target osthread state to continue aoqi@0: // - sends signal to end the sigsuspend loop in the SR_handler aoqi@0: // aoqi@0: // Note that the SR_lock plays no role in this suspend/resume protocol. aoqi@0: // aoqi@0: aoqi@0: static void resume_clear_context(OSThread *osthread) { aoqi@0: osthread->set_ucontext(NULL); aoqi@0: osthread->set_siginfo(NULL); aoqi@0: } aoqi@0: aoqi@0: static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) { aoqi@0: osthread->set_ucontext(context); aoqi@0: osthread->set_siginfo(siginfo); aoqi@0: } aoqi@0: aoqi@0: // aoqi@0: // Handler function invoked when a thread's execution is suspended or aoqi@0: // resumed. We have to be careful that only async-safe functions are aoqi@0: // called here (Note: most pthread functions are not async safe and aoqi@0: // should be avoided.) aoqi@0: // aoqi@0: // Note: sigwait() is a more natural fit than sigsuspend() from an aoqi@0: // interface point of view, but sigwait() prevents the signal hander aoqi@0: // from being run. libpthread would get very confused by not having aoqi@0: // its signal handlers run and prevents sigwait()'s use with the aoqi@0: // mutex granting granting signal. aoqi@0: // aoqi@0: // Currently only ever called on the VMThread or JavaThread aoqi@0: // aoqi@0: static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) { aoqi@0: // Save and restore errno to avoid confusing native code with EINTR aoqi@0: // after sigsuspend. 
aoqi@0: int old_errno = errno; aoqi@0: aoqi@0: Thread* thread = Thread::current(); aoqi@0: OSThread* osthread = thread->osthread(); aoqi@0: assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread"); aoqi@0: aoqi@0: os::SuspendResume::State current = osthread->sr.state(); aoqi@0: if (current == os::SuspendResume::SR_SUSPEND_REQUEST) { aoqi@0: suspend_save_context(osthread, siginfo, context); aoqi@0: aoqi@0: // attempt to switch the state, we assume we had a SUSPEND_REQUEST aoqi@0: os::SuspendResume::State state = osthread->sr.suspended(); aoqi@0: if (state == os::SuspendResume::SR_SUSPENDED) { aoqi@0: sigset_t suspend_set; // signals for sigsuspend() aoqi@0: aoqi@0: // get current set of blocked signals and unblock resume signal aoqi@0: pthread_sigmask(SIG_BLOCK, NULL, &suspend_set); aoqi@0: sigdelset(&suspend_set, SR_signum); aoqi@0: aoqi@0: sr_semaphore.signal(); aoqi@0: // wait here until we are resumed aoqi@0: while (1) { aoqi@0: sigsuspend(&suspend_set); aoqi@0: aoqi@0: os::SuspendResume::State result = osthread->sr.running(); aoqi@0: if (result == os::SuspendResume::SR_RUNNING) { aoqi@0: sr_semaphore.signal(); aoqi@0: break; aoqi@0: } else if (result != os::SuspendResume::SR_SUSPENDED) { aoqi@0: ShouldNotReachHere(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: } else if (state == os::SuspendResume::SR_RUNNING) { aoqi@0: // request was cancelled, continue aoqi@0: } else { aoqi@0: ShouldNotReachHere(); aoqi@0: } aoqi@0: aoqi@0: resume_clear_context(osthread); aoqi@0: } else if (current == os::SuspendResume::SR_RUNNING) { aoqi@0: // request was cancelled, continue aoqi@0: } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) { aoqi@0: // ignore aoqi@0: } else { aoqi@0: // ignore aoqi@0: } aoqi@0: aoqi@0: errno = old_errno; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: static int SR_initialize() { aoqi@0: struct sigaction act; aoqi@0: char *s; aoqi@0: /* Get signal number to use for suspend/resume */ aoqi@0: if ((s = 
::getenv("_JAVA_SR_SIGNUM")) != 0) { aoqi@0: int sig = ::strtol(s, 0, 10); aoqi@0: if (sig > 0 || sig < NSIG) { aoqi@0: SR_signum = sig; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: assert(SR_signum > SIGSEGV && SR_signum > SIGBUS, aoqi@0: "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769"); aoqi@0: aoqi@0: sigemptyset(&SR_sigset); aoqi@0: sigaddset(&SR_sigset, SR_signum); aoqi@0: aoqi@0: /* Set up signal handler for suspend/resume */ aoqi@0: act.sa_flags = SA_RESTART|SA_SIGINFO; aoqi@0: act.sa_handler = (void (*)(int)) SR_handler; aoqi@0: aoqi@0: // SR_signum is blocked by default. aoqi@0: // 4528190 - We also need to block pthread restart signal (32 on all aoqi@0: // supported Bsd platforms). Note that BsdThreads need to block aoqi@0: // this signal for all threads to work properly. So we don't have aoqi@0: // to use hard-coded signal number when setting up the mask. aoqi@0: pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask); aoqi@0: aoqi@0: if (sigaction(SR_signum, &act, 0) == -1) { aoqi@0: return -1; aoqi@0: } aoqi@0: aoqi@0: // Save signal flag aoqi@0: os::Bsd::set_our_sigflags(SR_signum, act.sa_flags); aoqi@0: return 0; aoqi@0: } aoqi@0: aoqi@0: static int sr_notify(OSThread* osthread) { aoqi@0: int status = pthread_kill(osthread->pthread_id(), SR_signum); aoqi@0: assert_status(status == 0, status, "pthread_kill"); aoqi@0: return status; aoqi@0: } aoqi@0: aoqi@0: // "Randomly" selected value for how long we want to spin aoqi@0: // before bailing out on suspending a thread, also how often aoqi@0: // we send a signal to a thread we want to resume aoqi@0: static const int RANDOMLY_LARGE_INTEGER = 1000000; aoqi@0: static const int RANDOMLY_LARGE_INTEGER2 = 100; aoqi@0: aoqi@0: // returns true on success and false on error - really an error is fatal aoqi@0: // but this seems the normal response to library errors aoqi@0: static bool do_suspend(OSThread* osthread) { aoqi@0: assert(osthread->sr.is_running(), "thread should be running"); aoqi@0: 
assert(!sr_semaphore.trywait(), "semaphore has invalid state"); aoqi@0: aoqi@0: // mark as suspended and send signal aoqi@0: if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) { aoqi@0: // failed to switch, state wasn't running? aoqi@0: ShouldNotReachHere(); aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: if (sr_notify(osthread) != 0) { aoqi@0: ShouldNotReachHere(); aoqi@0: } aoqi@0: aoqi@0: // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED aoqi@0: while (true) { aoqi@0: if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { aoqi@0: break; aoqi@0: } else { aoqi@0: // timeout aoqi@0: os::SuspendResume::State cancelled = osthread->sr.cancel_suspend(); aoqi@0: if (cancelled == os::SuspendResume::SR_RUNNING) { aoqi@0: return false; aoqi@0: } else if (cancelled == os::SuspendResume::SR_SUSPENDED) { aoqi@0: // make sure that we consume the signal on the semaphore as well aoqi@0: sr_semaphore.wait(); aoqi@0: break; aoqi@0: } else { aoqi@0: ShouldNotReachHere(); aoqi@0: return false; aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: guarantee(osthread->sr.is_suspended(), "Must be suspended"); aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: static void do_resume(OSThread* osthread) { aoqi@0: assert(osthread->sr.is_suspended(), "thread should be suspended"); aoqi@0: assert(!sr_semaphore.trywait(), "invalid semaphore state"); aoqi@0: aoqi@0: if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) { aoqi@0: // failed to switch to WAKEUP_REQUEST aoqi@0: ShouldNotReachHere(); aoqi@0: return; aoqi@0: } aoqi@0: aoqi@0: while (true) { aoqi@0: if (sr_notify(osthread) == 0) { aoqi@0: if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) { aoqi@0: if (osthread->sr.is_running()) { aoqi@0: return; aoqi@0: } aoqi@0: } aoqi@0: } else { aoqi@0: ShouldNotReachHere(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: guarantee(osthread->sr.is_running(), "Must be running!"); aoqi@0: } aoqi@0: aoqi@0: 
////////////////////////////////////////////////////////////////////////////////
// interrupt support

// Set the interrupt flag on 'thread' and unpark any event it may be
// blocked on (sleep, JSR-166 parker, or the generic park event).
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
    "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  if (!osthread->interrupted()) {
    osthread->set_interrupted(true);
    // More than one thread can get here with the same value of osthread,
    // resulting in multiple notifications.  We do, however, want the store
    // to interrupted() to be visible to other threads before we execute unpark().
    OrderAccess::fence();
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166. Unpark even if interrupt status already was set
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}

// Query (and optionally clear) the interrupt flag of 'thread'.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
    "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool interrupted = osthread->interrupted();

  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    // consider thread->_SleepEvent->reset() ... optional optimization
  }

  return interrupted;
}

///////////////////////////////////////////////////////////////////////////////////
// signal handling (except suspend/resume)

// This routine may be used by user applications as a "hook" to catch signals.
// The user-defined signal handler must pass unrecognized signals to this
// routine, and if it returns true (non-zero), then the signal handler must
// return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine kill the process.
//
// If this routine returns false, it is OK to call it again.  This allows
// the user-defined signal handler to perform checks either before or after
// the VM performs its own checks.  Naturally, the user code would be making
// a serious error if it tried to handle an exception (such as a null check
// or breakpoint) that the VM was generating for its own correct operation.
//
// This routine may recognize any of the following kinds of signals:
//    SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
// It should be consulted by handlers for any of those signals.
//
// The caller of this routine must pass in the three arguments supplied
// to the function referred to in the "sa_sigaction" (not the "sa_handler")
// field of the structure passed to sigaction().  This routine assumes that
// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
//
// Note that the VM will print warnings if it detects conflicting signal
// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
//
extern "C" JNIEXPORT int
JVM_handle_bsd_signal(int signo, siginfo_t* siginfo,
                      void* ucontext, int abort_if_unrecognized);

// Shared entry point installed for all VM-handled signals: preserves errno
// across the handler and forwards to the platform dispatcher above.
void signalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");
  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_bsd_signal(sig, info, uc, true);
  errno = orig_errno;
}


// This boolean allows users to forward their own non-matching signals
// to JVM_handle_bsd_signal, harmlessly.
bool os::Bsd::signal_handlers_are_installed = false;

// For signal-chaining
struct sigaction os::Bsd::sigact[MAXSIGNUM];
unsigned int os::Bsd::sigs = 0;   // bitmask of entries saved in sigact[]
bool os::Bsd::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Bsd::get_signal_action = NULL;

// Return the handler that was in place before the VM installed its own:
// ask libjsig first (if loaded), then fall back to the VM's saved copy.
struct sigaction* os::Bsd::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if (libjsig_is_loaded) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}

// Invoke a previously-installed (chained) handler, honoring its sa_flags
// and signal mask. Returns false for SIG_DFL (the VM should treat the
// signal as an unexpected exception itself); returns true otherwise,
// i.e. once the chained handler has run or the signal was SIG_IGN.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      // one-shot handler: reset to default before invoking, as the
      // kernel would have done
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}

// Chain 'sig' to any pre-existing handler; returns whether one was called.
bool os::Bsd::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  bool chained = false;
  // signal-chaining
  if (UseSignalChaining) {
    struct sigaction *actp = get_chained_signal_action(sig);
    if (actp != NULL) {
      chained = call_chained_handler(actp, sig, siginfo, context);
    }
  }
  return chained;
}

// Return the handler saved by save_preinstalled_handler(), or NULL if
// none was recorded for this signal.
struct sigaction* os::Bsd::get_preinstalled_handler(int sig) {
  if ((( (unsigned int)1 << sig ) & sigs) != 0) {
    return &sigact[sig];
  }
  return NULL;
}

// Remember the handler that was installed before ours, for chaining.
void os::Bsd::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigs |= (unsigned int)1 << sig;
}

// for diagnostic
int os::Bsd::sigflags[MAXSIGNUM];

int os::Bsd::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  return sigflags[sig];
}

void os::Bsd::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = flags;
}

// Install (or, with set_installed == false, reset to default) the VM's
// handler for 'sig', preserving any pre-existing user handler for
// chaining or failing fatally if that is not allowed.
void os::Bsd::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
                ?
CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;
  if (!set_installed) {
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  } else {
    sigAct.sa_sigaction = signalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
#ifdef __APPLE__
  // Needed for main thread as XNU (Mac OS X kernel) will only deliver SIGSEGV
  // (which starts as SIGBUS) on main thread with faulting address inside "stack+guard pages"
  // if the signal handler declares it will handle it on alternate stack.
  // Notice we only declare we will handle it on alt stack, but we are not
  // actually going to use real alt stack - this is just a workaround.
  // Please see ux_exception.c, method catch_mach_exception_raise for details
  // link http://www.opensource.apple.com/source/xnu/xnu-2050.18.24/bsd/uxkern/ux_exception.c
  if (sig == SIGSEGV) {
    sigAct.sa_flags |= SA_ONSTACK;
  }
#endif

  // Save flags, which are set by ours
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read the previous handler and verify nobody raced us.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}

// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.

void os::Bsd::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining: probe for libjsig's interposition entry points
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers
      (*begin_signal_setting)();
    }

    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGXFSZ, true);

#if defined(__APPLE__)
    // In Mac OS X 10.4, CrashReporter will write a crash log for all 'fatal' signals, including
    // signals caught and handled by the JVM. To work around this, we reset the mach task
    // signal handler that's placed on our process by CrashReporter. This disables
    // CrashReporter-based reporting.
    //
    // This work-around is not necessary for 10.5+, as CrashReporter no longer intercedes
    // on caught fatal signals.
    //
    // Additionally, gdb installs both standard BSD signal handlers, and mach exception
    // handlers. By replacing the existing task exception handler, we disable gdb's mach
    // exception handling, while leaving the standard BSD signal handlers functional.
    kern_return_t kr;
    kr = task_set_exception_ports(mach_task_self(),
                                  EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
                                  MACH_PORT_NULL,
                                  EXCEPTION_STATE_IDENTITY,
                                  MACHINE_THREAD_STATE);

    assert(kr == KERN_SUCCESS, "could not set mach task signal handler");
#endif

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        if (PrintJNIResolving) {
          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        }
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        if (PrintJNIResolving) {
          tty->print_cr("Info: AllowUserSignalHandlers is 
activated, all active signal checking is disabled");
        }
        check_signals = false;
      }
    }
  }
}


/////
// glibc on Bsd platform uses non-documented flag
// to indicate, that some special sort of signal
// trampoline is used.
// We will never set this flag, and we should
// ignore this flag in our diagnostic
#ifdef SIGNIFICANT_SIGNAL_MASK
#undef SIGNIFICANT_SIGNAL_MASK
#endif
#define SIGNIFICANT_SIGNAL_MASK (~0x04000000)

// Format a human-readable name for 'handler' into 'buf': either
// "library+0xoffset" (basename only) or the raw pointer value.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}

// Print one line describing the currently installed handler for 'sig':
// handler identity, blocked mask, sa_flags, and a warning when our own
// handler's flags have been altered behind our back.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  // See comment for SIGNIFICANT_SIGNAL_MASK define
  sa.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
  }

  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler
    // check for flags, reset system-used one!
    if((int)sa.sa_flags != os::Bsd::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Bsd::get_our_sigflags(sig));
    }
  }
  st->cr();
}


#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Bsd::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);


  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}

typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;

// Compare the handler currently installed for 'sig' against the one the
// VM expects, warning once (and recording the signal as checked) on any
// mismatch of handler or flags.
void os::Bsd::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;


  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has
been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);


  act.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  case INTERRUPT_SIGNAL:
    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if(os::Bsd::get_our_sigflags(sig) != 0 &&
            (int)act.sa_flags != os::Bsd::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Bsd::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}

extern void report_error(char* file_name, int line_no, char* title, char* format, ...);

extern bool signal_name(int signo, char* buf, size_t len);

// Map a signal number to a printable name ("SIGxxx"), formatting into
// 'buf'; returns NULL for values outside (0, SIGRTMAX].
const char* os::exception_name(int exception_code, char* buf, size_t size) {
  if (0 < exception_code && exception_code <= SIGRTMAX) {
    // signal
    if (!signal_name(exception_code, buf, size)) {
      jio_snprintf(buf, size, "SIG%d", exception_code);
    }
    return buf;
  } else {
    return NULL;
  }
}

// this is called _before_ the most of global arguments have been parsed
void os::init(void) {
  char dummy;   /* used to get a guess on initial stack address */
//  first_hrtime = gethrtime();

  // With BsdThreads the JavaMain thread pid (primordial thread)
  // is different than the pid of the java launcher thread.
  // So, on Bsd, the launcher thread pid is passed to the VM
  // via the sun.java.launcher.pid property.
  // Use this property instead of getpid() if it was correctly passed.
  // See bug 6351349.
  pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();

  _initial_pid = (java_launcher_pid > 0) ?
java_launcher_pid : getpid(); aoqi@0: aoqi@0: clock_tics_per_sec = CLK_TCK; aoqi@0: aoqi@0: init_random(1234567); aoqi@0: aoqi@0: ThreadCritical::initialize(); aoqi@0: aoqi@0: Bsd::set_page_size(getpagesize()); aoqi@0: if (Bsd::page_size() == -1) { aoqi@0: fatal(err_msg("os_bsd.cpp: os::init: sysconf failed (%s)", aoqi@0: strerror(errno))); aoqi@0: } aoqi@0: init_page_sizes((size_t) Bsd::page_size()); aoqi@0: aoqi@0: Bsd::initialize_system_info(); aoqi@0: aoqi@0: // main_thread points to the aboriginal thread aoqi@0: Bsd::_main_thread = pthread_self(); aoqi@0: aoqi@0: Bsd::clock_init(); aoqi@0: initial_time_count = javaTimeNanos(); aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: // XXXDARWIN aoqi@0: // Work around the unaligned VM callbacks in hotspot's aoqi@0: // sharedRuntime. The callbacks don't use SSE2 instructions, and work on aoqi@0: // Linux, Solaris, and FreeBSD. On Mac OS X, dyld (rightly so) enforces aoqi@0: // alignment when doing symbol lookup. To work around this, we force early aoqi@0: // binding of all symbols now, thus binding when alignment is known-good. 
aoqi@0: _dyld_bind_fully_image_containing_address((const void *) &os::init); aoqi@0: #endif aoqi@0: } aoqi@0: aoqi@0: // To install functions for atexit system call aoqi@0: extern "C" { aoqi@0: static void perfMemory_exit_helper() { aoqi@0: perfMemory_exit(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // this is called _after_ the global arguments have been parsed aoqi@0: jint os::init_2(void) aoqi@0: { aoqi@0: // Allocate a single page and mark it as readable for safepoint polling aoqi@0: address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); aoqi@0: guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" ); aoqi@0: aoqi@0: os::set_polling_page( polling_page ); aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: if(Verbose && PrintMiscellaneous) aoqi@0: tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page); aoqi@0: #endif aoqi@0: aoqi@0: if (!UseMembar) { aoqi@0: address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); aoqi@0: guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page"); aoqi@0: os::set_memory_serialize_page( mem_serialize_page ); aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: if(Verbose && PrintMiscellaneous) aoqi@0: tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); aoqi@0: #endif aoqi@0: } aoqi@0: aoqi@0: // initialize suspend/resume support - must do this before signal_sets_init() aoqi@0: if (SR_initialize() != 0) { aoqi@0: perror("SR_initialize failed"); aoqi@0: return JNI_ERR; aoqi@0: } aoqi@0: aoqi@0: Bsd::signal_sets_init(); aoqi@0: Bsd::install_signal_handlers(); aoqi@0: aoqi@0: // Check minimum allowable stack size for thread creation and to initialize aoqi@0: // the java system classes, including StackOverflowError - depends on page aoqi@0: // size. Add a page for compiler2 recursion in main thread. 
aoqi@0: // Add in 2*BytesPerWord times page size to account for VM stack during aoqi@0: // class initialization depending on 32 or 64 bit VM. aoqi@0: os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed, aoqi@0: (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ aoqi@0: 2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size()); aoqi@0: aoqi@0: size_t threadStackSizeInBytes = ThreadStackSize * K; aoqi@0: if (threadStackSizeInBytes != 0 && aoqi@0: threadStackSizeInBytes < os::Bsd::min_stack_allowed) { aoqi@0: tty->print_cr("\nThe stack size specified is too small, " aoqi@0: "Specify at least %dk", aoqi@0: os::Bsd::min_stack_allowed/ K); aoqi@0: return JNI_ERR; aoqi@0: } aoqi@0: aoqi@0: // Make the stack size a multiple of the page size so that aoqi@0: // the yellow/red zones can be guarded. aoqi@0: JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, aoqi@0: vm_page_size())); aoqi@0: aoqi@0: if (MaxFDLimit) { aoqi@0: // set the number of file descriptors to max. print out error aoqi@0: // if getrlimit/setrlimit fails but continue regardless. aoqi@0: struct rlimit nbr_files; aoqi@0: int status = getrlimit(RLIMIT_NOFILE, &nbr_files); aoqi@0: if (status != 0) { aoqi@0: if (PrintMiscellaneous && (Verbose || WizardMode)) aoqi@0: perror("os::init_2 getrlimit failed"); aoqi@0: } else { aoqi@0: nbr_files.rlim_cur = nbr_files.rlim_max; aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: // Darwin returns RLIM_INFINITY for rlim_max, but fails with EINVAL if aoqi@0: // you attempt to use RLIM_INFINITY. As per setrlimit(2), OPEN_MAX must aoqi@0: // be used instead aoqi@0: nbr_files.rlim_cur = MIN(OPEN_MAX, nbr_files.rlim_cur); aoqi@0: #endif aoqi@0: aoqi@0: status = setrlimit(RLIMIT_NOFILE, &nbr_files); aoqi@0: if (status != 0) { aoqi@0: if (PrintMiscellaneous && (Verbose || WizardMode)) aoqi@0: perror("os::init_2 setrlimit failed"); aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // at-exit methods are called in the reverse order of their registration. 
aoqi@0: // atexit functions are called on return from main or as a result of a aoqi@0: // call to exit(3C). There can be only 32 of these functions registered aoqi@0: // and atexit() does not set errno. aoqi@0: aoqi@0: if (PerfAllowAtExitRegistration) { aoqi@0: // only register atexit functions if PerfAllowAtExitRegistration is set. aoqi@0: // atexit functions can be delayed until process exit time, which aoqi@0: // can be problematic for embedded VM situations. Embedded VMs should aoqi@0: // call DestroyJavaVM() to assure that VM resources are released. aoqi@0: aoqi@0: // note: perfMemory_exit_helper atexit function may be removed in aoqi@0: // the future if the appropriate cleanup code can be added to the aoqi@0: // VM_Exit VMOperation's doit method. aoqi@0: if (atexit(perfMemory_exit_helper) != 0) { aoqi@0: warning("os::init2 atexit(perfMemory_exit_helper) failed"); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // initialize thread priority policy aoqi@0: prio_init(); aoqi@0: aoqi@0: #ifdef __APPLE__ aoqi@0: // dynamically link to objective c gc registration aoqi@0: void *handleLibObjc = dlopen(OBJC_LIB, RTLD_LAZY); aoqi@0: if (handleLibObjc != NULL) { aoqi@0: objc_registerThreadWithCollectorFunction = (objc_registerThreadWithCollector_t) dlsym(handleLibObjc, OBJC_GCREGISTER); aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: return JNI_OK; aoqi@0: } aoqi@0: aoqi@0: // Mark the polling page as unreadable aoqi@0: void os::make_polling_page_unreadable(void) { aoqi@0: if( !guard_memory((char*)_polling_page, Bsd::page_size()) ) aoqi@0: fatal("Could not disable polling page"); aoqi@0: }; aoqi@0: aoqi@0: // Mark the polling page as readable aoqi@0: void os::make_polling_page_readable(void) { aoqi@0: if( !bsd_mprotect((char *)_polling_page, Bsd::page_size(), PROT_READ)) { aoqi@0: fatal("Could not enable polling page"); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: int os::active_processor_count() { aoqi@0: return _processor_count; aoqi@0: } aoqi@0: aoqi@0: void os::set_native_thread_name(const 
char *name) { aoqi@0: #if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5 aoqi@0: // This is only supported in Snow Leopard and beyond aoqi@0: if (name != NULL) { aoqi@0: // Add a "Java: " prefix to the name aoqi@0: char buf[MAXTHREADNAMESIZE]; aoqi@0: snprintf(buf, sizeof(buf), "Java: %s", name); aoqi@0: pthread_setname_np(buf); aoqi@0: } aoqi@0: #endif aoqi@0: } aoqi@0: aoqi@0: bool os::distribute_processes(uint length, uint* distribution) { aoqi@0: // Not yet implemented. aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: bool os::bind_to_processor(uint processor_id) { aoqi@0: // Not yet implemented. aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: void os::SuspendedThreadTask::internal_do_task() { aoqi@0: if (do_suspend(_thread->osthread())) { aoqi@0: SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext()); aoqi@0: do_task(context); aoqi@0: do_resume(_thread->osthread()); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: /// aoqi@0: class PcFetcher : public os::SuspendedThreadTask { aoqi@0: public: aoqi@0: PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {} aoqi@0: ExtendedPC result(); aoqi@0: protected: aoqi@0: void do_task(const os::SuspendedThreadTaskContext& context); aoqi@0: private: aoqi@0: ExtendedPC _epc; aoqi@0: }; aoqi@0: aoqi@0: ExtendedPC PcFetcher::result() { aoqi@0: guarantee(is_done(), "task is not done yet."); aoqi@0: return _epc; aoqi@0: } aoqi@0: aoqi@0: void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) { aoqi@0: Thread* thread = context.thread(); aoqi@0: OSThread* osthread = thread->osthread(); aoqi@0: if (osthread->ucontext() != NULL) { aoqi@0: _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext()); aoqi@0: } else { aoqi@0: // NULL context is unexpected, double-check this is the VMThread aoqi@0: guarantee(thread->is_VM_thread(), "can only be called for VMThread"); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // Suspends the target using the signal mechanism and then grabs the 
PC before aoqi@0: // resuming the target. Used by the flat-profiler only aoqi@0: ExtendedPC os::get_thread_pc(Thread* thread) { aoqi@0: // Make sure that it is called by the watcher for the VMThread aoqi@0: assert(Thread::current()->is_Watcher_thread(), "Must be watcher"); aoqi@0: assert(thread->is_VM_thread(), "Can only be called for VMThread"); aoqi@0: aoqi@0: PcFetcher fetcher(thread); aoqi@0: fetcher.run(); aoqi@0: return fetcher.result(); aoqi@0: } aoqi@0: aoqi@0: int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) aoqi@0: { aoqi@0: return pthread_cond_timedwait(_cond, _mutex, _abstime); aoqi@0: } aoqi@0: aoqi@0: //////////////////////////////////////////////////////////////////////////////// aoqi@0: // debug support aoqi@0: aoqi@0: bool os::find(address addr, outputStream* st) { aoqi@0: Dl_info dlinfo; aoqi@0: memset(&dlinfo, 0, sizeof(dlinfo)); aoqi@0: if (dladdr(addr, &dlinfo) != 0) { aoqi@0: st->print(PTR_FORMAT ": ", addr); aoqi@0: if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) { aoqi@0: st->print("%s+%#x", dlinfo.dli_sname, aoqi@0: addr - (intptr_t)dlinfo.dli_saddr); aoqi@0: } else if (dlinfo.dli_fbase != NULL) { aoqi@0: st->print("", addr - (intptr_t)dlinfo.dli_fbase); aoqi@0: } else { aoqi@0: st->print(""); aoqi@0: } aoqi@0: if (dlinfo.dli_fname != NULL) { aoqi@0: st->print(" in %s", dlinfo.dli_fname); aoqi@0: } aoqi@0: if (dlinfo.dli_fbase != NULL) { aoqi@0: st->print(" at " PTR_FORMAT, dlinfo.dli_fbase); aoqi@0: } aoqi@0: st->cr(); aoqi@0: aoqi@0: if (Verbose) { aoqi@0: // decode some bytes around the PC aoqi@0: address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size()); aoqi@0: address end = clamp_address_in_page(addr+40, addr, os::vm_page_size()); aoqi@0: address lowest = (address) dlinfo.dli_sname; aoqi@0: if (!lowest) lowest = (address) dlinfo.dli_fbase; aoqi@0: if (begin < lowest) begin = lowest; aoqi@0: Dl_info dlinfo2; aoqi@0: if (dladdr(end, &dlinfo2) != 0 
&& dlinfo2.dli_saddr != dlinfo.dli_saddr aoqi@0: && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) aoqi@0: end = (address) dlinfo2.dli_saddr; aoqi@0: Disassembler::decode(begin, end, st); aoqi@0: } aoqi@0: return true; aoqi@0: } aoqi@0: return false; aoqi@0: } aoqi@0: aoqi@0: //////////////////////////////////////////////////////////////////////////////// aoqi@0: // misc aoqi@0: aoqi@0: // This does not do anything on Bsd. This is basically a hook for being aoqi@0: // able to use structured exception handling (thread-local exception filters) aoqi@0: // on, e.g., Win32. aoqi@0: void aoqi@0: os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, aoqi@0: JavaCallArguments* args, Thread* thread) { aoqi@0: f(value, method, args, thread); aoqi@0: } aoqi@0: aoqi@0: void os::print_statistics() { aoqi@0: } aoqi@0: aoqi@0: int os::message_box(const char* title, const char* message) { aoqi@0: int i; aoqi@0: fdStream err(defaultStream::error_fd()); aoqi@0: for (i = 0; i < 78; i++) err.print_raw("="); aoqi@0: err.cr(); aoqi@0: err.print_raw_cr(title); aoqi@0: for (i = 0; i < 78; i++) err.print_raw("-"); aoqi@0: err.cr(); aoqi@0: err.print_raw_cr(message); aoqi@0: for (i = 0; i < 78; i++) err.print_raw("="); aoqi@0: err.cr(); aoqi@0: aoqi@0: char buf[16]; aoqi@0: // Prevent process from exiting upon "read error" without consuming all CPU aoqi@0: while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); } aoqi@0: aoqi@0: return buf[0] == 'y' || buf[0] == 'Y'; aoqi@0: } aoqi@0: aoqi@0: int os::stat(const char *path, struct stat *sbuf) { aoqi@0: char pathbuf[MAX_PATH]; aoqi@0: if (strlen(path) > MAX_PATH - 1) { aoqi@0: errno = ENAMETOOLONG; aoqi@0: return -1; aoqi@0: } aoqi@0: os::native_path(strcpy(pathbuf, path)); aoqi@0: return ::stat(pathbuf, sbuf); aoqi@0: } aoqi@0: aoqi@0: bool os::check_heap(bool force) { aoqi@0: return true; aoqi@0: } aoqi@0: aoqi@0: ATTRIBUTE_PRINTF(3, 0) aoqi@0: int local_vsnprintf(char* buf, size_t count, const char* 
format, va_list args) { aoqi@0: return ::vsnprintf(buf, count, format, args); aoqi@0: } aoqi@0: aoqi@0: // Is a (classpath) directory empty? aoqi@0: bool os::dir_is_empty(const char* path) { aoqi@0: DIR *dir = NULL; aoqi@0: struct dirent *ptr; aoqi@0: aoqi@0: dir = opendir(path); aoqi@0: if (dir == NULL) return true; aoqi@0: aoqi@0: /* Scan the directory */ aoqi@0: bool result = true; aoqi@0: char buf[sizeof(struct dirent) + MAX_PATH]; aoqi@0: while (result && (ptr = ::readdir(dir)) != NULL) { aoqi@0: if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) { aoqi@0: result = false; aoqi@0: } aoqi@0: } aoqi@0: closedir(dir); aoqi@0: return result; aoqi@0: } aoqi@0: aoqi@0: // This code originates from JDK's sysOpen and open64_w aoqi@0: // from src/solaris/hpi/src/system_md.c aoqi@0: aoqi@0: #ifndef O_DELETE aoqi@0: #define O_DELETE 0x10000 aoqi@0: #endif aoqi@0: aoqi@0: // Open a file. Unlink the file immediately after open returns aoqi@0: // if the specified oflag has the O_DELETE flag set. 
aoqi@0: // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c aoqi@0: aoqi@0: int os::open(const char *path, int oflag, int mode) { aoqi@0: aoqi@0: if (strlen(path) > MAX_PATH - 1) { aoqi@0: errno = ENAMETOOLONG; aoqi@0: return -1; aoqi@0: } aoqi@0: int fd; aoqi@0: int o_delete = (oflag & O_DELETE); aoqi@0: oflag = oflag & ~O_DELETE; aoqi@0: aoqi@0: fd = ::open(path, oflag, mode); aoqi@0: if (fd == -1) return -1; aoqi@0: aoqi@0: //If the open succeeded, the file might still be a directory aoqi@0: { aoqi@0: struct stat buf; aoqi@0: int ret = ::fstat(fd, &buf); aoqi@0: int st_mode = buf.st_mode; aoqi@0: aoqi@0: if (ret != -1) { aoqi@0: if ((st_mode & S_IFMT) == S_IFDIR) { aoqi@0: errno = EISDIR; aoqi@0: ::close(fd); aoqi@0: return -1; aoqi@0: } aoqi@0: } else { aoqi@0: ::close(fd); aoqi@0: return -1; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: /* aoqi@0: * All file descriptors that are opened in the JVM and not aoqi@0: * specifically destined for a subprocess should have the aoqi@0: * close-on-exec flag set. If we don't set it, then careless 3rd aoqi@0: * party native code might fork and exec without closing all aoqi@0: * appropriate file descriptors (e.g. as we do in closeDescriptors in aoqi@0: * UNIXProcess.c), and this in turn might: aoqi@0: * aoqi@0: * - cause end-of-file to fail to be detected on some file aoqi@0: * descriptors, resulting in mysterious hangs, or aoqi@0: * aoqi@0: * - might cause an fopen in the subprocess to fail on a system aoqi@0: * suffering from bug 1085341. 
aoqi@0: * aoqi@0: * (Yes, the default setting of the close-on-exec flag is a Unix aoqi@0: * design flaw) aoqi@0: * aoqi@0: * See: aoqi@0: * 1085341: 32-bit stdio routines should support file descriptors >255 aoqi@0: * 4843136: (process) pipe file descriptor from Runtime.exec not being closed aoqi@0: * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9 aoqi@0: */ aoqi@0: #ifdef FD_CLOEXEC aoqi@0: { aoqi@0: int flags = ::fcntl(fd, F_GETFD); aoqi@0: if (flags != -1) aoqi@0: ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC); aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: if (o_delete != 0) { aoqi@0: ::unlink(path); aoqi@0: } aoqi@0: return fd; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: // create binary file, rewriting existing file if required aoqi@0: int os::create_binary_file(const char* path, bool rewrite_existing) { aoqi@0: int oflags = O_WRONLY | O_CREAT; aoqi@0: if (!rewrite_existing) { aoqi@0: oflags |= O_EXCL; aoqi@0: } aoqi@0: return ::open(path, oflags, S_IREAD | S_IWRITE); aoqi@0: } aoqi@0: aoqi@0: // return current position of file pointer aoqi@0: jlong os::current_file_offset(int fd) { aoqi@0: return (jlong)::lseek(fd, (off_t)0, SEEK_CUR); aoqi@0: } aoqi@0: aoqi@0: // move file pointer to the specified offset aoqi@0: jlong os::seek_to_file_offset(int fd, jlong offset) { aoqi@0: return (jlong)::lseek(fd, (off_t)offset, SEEK_SET); aoqi@0: } aoqi@0: aoqi@0: // This code originates from JDK's sysAvailable aoqi@0: // from src/solaris/hpi/src/native_threads/src/sys_api_td.c aoqi@0: aoqi@0: int os::available(int fd, jlong *bytes) { aoqi@0: jlong cur, end; aoqi@0: int mode; aoqi@0: struct stat buf; aoqi@0: aoqi@0: if (::fstat(fd, &buf) >= 0) { aoqi@0: mode = buf.st_mode; aoqi@0: if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { aoqi@0: /* aoqi@0: * XXX: is the following call interruptible? If so, this might aoqi@0: * need to go through the INTERRUPT_IO() wrapper as for other aoqi@0: * blocking, interruptible calls in this file. 
aoqi@0: */ aoqi@0: int n; aoqi@0: if (::ioctl(fd, FIONREAD, &n) >= 0) { aoqi@0: *bytes = n; aoqi@0: return 1; aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: if ((cur = ::lseek(fd, 0L, SEEK_CUR)) == -1) { aoqi@0: return 0; aoqi@0: } else if ((end = ::lseek(fd, 0L, SEEK_END)) == -1) { aoqi@0: return 0; aoqi@0: } else if (::lseek(fd, cur, SEEK_SET) == -1) { aoqi@0: return 0; aoqi@0: } aoqi@0: *bytes = end - cur; aoqi@0: return 1; aoqi@0: } aoqi@0: aoqi@0: int os::socket_available(int fd, jint *pbytes) { aoqi@0: if (fd < 0) aoqi@0: return OS_OK; aoqi@0: aoqi@0: int ret; aoqi@0: aoqi@0: RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret); aoqi@0: aoqi@0: //%% note ioctl can return 0 when successful, JVM_SocketAvailable aoqi@0: // is expected to return 0 on failure and 1 on success to the jdk. aoqi@0: aoqi@0: return (ret == OS_ERR) ? 0 : 1; aoqi@0: } aoqi@0: aoqi@0: // Map a block of memory. aoqi@0: char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, aoqi@0: char *addr, size_t bytes, bool read_only, aoqi@0: bool allow_exec) { aoqi@0: int prot; aoqi@0: int flags; aoqi@0: aoqi@0: if (read_only) { aoqi@0: prot = PROT_READ; aoqi@0: flags = MAP_SHARED; aoqi@0: } else { aoqi@0: prot = PROT_READ | PROT_WRITE; aoqi@0: flags = MAP_PRIVATE; aoqi@0: } aoqi@0: aoqi@0: if (allow_exec) { aoqi@0: prot |= PROT_EXEC; aoqi@0: } aoqi@0: aoqi@0: if (addr != NULL) { aoqi@0: flags |= MAP_FIXED; aoqi@0: } aoqi@0: aoqi@0: char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags, aoqi@0: fd, file_offset); aoqi@0: if (mapped_address == MAP_FAILED) { aoqi@0: return NULL; aoqi@0: } aoqi@0: return mapped_address; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: // Remap a block of memory. 
// Remap is identical to a fresh mapping on this OS.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
}

// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

jlong os::current_thread_cpu_time() {
#ifdef __APPLE__
  return os::thread_cpu_time(Thread::current(), true /* user + sys */);
#else
  Unimplemented();
  return 0;
#endif
}

jlong os::thread_cpu_time(Thread* thread) {
#ifdef __APPLE__
  return os::thread_cpu_time(thread, true /* user + sys */);
#else
  Unimplemented();
  return 0;
#endif
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
#ifdef __APPLE__
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
#else
  Unimplemented();
  return 0;
#endif
}

// Query a thread's CPU time (nanoseconds) from the Mach kernel.
// Returns -1 if the thread_info() call fails.
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
#ifdef __APPLE__
  struct thread_basic_info tinfo;
  mach_msg_type_number_t tcount = THREAD_INFO_MAX;
  kern_return_t kr;
  thread_t mach_thread;

  mach_thread = thread->osthread()->thread_id();
  kr = thread_info(mach_thread, THREAD_BASIC_INFO, (thread_info_t)&tinfo, &tcount);
  if (kr != KERN_SUCCESS)
    return -1;

  if (user_sys_cpu_time) {
    jlong nanos;
    // Combine user + system time, converting seconds/microseconds to ns.
    nanos = ((jlong) tinfo.system_time.seconds + tinfo.user_time.seconds) * (jlong)1000000000;
    nanos += ((jlong) tinfo.system_time.microseconds + (jlong) tinfo.user_time.microseconds) * (jlong)1000;
    return nanos;
  } else {
    return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000);
  }
#else
  Unimplemented();
  return 0;
#endif
}


void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

bool os::is_thread_cpu_time_supported() {
#ifdef __APPLE__
  return true;
#else
  return false;
#endif
}

// System loadavg support.  Returns -1 if load average cannot be obtained.
// Bsd doesn't yet have a (official) notion of processor sets,
// so just return the system wide load average.
aoqi@0: int os::loadavg(double loadavg[], int nelem) { aoqi@0: return ::getloadavg(loadavg, nelem); aoqi@0: } aoqi@0: aoqi@0: void os::pause() { aoqi@0: char filename[MAX_PATH]; aoqi@0: if (PauseAtStartupFile && PauseAtStartupFile[0]) { aoqi@0: jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); aoqi@0: } else { aoqi@0: jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); aoqi@0: } aoqi@0: aoqi@0: int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); aoqi@0: if (fd != -1) { aoqi@0: struct stat buf; aoqi@0: ::close(fd); aoqi@0: while (::stat(filename, &buf) == 0) { aoqi@0: (void)::poll(NULL, 0, 100); aoqi@0: } aoqi@0: } else { aoqi@0: jio_fprintf(stderr, aoqi@0: "Could not open pause file '%s', continuing immediately.\n", filename); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: aoqi@0: // Refer to the comments in os_solaris.cpp park-unpark. aoqi@0: // aoqi@0: // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can aoqi@0: // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable. aoqi@0: // For specifics regarding the bug see GLIBC BUGID 261237 : aoqi@0: // http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html. aoqi@0: // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future aoqi@0: // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar aoqi@0: // is used. (The simple C test-case provided in the GLIBC bug report manifests the aoqi@0: // hang). The JVM is vulernable via sleep(), Object.wait(timo), LockSupport.parkNanos() aoqi@0: // and monitorenter when we're using 1-0 locking. All those operations may result in aoqi@0: // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version aoqi@0: // of libpthread avoids the problem, but isn't practical. aoqi@0: // aoqi@0: // Possible remedies: aoqi@0: // aoqi@0: // 1. Establish a minimum relative wait time. 50 to 100 msecs seems to work. 
aoqi@0: // This is palliative and probabilistic, however. If the thread is preempted aoqi@0: // between the call to compute_abstime() and pthread_cond_timedwait(), more aoqi@0: // than the minimum period may have passed, and the abstime may be stale (in the aoqi@0: // past) resultin in a hang. Using this technique reduces the odds of a hang aoqi@0: // but the JVM is still vulnerable, particularly on heavily loaded systems. aoqi@0: // aoqi@0: // 2. Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead aoqi@0: // of the usual flag-condvar-mutex idiom. The write side of the pipe is set aoqi@0: // NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo) aoqi@0: // reduces to poll()+read(). This works well, but consumes 2 FDs per extant aoqi@0: // thread. aoqi@0: // aoqi@0: // 3. Embargo pthread_cond_timedwait() and implement a native "chron" thread aoqi@0: // that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing aoqi@0: // a timeout request to the chron thread and then blocking via pthread_cond_wait(). aoqi@0: // This also works well. In fact it avoids kernel-level scalability impediments aoqi@0: // on certain platforms that don't handle lots of active pthread_cond_timedwait() aoqi@0: // timers in a graceful fashion. aoqi@0: // aoqi@0: // 4. When the abstime value is in the past it appears that control returns aoqi@0: // correctly from pthread_cond_timedwait(), but the condvar is left corrupt. aoqi@0: // Subsequent timedwait/wait calls may hang indefinitely. Given that, we aoqi@0: // can avoid the problem by reinitializing the condvar -- by cond_destroy() aoqi@0: // followed by cond_init() -- after all calls to pthread_cond_timedwait(). aoqi@0: // It may be possible to avoid reinitialization by checking the return aoqi@0: // value from pthread_cond_timedwait(). 
In addition to reinitializing the aoqi@0: // condvar we must establish the invariant that cond_signal() is only called aoqi@0: // within critical sections protected by the adjunct mutex. This prevents aoqi@0: // cond_signal() from "seeing" a condvar that's in the midst of being aoqi@0: // reinitialized or that is corrupt. Sadly, this invariant obviates the aoqi@0: // desirable signal-after-unlock optimization that avoids futile context switching. aoqi@0: // aoqi@0: // I'm also concerned that some versions of NTPL might allocate an auxilliary aoqi@0: // structure when a condvar is used or initialized. cond_destroy() would aoqi@0: // release the helper structure. Our reinitialize-after-timedwait fix aoqi@0: // put excessive stress on malloc/free and locks protecting the c-heap. aoqi@0: // aoqi@0: // We currently use (4). See the WorkAroundNTPLTimedWaitHang flag. aoqi@0: // It may be possible to refine (4) by checking the kernel and NTPL verisons aoqi@0: // and only enabling the work-around for vulnerable environments. 

// utility to compute the abstime argument to timedwait:
// millis is the relative timeout time
// abstime will be the absolute timeout time
// TODO: replace compute_abstime() with unpackTime()

static struct timespec* compute_abstime(struct timespec* abstime, jlong millis) {
  if (millis < 0)  millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  millis %= 1000;
  if (seconds > 50000000) { // see man cond_timedwait(3T)
    seconds = 50000000;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  // Carry microsecond overflow into the seconds field.
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}


// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // CAS loop: retry until we atomically observe-and-clear the event.
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  }
}

void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  int v ;
  for (;;) {
      // Atomically decrement _Event; 1 -> 0 consumes a pending unpark,
      // 0 -> -1 records that we are about to block.
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
     // Do this the hard way by blocking ...
     int status = pthread_mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     guarantee (_nParked == 0, "invariant") ;
     ++ _nParked ;
     while (_Event < 0) {
        status = pthread_cond_wait(_cond, _mutex);
        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted
        if (status == ETIMEDOUT) { status = EINTR; }
        assert_status(status == 0 || status == EINTR, status, "cond_wait");
     }
     -- _nParked ;

    _Event = 0 ;
     status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
  guarantee (_Event >= 0, "invariant") ;
}

// Timed park; returns OS_OK if unparked, OS_TIMEOUT on timeout.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;

  int v ;
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  // A pending unpark was consumed; return without blocking.
  if (v != 0) return OS_OK ;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++_nParked ;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // The underlying Solaris implementation, cond_timedwait, admits
  // spurious/premature wakeups, but the JLS/JVM spec prevents the
  // JVM from making those visible to Java code.  As such, we must
  // filter out spurious wakeups.  We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      // See remedy (4) in the NPTL comment above: reinitialize the
      // condvar after a failed timedwait to avoid a corrupt condvar.
      pthread_cond_destroy (_cond);
      pthread_cond_init (_cond, NULL) ;
    }
    assert_status(status == 0 || status == EINTR ||
                  status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ;                 // previous semantics
    if (status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked ;
  if (_Event >= 0) {
     ret = OS_OK;
  }
  _Event = 0 ;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant") ;
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}

void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Wait for the thread associated with the event to vacate
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  assert(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
    // With the NPTL workaround, signal while still holding the mutex
    // (see invariant discussion in the comment block above).
    AnyWaiters = 0;
    pthread_cond_signal(_cond);
  }
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  if (AnyWaiters != 0) {
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }

  // Note that we signal() _after dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of  futile wakeups.  In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
}


// JSR166
// -------------------------------------------------------

/*
 * The solaris and bsd implementations of park/unpark are fairly
 * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus a count.
 * Park decrements count if > 0, else does a condvar wait.
Unpark aoqi@0: * sets count to 1 and signals condvar. Only one thread ever waits aoqi@0: * on the condvar. Contention seen when trying to park implies that someone aoqi@0: * is unparking you, so don't wait. And spurious returns are fine, so there aoqi@0: * is no need to track notifications. aoqi@0: */ aoqi@0: aoqi@0: #define MAX_SECS 100000000 aoqi@0: /* aoqi@0: * This code is common to bsd and solaris and will be moved to a aoqi@0: * common place in dolphin. aoqi@0: * aoqi@0: * The passed in time value is either a relative time in nanoseconds aoqi@0: * or an absolute time in milliseconds. Either way it has to be unpacked aoqi@0: * into suitable seconds and nanoseconds components and stored in the aoqi@0: * given timespec structure. aoqi@0: * Given time is a 64-bit value and the time_t used in the timespec is only aoqi@0: * a signed-32-bit value (except on 64-bit Bsd) we have to watch for aoqi@0: * overflow if times way in the future are given. Further on Solaris versions aoqi@0: * prior to 10 there is a restriction (see cond_timedwait) that the specified aoqi@0: * number of seconds, in abstime, is less than current_time + 100,000,000. aoqi@0: * As it will be 28 years before "now + 100000000" will overflow we can aoqi@0: * ignore overflow and just impose a hard-limit on seconds using the value aoqi@0: * of "now + 100,000,000". This places a limit on the timeout of about 3.17 aoqi@0: * years from "now". 
aoqi@0: */ aoqi@0: aoqi@0: static void unpackTime(struct timespec* absTime, bool isAbsolute, jlong time) { aoqi@0: assert (time > 0, "convertTime"); aoqi@0: aoqi@0: struct timeval now; aoqi@0: int status = gettimeofday(&now, NULL); aoqi@0: assert(status == 0, "gettimeofday"); aoqi@0: aoqi@0: time_t max_secs = now.tv_sec + MAX_SECS; aoqi@0: aoqi@0: if (isAbsolute) { aoqi@0: jlong secs = time / 1000; aoqi@0: if (secs > max_secs) { aoqi@0: absTime->tv_sec = max_secs; aoqi@0: } aoqi@0: else { aoqi@0: absTime->tv_sec = secs; aoqi@0: } aoqi@0: absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC; aoqi@0: } aoqi@0: else { aoqi@0: jlong secs = time / NANOSECS_PER_SEC; aoqi@0: if (secs >= MAX_SECS) { aoqi@0: absTime->tv_sec = max_secs; aoqi@0: absTime->tv_nsec = 0; aoqi@0: } aoqi@0: else { aoqi@0: absTime->tv_sec = now.tv_sec + secs; aoqi@0: absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000; aoqi@0: if (absTime->tv_nsec >= NANOSECS_PER_SEC) { aoqi@0: absTime->tv_nsec -= NANOSECS_PER_SEC; aoqi@0: ++absTime->tv_sec; // note: this must be <= max_secs aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: assert(absTime->tv_sec >= 0, "tv_sec < 0"); aoqi@0: assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs"); aoqi@0: assert(absTime->tv_nsec >= 0, "tv_nsec < 0"); aoqi@0: assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec"); aoqi@0: } aoqi@0: aoqi@0: void Parker::park(bool isAbsolute, jlong time) { aoqi@0: // Ideally we'd do something useful while spinning, such aoqi@0: // as calling unpackTime(). aoqi@0: aoqi@0: // Optional fast-path check: aoqi@0: // Return immediately if a permit is available. aoqi@0: // We depend on Atomic::xchg() having full barrier semantics aoqi@0: // since we are doing a lock-free update to _counter. 
aoqi@0: if (Atomic::xchg(0, &_counter) > 0) return; aoqi@0: aoqi@0: Thread* thread = Thread::current(); aoqi@0: assert(thread->is_Java_thread(), "Must be JavaThread"); aoqi@0: JavaThread *jt = (JavaThread *)thread; aoqi@0: aoqi@0: // Optional optimization -- avoid state transitions if there's an interrupt pending. aoqi@0: // Check interrupt before trying to wait aoqi@0: if (Thread::is_interrupted(thread, false)) { aoqi@0: return; aoqi@0: } aoqi@0: aoqi@0: // Next, demultiplex/decode time arguments aoqi@0: struct timespec absTime; aoqi@0: if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all aoqi@0: return; aoqi@0: } aoqi@0: if (time > 0) { aoqi@0: unpackTime(&absTime, isAbsolute, time); aoqi@0: } aoqi@0: aoqi@0: aoqi@0: // Enter safepoint region aoqi@0: // Beware of deadlocks such as 6317397. aoqi@0: // The per-thread Parker:: mutex is a classic leaf-lock. aoqi@0: // In particular a thread must never block on the Threads_lock while aoqi@0: // holding the Parker:: mutex. If safepoints are pending both the aoqi@0: // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock. aoqi@0: ThreadBlockInVM tbivm(jt); aoqi@0: aoqi@0: // Don't wait if cannot get lock since interference arises from aoqi@0: // unblocking. Also. check interrupt before trying wait aoqi@0: if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) { aoqi@0: return; aoqi@0: } aoqi@0: aoqi@0: int status ; aoqi@0: if (_counter > 0) { // no wait needed aoqi@0: _counter = 0; aoqi@0: status = pthread_mutex_unlock(_mutex); aoqi@0: assert (status == 0, "invariant") ; aoqi@0: // Paranoia to ensure our locked and lock-free paths interact aoqi@0: // correctly with each other and Java-level accesses. aoqi@0: OrderAccess::fence(); aoqi@0: return; aoqi@0: } aoqi@0: aoqi@0: #ifdef ASSERT aoqi@0: // Don't catch signals while blocked; let the running threads have the signals. aoqi@0: // (This allows a debugger to break into the running thread.) 
aoqi@0: sigset_t oldsigs; aoqi@0: sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals(); aoqi@0: pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs); aoqi@0: #endif aoqi@0: aoqi@0: OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); aoqi@0: jt->set_suspend_equivalent(); aoqi@0: // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() aoqi@0: aoqi@0: if (time == 0) { aoqi@0: status = pthread_cond_wait (_cond, _mutex) ; aoqi@0: } else { aoqi@0: status = os::Bsd::safe_cond_timedwait (_cond, _mutex, &absTime) ; aoqi@0: if (status != 0 && WorkAroundNPTLTimedWaitHang) { aoqi@0: pthread_cond_destroy (_cond) ; aoqi@0: pthread_cond_init (_cond, NULL); aoqi@0: } aoqi@0: } aoqi@0: assert_status(status == 0 || status == EINTR || aoqi@0: status == ETIMEDOUT, aoqi@0: status, "cond_timedwait"); aoqi@0: aoqi@0: #ifdef ASSERT aoqi@0: pthread_sigmask(SIG_SETMASK, &oldsigs, NULL); aoqi@0: #endif aoqi@0: aoqi@0: _counter = 0 ; aoqi@0: status = pthread_mutex_unlock(_mutex) ; aoqi@0: assert_status(status == 0, status, "invariant") ; aoqi@0: // Paranoia to ensure our locked and lock-free paths interact aoqi@0: // correctly with each other and Java-level accesses. 
aoqi@0: OrderAccess::fence(); aoqi@0: aoqi@0: // If externally suspended while waiting, re-suspend aoqi@0: if (jt->handle_special_suspend_equivalent_condition()) { aoqi@0: jt->java_suspend_self(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: void Parker::unpark() { aoqi@0: int s, status ; aoqi@0: status = pthread_mutex_lock(_mutex); aoqi@0: assert (status == 0, "invariant") ; aoqi@0: s = _counter; aoqi@0: _counter = 1; aoqi@0: if (s < 1) { aoqi@0: if (WorkAroundNPTLTimedWaitHang) { aoqi@0: status = pthread_cond_signal (_cond) ; aoqi@0: assert (status == 0, "invariant") ; aoqi@0: status = pthread_mutex_unlock(_mutex); aoqi@0: assert (status == 0, "invariant") ; aoqi@0: } else { aoqi@0: status = pthread_mutex_unlock(_mutex); aoqi@0: assert (status == 0, "invariant") ; aoqi@0: status = pthread_cond_signal (_cond) ; aoqi@0: assert (status == 0, "invariant") ; aoqi@0: } aoqi@0: } else { aoqi@0: pthread_mutex_unlock(_mutex); aoqi@0: assert (status == 0, "invariant") ; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: aoqi@0: /* Darwin has no "environ" in a dynamic library. */ aoqi@0: #ifdef __APPLE__ aoqi@0: #include aoqi@0: #define environ (*_NSGetEnviron()) aoqi@0: #else aoqi@0: extern char** environ; aoqi@0: #endif aoqi@0: aoqi@0: // Run the specified command in a separate process. Return its exit value, aoqi@0: // or -1 on failure (e.g. can't fork a new process). aoqi@0: // Unlike system(), this function can be called from signal handler. It aoqi@0: // doesn't block SIGINT et al. aoqi@0: int os::fork_and_exec(char* cmd) { aoqi@0: const char * argv[4] = {"sh", "-c", cmd, NULL}; aoqi@0: aoqi@0: // fork() in BsdThreads/NPTL is not async-safe. It needs to run aoqi@0: // pthread_atfork handlers and reset pthread library. All we need is a aoqi@0: // separate process to execve. Make a direct syscall to fork process. aoqi@0: // On IA64 there's no fork syscall, we have to use fork() and hope for aoqi@0: // the best... 
aoqi@0: pid_t pid = fork(); aoqi@0: aoqi@0: if (pid < 0) { aoqi@0: // fork failed aoqi@0: return -1; aoqi@0: aoqi@0: } else if (pid == 0) { aoqi@0: // child process aoqi@0: aoqi@0: // execve() in BsdThreads will call pthread_kill_other_threads_np() aoqi@0: // first to kill every thread on the thread list. Because this list is aoqi@0: // not reset by fork() (see notes above), execve() will instead kill aoqi@0: // every thread in the parent process. We know this is the only thread aoqi@0: // in the new process, so make a system call directly. aoqi@0: // IA64 should use normal execve() from glibc to match the glibc fork() aoqi@0: // above. aoqi@0: execve("/bin/sh", (char* const*)argv, environ); aoqi@0: aoqi@0: // execve failed aoqi@0: _exit(-1); aoqi@0: aoqi@0: } else { aoqi@0: // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't aoqi@0: // care about the actual exit code, for now. aoqi@0: aoqi@0: int status; aoqi@0: aoqi@0: // Wait for the child process to exit. This returns immediately if aoqi@0: // the child has already exited. */ aoqi@0: while (waitpid(pid, &status, 0) < 0) { aoqi@0: switch (errno) { aoqi@0: case ECHILD: return 0; aoqi@0: case EINTR: break; aoqi@0: default: return -1; aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: if (WIFEXITED(status)) { aoqi@0: // The child exited normally; get its exit code. aoqi@0: return WEXITSTATUS(status); aoqi@0: } else if (WIFSIGNALED(status)) { aoqi@0: // The child exited because of a signal aoqi@0: // The best value to return is 0x80 + signal number, aoqi@0: // because that is what all Unix shells do, and because aoqi@0: // it allows callers to distinguish between process exit and aoqi@0: // process death by signal. 
aoqi@0: return 0x80 + WTERMSIG(status); aoqi@0: } else { aoqi@0: // Unknown exit code; pass it through aoqi@0: return status; aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // is_headless_jre() aoqi@0: // aoqi@0: // Test for the existence of xawt/libmawt.so or libawt_xawt.so aoqi@0: // in order to report if we are running in a headless jre aoqi@0: // aoqi@0: // Since JDK8 xawt/libmawt.so was moved into the same directory aoqi@0: // as libawt.so, and renamed libawt_xawt.so aoqi@0: // aoqi@0: bool os::is_headless_jre() { aoqi@0: #ifdef __APPLE__ aoqi@0: // We no longer build headless-only on Mac OS X aoqi@0: return false; aoqi@0: #else aoqi@0: struct stat statbuf; aoqi@0: char buf[MAXPATHLEN]; aoqi@0: char libmawtpath[MAXPATHLEN]; aoqi@0: const char *xawtstr = "/xawt/libmawt" JNI_LIB_SUFFIX; aoqi@0: const char *new_xawtstr = "/libawt_xawt" JNI_LIB_SUFFIX; aoqi@0: char *p; aoqi@0: aoqi@0: // Get path to libjvm.so aoqi@0: os::jvm_path(buf, sizeof(buf)); aoqi@0: aoqi@0: // Get rid of libjvm.so aoqi@0: p = strrchr(buf, '/'); aoqi@0: if (p == NULL) return false; aoqi@0: else *p = '\0'; aoqi@0: aoqi@0: // Get rid of client or server aoqi@0: p = strrchr(buf, '/'); aoqi@0: if (p == NULL) return false; aoqi@0: else *p = '\0'; aoqi@0: aoqi@0: // check xawt/libmawt.so aoqi@0: strcpy(libmawtpath, buf); aoqi@0: strcat(libmawtpath, xawtstr); aoqi@0: if (::stat(libmawtpath, &statbuf) == 0) return false; aoqi@0: aoqi@0: // check libawt_xawt.so aoqi@0: strcpy(libmawtpath, buf); aoqi@0: strcat(libmawtpath, new_xawtstr); aoqi@0: if (::stat(libmawtpath, &statbuf) == 0) return false; aoqi@0: aoqi@0: return true; aoqi@0: #endif aoqi@0: } aoqi@0: aoqi@0: // Get the default path to the core file aoqi@0: // Returns the length of the string aoqi@0: int os::get_core_path(char* buffer, size_t bufferSize) { aoqi@0: int n = jio_snprintf(buffer, bufferSize, "/cores"); aoqi@0: aoqi@0: // Truncate if theoretical string was longer than bufferSize aoqi@0: n = MIN2(n, (int)bufferSize); aoqi@0: aoqi@0: 
return n; aoqi@0: } aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: void TestReserveMemorySpecial_test() { aoqi@0: // No tests available for this platform aoqi@0: } aoqi@0: #endif