src/os/bsd/vm/os_bsd.cpp

changeset 3156
f08d439fab8c
child 3202
436b4a3231bf
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Sun Sep 25 16:03:29 2011 -0700
     1.3 @@ -0,0 +1,5709 @@
     1.4 +/*
     1.5 + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +// no precompiled headers
    1.29 +#include "classfile/classLoader.hpp"
    1.30 +#include "classfile/systemDictionary.hpp"
    1.31 +#include "classfile/vmSymbols.hpp"
    1.32 +#include "code/icBuffer.hpp"
    1.33 +#include "code/vtableStubs.hpp"
    1.34 +#include "compiler/compileBroker.hpp"
    1.35 +#include "interpreter/interpreter.hpp"
    1.36 +#include "jvm_bsd.h"
    1.37 +#include "memory/allocation.inline.hpp"
    1.38 +#include "memory/filemap.hpp"
    1.39 +#include "mutex_bsd.inline.hpp"
    1.40 +#include "oops/oop.inline.hpp"
    1.41 +#include "os_share_bsd.hpp"
    1.42 +#include "prims/jniFastGetField.hpp"
    1.43 +#include "prims/jvm.h"
    1.44 +#include "prims/jvm_misc.hpp"
    1.45 +#include "runtime/arguments.hpp"
    1.46 +#include "runtime/extendedPC.hpp"
    1.47 +#include "runtime/globals.hpp"
    1.48 +#include "runtime/interfaceSupport.hpp"
    1.49 +#include "runtime/java.hpp"
    1.50 +#include "runtime/javaCalls.hpp"
    1.51 +#include "runtime/mutexLocker.hpp"
    1.52 +#include "runtime/objectMonitor.hpp"
    1.53 +#include "runtime/osThread.hpp"
    1.54 +#include "runtime/perfMemory.hpp"
    1.55 +#include "runtime/sharedRuntime.hpp"
    1.56 +#include "runtime/statSampler.hpp"
    1.57 +#include "runtime/stubRoutines.hpp"
    1.58 +#include "runtime/threadCritical.hpp"
    1.59 +#include "runtime/timer.hpp"
    1.60 +#include "services/attachListener.hpp"
    1.61 +#include "services/runtimeService.hpp"
    1.62 +#include "thread_bsd.inline.hpp"
    1.63 +#include "utilities/decoder.hpp"
    1.64 +#include "utilities/defaultStream.hpp"
    1.65 +#include "utilities/events.hpp"
    1.66 +#include "utilities/growableArray.hpp"
    1.67 +#include "utilities/vmError.hpp"
    1.68 +#ifdef TARGET_ARCH_x86
    1.69 +# include "assembler_x86.inline.hpp"
    1.70 +# include "nativeInst_x86.hpp"
    1.71 +#endif
    1.72 +#ifdef TARGET_ARCH_sparc
    1.73 +# include "assembler_sparc.inline.hpp"
    1.74 +# include "nativeInst_sparc.hpp"
    1.75 +#endif
    1.76 +#ifdef TARGET_ARCH_zero
    1.77 +# include "assembler_zero.inline.hpp"
    1.78 +# include "nativeInst_zero.hpp"
    1.79 +#endif
    1.80 +#ifdef TARGET_ARCH_arm
    1.81 +# include "assembler_arm.inline.hpp"
    1.82 +# include "nativeInst_arm.hpp"
    1.83 +#endif
    1.84 +#ifdef TARGET_ARCH_ppc
    1.85 +# include "assembler_ppc.inline.hpp"
    1.86 +# include "nativeInst_ppc.hpp"
    1.87 +#endif
    1.88 +#ifdef COMPILER1
    1.89 +#include "c1/c1_Runtime1.hpp"
    1.90 +#endif
    1.91 +#ifdef COMPILER2
    1.92 +#include "opto/runtime.hpp"
    1.93 +#endif
    1.94 +
    1.95 +// put OS-includes here
    1.96 +# include <sys/types.h>
    1.97 +# include <sys/mman.h>
    1.98 +# include <sys/stat.h>
    1.99 +# include <sys/select.h>
   1.100 +# include <pthread.h>
   1.101 +# include <signal.h>
   1.102 +# include <errno.h>
   1.103 +# include <dlfcn.h>
   1.104 +# include <stdio.h>
   1.105 +# include <unistd.h>
   1.106 +# include <sys/resource.h>
   1.107 +# include <pthread.h>
   1.108 +# include <sys/stat.h>
   1.109 +# include <sys/time.h>
   1.110 +# include <sys/times.h>
   1.111 +# include <sys/utsname.h>
   1.112 +# include <sys/socket.h>
   1.113 +# include <sys/wait.h>
   1.114 +# include <time.h>
   1.115 +# include <pwd.h>
   1.116 +# include <poll.h>
   1.117 +# include <semaphore.h>
   1.118 +# include <fcntl.h>
   1.119 +# include <string.h>
   1.120 +#ifdef _ALLBSD_SOURCE
   1.121 +# include <sys/param.h>
   1.122 +# include <sys/sysctl.h>
   1.123 +#else
   1.124 +# include <syscall.h>
   1.125 +# include <sys/sysinfo.h>
   1.126 +# include <gnu/libc-version.h>
   1.127 +#endif
   1.128 +# include <sys/ipc.h>
   1.129 +# include <sys/shm.h>
   1.130 +#ifndef __APPLE__
   1.131 +# include <link.h>
   1.132 +#endif
   1.133 +# include <stdint.h>
   1.134 +# include <inttypes.h>
   1.135 +# include <sys/ioctl.h>
   1.136 +
   1.137 +#if defined(__FreeBSD__) || defined(__NetBSD__)
   1.138 +# include <elf.h>
   1.139 +#endif
   1.140 +
   1.141 +#ifdef __APPLE__
   1.142 +#include <mach/mach.h> // semaphore_* API
   1.143 +#include <mach-o/dyld.h>
   1.144 +#endif
   1.145 +
   1.146 +#ifndef MAP_ANONYMOUS
   1.147 +#define MAP_ANONYMOUS MAP_ANON
   1.148 +#endif
   1.149 +
   1.150 +#define MAX_PATH    (2 * K)
   1.151 +
   1.152 +// for timer info max values which include all bits
   1.153 +#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
   1.154 +#define SEC_IN_NANOSECS  1000000000LL
   1.155 +
   1.156 +#define LARGEPAGES_BIT (1 << 6)
   1.157 +////////////////////////////////////////////////////////////////////////////////
   1.158 +// global variables
   1.159 +julong os::Bsd::_physical_memory = 0;
   1.160 +
   1.161 +#ifndef _ALLBSD_SOURCE
   1.162 +address   os::Bsd::_initial_thread_stack_bottom = NULL;
   1.163 +uintptr_t os::Bsd::_initial_thread_stack_size   = 0;
   1.164 +#endif
   1.165 +
   1.166 +int (*os::Bsd::_clock_gettime)(clockid_t, struct timespec *) = NULL;
   1.167 +#ifndef _ALLBSD_SOURCE
   1.168 +int (*os::Bsd::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
   1.169 +Mutex* os::Bsd::_createThread_lock = NULL;
   1.170 +#endif
   1.171 +pthread_t os::Bsd::_main_thread;
   1.172 +int os::Bsd::_page_size = -1;
   1.173 +#ifndef _ALLBSD_SOURCE
   1.174 +bool os::Bsd::_is_floating_stack = false;
   1.175 +bool os::Bsd::_is_NPTL = false;
   1.176 +bool os::Bsd::_supports_fast_thread_cpu_time = false;
   1.177 +const char * os::Bsd::_glibc_version = NULL;
   1.178 +const char * os::Bsd::_libpthread_version = NULL;
   1.179 +#endif
   1.180 +
   1.181 +static jlong initial_time_count=0;
   1.182 +
   1.183 +static int clock_tics_per_sec = 100;
   1.184 +
   1.185 +// For diagnostics to print a message once. see run_periodic_checks
   1.186 +static sigset_t check_signal_done;
   1.187 +static bool check_signals = true;;
   1.188 +
   1.189 +static pid_t _initial_pid = 0;
   1.190 +
   1.191 +/* Signal number used to suspend/resume a thread */
   1.192 +
   1.193 +/* do not use any signal number less than SIGSEGV, see 4355769 */
   1.194 +static int SR_signum = SIGUSR2;
   1.195 +sigset_t SR_sigset;
   1.196 +
   1.197 +
   1.198 +////////////////////////////////////////////////////////////////////////////////
   1.199 +// utility functions
   1.200 +
   1.201 +static int SR_initialize();
   1.202 +static int SR_finalize();
   1.203 +
   1.204 +julong os::available_memory() {
   1.205 +  return Bsd::available_memory();
   1.206 +}
   1.207 +
   1.208 +julong os::Bsd::available_memory() {
   1.209 +#ifdef _ALLBSD_SOURCE
   1.210 +  // XXXBSD: this is just a stopgap implementation
   1.211 +  return physical_memory() >> 2;
   1.212 +#else
   1.213 +  // values in struct sysinfo are "unsigned long"
   1.214 +  struct sysinfo si;
   1.215 +  sysinfo(&si);
   1.216 +
   1.217 +  return (julong)si.freeram * si.mem_unit;
   1.218 +#endif
   1.219 +}
   1.220 +
   1.221 +julong os::physical_memory() {
   1.222 +  return Bsd::physical_memory();
   1.223 +}
   1.224 +
   1.225 +julong os::allocatable_physical_memory(julong size) {
   1.226 +#ifdef _LP64
   1.227 +  return size;
   1.228 +#else
   1.229 +  julong result = MIN2(size, (julong)3800*M);
   1.230 +   if (!is_allocatable(result)) {
   1.231 +     // See comments under solaris for alignment considerations
   1.232 +     julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
   1.233 +     result =  MIN2(size, reasonable_size);
   1.234 +   }
   1.235 +   return result;
   1.236 +#endif // _LP64
   1.237 +}
   1.238 +
   1.239 +////////////////////////////////////////////////////////////////////////////////
   1.240 +// environment support
   1.241 +
   1.242 +bool os::getenv(const char* name, char* buf, int len) {
   1.243 +  const char* val = ::getenv(name);
   1.244 +  if (val != NULL && strlen(val) < (size_t)len) {
   1.245 +    strcpy(buf, val);
   1.246 +    return true;
   1.247 +  }
   1.248 +  if (len > 0) buf[0] = 0;  // return a null string
   1.249 +  return false;
   1.250 +}
   1.251 +
   1.252 +
   1.253 +// Return true if user is running as root.
   1.254 +
   1.255 +bool os::have_special_privileges() {
   1.256 +  static bool init = false;
   1.257 +  static bool privileges = false;
   1.258 +  if (!init) {
   1.259 +    privileges = (getuid() != geteuid()) || (getgid() != getegid());
   1.260 +    init = true;
   1.261 +  }
   1.262 +  return privileges;
   1.263 +}
   1.264 +
   1.265 +
#ifndef _ALLBSD_SOURCE
#ifndef SYS_gettid
// Syscall numbers for gettid() on platforms where libc does not define them.
// i386: 224, ia64: 1105, amd64: 186, sparc 143
#ifdef __ia64__
#define SYS_gettid 1105
#elif __i386__
#define SYS_gettid 224
#elif __amd64__
#define SYS_gettid 186
#elif __sparc__
#define SYS_gettid 143
#else
#error define gettid for the arch
#endif
#endif
#endif
   1.282 +
// Cpu architecture string
// Used to build the <arch> component of library search paths
// (see init_system_properties_values below).
#if   defined(ZERO)
static char cpu_arch[] = ZERO_LIBARCH;
#elif defined(IA64)
static char cpu_arch[] = "ia64";
#elif defined(IA32)
static char cpu_arch[] = "i386";
#elif defined(AMD64)
static char cpu_arch[] = "amd64";
#elif defined(ARM)
static char cpu_arch[] = "arm";
#elif defined(PPC)
static char cpu_arch[] = "ppc";
#elif defined(SPARC)
#  ifdef _LP64
static char cpu_arch[] = "sparcv9";
#  else
static char cpu_arch[] = "sparc";
#  endif
#else
#error Add appropriate cpu_arch setting
#endif
   1.305 +
   1.306 +
   1.307 +#ifndef _ALLBSD_SOURCE
   1.308 +// pid_t gettid()
   1.309 +//
   1.310 +// Returns the kernel thread id of the currently running thread. Kernel
   1.311 +// thread id is used to access /proc.
   1.312 +//
   1.313 +// (Note that getpid() on BsdThreads returns kernel thread id too; but
   1.314 +// on NPTL, it returns the same pid for all threads, as required by POSIX.)
   1.315 +//
   1.316 +pid_t os::Bsd::gettid() {
   1.317 +  int rslt = syscall(SYS_gettid);
   1.318 +  if (rslt == -1) {
   1.319 +     // old kernel, no NPTL support
   1.320 +     return getpid();
   1.321 +  } else {
   1.322 +     return (pid_t)rslt;
   1.323 +  }
   1.324 +}
   1.325 +
// Most versions of bsd have a bug where the number of processors are
// determined by looking at the /proc file system.  In a chroot environment,
// the system call returns 1.  This causes the VM to act as if it is
// a single processor and elide locking (see is_MP() call).
// Set by initialize_system_info() when /proc/<tid> cannot be opened.
static bool unsafe_chroot_detected = false;
static const char *unstable_chroot_error = "/proc file system not found.\n"
                     "Java may be unstable running multithreaded in a chroot "
                     "environment on Bsd when /proc filesystem is not mounted.";
#endif
   1.335 +
   1.336 +#ifdef _ALLBSD_SOURCE
// Query processor count and physical memory size via sysctl and cache them
// in os statics. Falls back to conservative defaults if sysctl fails.
void os::Bsd::initialize_system_info() {
  int mib[2];
  size_t len;
  int cpu_val;
  u_long mem_val;

  /* get processors count via hw.ncpus sysctl */
  mib[0] = CTL_HW;
  mib[1] = HW_NCPU;
  len = sizeof(cpu_val);
  if (sysctl(mib, 2, &cpu_val, &len, NULL, 0) != -1 && cpu_val >= 1) {
       set_processor_count(cpu_val);
  }
  else {
       set_processor_count(1);   // fallback
  }

  /* get physical memory via hw.usermem sysctl (hw.usermem is used
   * instead of hw.physmem because we need the size of allocatable memory)
   */
  mib[0] = CTL_HW;
  mib[1] = HW_USERMEM;
  len = sizeof(mem_val);
  if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1)
       _physical_memory = mem_val;
  else
       _physical_memory = 256*1024*1024;       // fallback (XXXBSD?)

#ifdef __OpenBSD__
  {
       // limit _physical_memory memory view on OpenBSD since
       // datasize rlimit restricts us anyway.
       // NOTE(review): getrlimit's return value is unchecked; limits would be
       // uninitialized if the call failed — confirm this is acceptable.
       struct rlimit limits;
       getrlimit(RLIMIT_DATA, &limits);
       _physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
  }
#endif
}
   1.375 +#else
// Non-BSD variant: processor count and memory size via sysconf. When only
// one processor is reported, verify /proc is actually mounted — a missing
// /proc in a chroot makes the count untrustworthy (see unsafe_chroot_detected).
void os::Bsd::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  if (processor_count() == 1) {
    pid_t pid = os::Bsd::gettid();
    char fname[32];
    jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
    FILE *fp = fopen(fname, "r");
    if (fp == NULL) {
      unsafe_chroot_detected = true;
    } else {
      fclose(fp);
    }
  }
  // Total physical memory = page count * page size.
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
  assert(processor_count() > 0, "bsd error");
}
   1.392 +#endif
   1.393 +
// Derive JAVA_HOME, the DLL directory, the native library search path, and
// the extensions/endorsed directories from the location of libjvm, and hand
// them to Arguments. All strings built here are allocated on the VM C heap
// (see the malloc macro below) and are not freed here.
void os::init_system_properties_values() {
//  char arch[12];
//  sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));

  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm[_g].so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
  // it looks like libjvm[_g].so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // The next few definitions allow the code to be verbatim:
  // NOTE: within this function malloc maps to the VM C-heap allocator.
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
#define getenv(n) ::getenv(n)

/*
 * See ld(1):
 *      The linker uses the following search paths to locate required
 *      shared libraries:
 *        1: ...
 *        ...
 *        7: The default directories, normally /lib and /usr/lib.
 */
#ifndef DEFAULT_LIBPATH
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#endif

#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"
#define REG_DIR         "/usr/java/packages"

  {
    /* sysclasspath, java_home, dll_dir */
    {
        char *home_path;
        char *dll_path;
        char *pslash;
        char buf[MAXPATHLEN];
        os::jvm_path(buf, sizeof(buf));

        // Found the full path to libjvm.so.
        // Now cut the path to <java_home>/jre if we can.
        *(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
        pslash = strrchr(buf, '/');
        if (pslash != NULL)
            *pslash = '\0';           /* get rid of /{client|server|hotspot} */
        dll_path = malloc(strlen(buf) + 1);
        if (dll_path == NULL)
            return;
        strcpy(dll_path, buf);
        Arguments::set_dll_dir(dll_path);

        if (pslash != NULL) {
            pslash = strrchr(buf, '/');
            if (pslash != NULL) {
                *pslash = '\0';       /* get rid of /<arch> */
                pslash = strrchr(buf, '/');
                if (pslash != NULL)
                    *pslash = '\0';   /* get rid of /lib */
            }
        }

        home_path = malloc(strlen(buf) + 1);
        if (home_path == NULL)
            return;
        strcpy(home_path, buf);
        Arguments::set_java_home(home_path);

        if (!set_boot_path('/', ':'))
            return;
    }

    /*
     * Where to look for native libraries
     *
     * Note: Due to a legacy implementation, most of the library path
     * is set in the launcher.  This was to accommodate linking restrictions
     * on legacy Bsd implementations (which are no longer supported).
     * Eventually, all the library path setting will be done here.
     *
     * However, to prevent the proliferation of improperly built native
     * libraries, the new path component /usr/java/packages is added here.
     * Eventually, all the library path setting will be done here.
     */
    {
        char *ld_library_path;

        /*
         * Construct the invariant part of ld_library_path. Note that the
         * space for the colon and the trailing null are provided by the
         * nulls included by the sizeof operator (so actually we allocate
         * a byte more than necessary).
         */
        ld_library_path = (char *) malloc(sizeof(REG_DIR) + sizeof("/lib/") +
            strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH));
        sprintf(ld_library_path, REG_DIR "/lib/%s:" DEFAULT_LIBPATH, cpu_arch);

        /*
         * Get the user setting of LD_LIBRARY_PATH, and prepended it.  It
         * should always exist (until the legacy problem cited above is
         * addressed).
         */
#ifdef __APPLE__
        // On Darwin the dynamic loader consults DYLD_LIBRARY_PATH instead.
        char *v = getenv("DYLD_LIBRARY_PATH");
#else
        char *v = getenv("LD_LIBRARY_PATH");
#endif
        if (v != NULL) {
            char *t = ld_library_path;
            /* That's +1 for the colon and +1 for the trailing '\0' */
            ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
            sprintf(ld_library_path, "%s:%s", v, t);
        }
        Arguments::set_library_path(ld_library_path);
    }

    /*
     * Extensions directories.
     *
     * Note that the space for the colon and the trailing null are provided
     * by the nulls included by the sizeof operator (so actually one byte more
     * than necessary is allocated).
     */
    {
        char *buf = malloc(strlen(Arguments::get_java_home()) +
            sizeof(EXTENSIONS_DIR) + sizeof(REG_DIR) + sizeof(EXTENSIONS_DIR));
        sprintf(buf, "%s" EXTENSIONS_DIR ":" REG_DIR EXTENSIONS_DIR,
            Arguments::get_java_home());
        Arguments::set_ext_dirs(buf);
    }

    /* Endorsed standards default directory. */
    {
        char * buf;
        buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
        sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
        Arguments::set_endorsed_dirs(buf);
    }
  }

#undef malloc
#undef getenv
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR

  // Done
  return;
}
   1.561 +
   1.562 +////////////////////////////////////////////////////////////////////////////////
   1.563 +// breakpoint support
   1.564 +
// Debug aid: executes the platform breakpoint trap (BREAKPOINT macro).
void os::breakpoint() {
  BREAKPOINT;
}
   1.568 +
// Deliberately empty hook: set a debugger breakpoint on this symbol to
// stop the VM at a known point.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
   1.572 +
   1.573 +////////////////////////////////////////////////////////////////////////////////
   1.574 +// signal support
   1.575 +
// Debug-only flag: set once signal_sets_init() has populated the sets below.
debug_only(static bool signal_sets_initialized = false);
// unblocked_sigs: unblocked in every thread; vm_sigs: handled only by the VM
// thread; allowdebug_blocked_sigs: blocked during cond_wait so a debugger
// can get in (see the accessors below).
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
   1.578 +
   1.579 +bool os::Bsd::is_sig_ignored(int sig) {
   1.580 +      struct sigaction oact;
   1.581 +      sigaction(sig, (struct sigaction*)NULL, &oact);
   1.582 +      void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
   1.583 +                                     : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
   1.584 +      if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
   1.585 +           return true;
   1.586 +      else
   1.587 +           return false;
   1.588 +}
   1.589 +
   1.590 +void os::Bsd::signal_sets_init() {
   1.591 +  // Should also have an assertion stating we are still single-threaded.
   1.592 +  assert(!signal_sets_initialized, "Already initialized");
   1.593 +  // Fill in signals that are necessarily unblocked for all threads in
   1.594 +  // the VM. Currently, we unblock the following signals:
   1.595 +  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
   1.596 +  //                         by -Xrs (=ReduceSignalUsage));
   1.597 +  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
   1.598 +  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
   1.599 +  // the dispositions or masks wrt these signals.
   1.600 +  // Programs embedding the VM that want to use the above signals for their
   1.601 +  // own purposes must, at this time, use the "-Xrs" option to prevent
   1.602 +  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
   1.603 +  // (See bug 4345157, and other related bugs).
   1.604 +  // In reality, though, unblocking these signals is really a nop, since
   1.605 +  // these signals are not blocked by default.
   1.606 +  sigemptyset(&unblocked_sigs);
   1.607 +  sigemptyset(&allowdebug_blocked_sigs);
   1.608 +  sigaddset(&unblocked_sigs, SIGILL);
   1.609 +  sigaddset(&unblocked_sigs, SIGSEGV);
   1.610 +  sigaddset(&unblocked_sigs, SIGBUS);
   1.611 +  sigaddset(&unblocked_sigs, SIGFPE);
   1.612 +  sigaddset(&unblocked_sigs, SR_signum);
   1.613 +
   1.614 +  if (!ReduceSignalUsage) {
   1.615 +   if (!os::Bsd::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
   1.616 +      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
   1.617 +      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   1.618 +   }
   1.619 +   if (!os::Bsd::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
   1.620 +      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
   1.621 +      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   1.622 +   }
   1.623 +   if (!os::Bsd::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
   1.624 +      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
   1.625 +      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   1.626 +   }
   1.627 +  }
   1.628 +  // Fill in signals that are blocked by all but the VM thread.
   1.629 +  sigemptyset(&vm_sigs);
   1.630 +  if (!ReduceSignalUsage)
   1.631 +    sigaddset(&vm_sigs, BREAK_SIGNAL);
   1.632 +  debug_only(signal_sets_initialized = true);
   1.633 +
   1.634 +}
   1.635 +
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Returns a pointer to the static set populated by signal_sets_init().
sigset_t* os::Bsd::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
   1.642 +
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Returns a pointer to the static set populated by signal_sets_init().
sigset_t* os::Bsd::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
   1.649 +
// These are signals that are blocked during cond_wait to allow debugger in
// Returns a pointer to the static set populated by signal_sets_init().
sigset_t* os::Bsd::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
   1.655 +
// Install the HotSpot signal mask on the current thread: record the caller's
// mask in the OSThread, unblock the signals the VM must receive, and route
// BREAK_SIGNAL to the VM thread only.
void os::Bsd::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  // SIG_BLOCK with a NULL set only queries the current mask.
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  // Unblock signals every thread must be able to receive (see signal_sets_init).
  pthread_sigmask(SIG_UNBLOCK, os::Bsd::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
   1.677 +
   1.678 +#ifndef _ALLBSD_SOURCE
   1.679 +//////////////////////////////////////////////////////////////////////////////
   1.680 +// detecting pthread library
   1.681 +
   1.682 +void os::Bsd::libpthread_init() {
   1.683 +  // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
   1.684 +  // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
   1.685 +  // generic name for earlier versions.
   1.686 +  // Define macros here so we can build HotSpot on old systems.
   1.687 +# ifndef _CS_GNU_LIBC_VERSION
   1.688 +# define _CS_GNU_LIBC_VERSION 2
   1.689 +# endif
   1.690 +# ifndef _CS_GNU_LIBPTHREAD_VERSION
   1.691 +# define _CS_GNU_LIBPTHREAD_VERSION 3
   1.692 +# endif
   1.693 +
   1.694 +  size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
   1.695 +  if (n > 0) {
   1.696 +     char *str = (char *)malloc(n);
   1.697 +     confstr(_CS_GNU_LIBC_VERSION, str, n);
   1.698 +     os::Bsd::set_glibc_version(str);
   1.699 +  } else {
   1.700 +     // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
   1.701 +     static char _gnu_libc_version[32];
   1.702 +     jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
   1.703 +              "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
   1.704 +     os::Bsd::set_glibc_version(_gnu_libc_version);
   1.705 +  }
   1.706 +
   1.707 +  n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
   1.708 +  if (n > 0) {
   1.709 +     char *str = (char *)malloc(n);
   1.710 +     confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
   1.711 +     // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
   1.712 +     // us "NPTL-0.29" even we are running with BsdThreads. Check if this
   1.713 +     // is the case. BsdThreads has a hard limit on max number of threads.
   1.714 +     // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
   1.715 +     // On the other hand, NPTL does not have such a limit, sysconf()
   1.716 +     // will return -1 and errno is not changed. Check if it is really NPTL.
   1.717 +     if (strcmp(os::Bsd::glibc_version(), "glibc 2.3.2") == 0 &&
   1.718 +         strstr(str, "NPTL") &&
   1.719 +         sysconf(_SC_THREAD_THREADS_MAX) > 0) {
   1.720 +       free(str);
   1.721 +       os::Bsd::set_libpthread_version("bsdthreads");
   1.722 +     } else {
   1.723 +       os::Bsd::set_libpthread_version(str);
   1.724 +     }
   1.725 +  } else {
   1.726 +    // glibc before 2.3.2 only has BsdThreads.
   1.727 +    os::Bsd::set_libpthread_version("bsdthreads");
   1.728 +  }
   1.729 +
   1.730 +  if (strstr(libpthread_version(), "NPTL")) {
   1.731 +     os::Bsd::set_is_NPTL();
   1.732 +  } else {
   1.733 +     os::Bsd::set_is_BsdThreads();
   1.734 +  }
   1.735 +
   1.736 +  // BsdThreads have two flavors: floating-stack mode, which allows variable
   1.737 +  // stack size; and fixed-stack mode. NPTL is always floating-stack.
   1.738 +  if (os::Bsd::is_NPTL() || os::Bsd::supports_variable_stack_size()) {
   1.739 +     os::Bsd::set_is_floating_stack();
   1.740 +  }
   1.741 +}
   1.742 +
   1.743 +/////////////////////////////////////////////////////////////////////////////
   1.744 +// thread stack
   1.745 +
   1.746 +// Force Bsd kernel to expand current thread stack. If "bottom" is close
   1.747 +// to the stack guard, caller should block all signals.
   1.748 +//
   1.749 +// MAP_GROWSDOWN:
   1.750 +//   A special mmap() flag that is used to implement thread stacks. It tells
   1.751 +//   kernel that the memory region should extend downwards when needed. This
   1.752 +//   allows early versions of BsdThreads to only mmap the first few pages
   1.753 +//   when creating a new thread. Bsd kernel will automatically expand thread
   1.754 +//   stack as needed (on page faults).
   1.755 +//
   1.756 +//   However, because the memory region of a MAP_GROWSDOWN stack can grow on
   1.757 +//   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
   1.758 +//   region, it's hard to tell if the fault is due to a legitimate stack
   1.759 +//   access or because of reading/writing non-exist memory (e.g. buffer
   1.760 +//   overrun). As a rule, if the fault happens below current stack pointer,
   1.761 +//   Bsd kernel does not expand stack, instead a SIGSEGV is sent to the
   1.762 +//   application (see Bsd kernel fault.c).
   1.763 +//
   1.764 +//   This Bsd feature can cause SIGSEGV when VM bangs thread stack for
   1.765 +//   stack overflow detection.
   1.766 +//
    1.767 +//   Newer versions of BsdThreads (since glibc-2.2, or, RH-7.x) and NPTL do
   1.768 +//   not use this flag. However, the stack of initial thread is not created
   1.769 +//   by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
   1.770 +//   unlikely) that user code can create a thread with MAP_GROWSDOWN stack
   1.771 +//   and then attach the thread to JVM.
   1.772 +//
   1.773 +// To get around the problem and allow stack banging on Bsd, we need to
   1.774 +// manually expand thread stack after receiving the SIGSEGV.
   1.775 +//
   1.776 +// There are two ways to expand thread stack to address "bottom", we used
   1.777 +// both of them in JVM before 1.5:
   1.778 +//   1. adjust stack pointer first so that it is below "bottom", and then
   1.779 +//      touch "bottom"
   1.780 +//   2. mmap() the page in question
   1.781 +//
    1.782 +// Now that the alternate signal stack is gone, it's harder to use 2. For instance,
   1.783 +// if current sp is already near the lower end of page 101, and we need to
   1.784 +// call mmap() to map page 100, it is possible that part of the mmap() frame
   1.785 +// will be placed in page 100. When page 100 is mapped, it is zero-filled.
   1.786 +// That will destroy the mmap() frame and cause VM to crash.
   1.787 +//
   1.788 +// The following code works by adjusting sp first, then accessing the "bottom"
   1.789 +// page to force a page fault. Bsd kernel will then automatically expand the
   1.790 +// stack mapping.
   1.791 +//
   1.792 +// _expand_stack_to() assumes its frame size is less than page size, which
   1.793 +// should always be true if the function is not inlined.
   1.794 +
   1.795 +#if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
   1.796 +#define NOINLINE
   1.797 +#else
   1.798 +#define NOINLINE __attribute__ ((noinline))
   1.799 +#endif
   1.800 +
// Forward declaration so the noinline attribute is attached to the
// definition below; inlining would break the frame-size assumption.
static void _expand_stack_to(address bottom) NOINLINE;

// Grow the current thread's stack mapping down to the page containing
// "bottom": move the stack pointer below it with alloca(), then touch the
// newly covered memory so the kernel services the page fault and extends a
// MAP_GROWSDOWN region. See the long comment above for why mmap() cannot be
// used here. Assumes this function's own frame is smaller than a page,
// which holds as long as it is not inlined (see NOINLINE above).
static void _expand_stack_to(address bottom) {
  address sp;
  size_t size;
  volatile char *p;

  // Adjust bottom to point to the largest address within the same page, it
  // gives us a one-page buffer if alloca() allocates slightly more memory.
  bottom = (address)align_size_down((uintptr_t)bottom, os::Bsd::page_size());
  bottom += os::Bsd::page_size() - 1;

  // sp might be slightly above current stack pointer; if that's the case, we
  // will alloca() a little more space than necessary, which is OK. Don't use
  // os::current_stack_pointer(), as its result can be slightly below current
  // stack pointer, causing us to not alloca enough to reach "bottom".
  sp = (address)&sp;

  if (sp > bottom) {
    size = sp - bottom;
    p = (volatile char *)alloca(size);
    assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
    // Touch the lowest byte of the alloca'd region; volatile keeps the
    // store from being optimized away, forcing the stack-growing fault.
    p[0] = '\0';
  }
}
   1.826 +
   1.827 +bool os::Bsd::manually_expand_stack(JavaThread * t, address addr) {
   1.828 +  assert(t!=NULL, "just checking");
   1.829 +  assert(t->osthread()->expanding_stack(), "expand should be set");
   1.830 +  assert(t->stack_base() != NULL, "stack_base was not initialized");
   1.831 +
   1.832 +  if (addr <  t->stack_base() && addr >= t->stack_yellow_zone_base()) {
   1.833 +    sigset_t mask_all, old_sigset;
   1.834 +    sigfillset(&mask_all);
   1.835 +    pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
   1.836 +    _expand_stack_to(addr);
   1.837 +    pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
   1.838 +    return true;
   1.839 +  }
   1.840 +  return false;
   1.841 +}
   1.842 +#endif
   1.843 +
   1.844 +//////////////////////////////////////////////////////////////////////////////
   1.845 +// create new thread
   1.846 +
   1.847 +static address highest_vm_reserved_address();
   1.848 +
   1.849 +// check if it's safe to start a new thread
   1.850 +static bool _thread_safety_check(Thread* thread) {
   1.851 +#ifdef _ALLBSD_SOURCE
   1.852 +    return true;
   1.853 +#else
   1.854 +  if (os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack()) {
   1.855 +    // Fixed stack BsdThreads (SuSE Bsd/x86, and some versions of Redhat)
   1.856 +    //   Heap is mmap'ed at lower end of memory space. Thread stacks are
   1.857 +    //   allocated (MAP_FIXED) from high address space. Every thread stack
   1.858 +    //   occupies a fixed size slot (usually 2Mbytes, but user can change
   1.859 +    //   it to other values if they rebuild BsdThreads).
   1.860 +    //
   1.861 +    // Problem with MAP_FIXED is that mmap() can still succeed even part of
   1.862 +    // the memory region has already been mmap'ed. That means if we have too
   1.863 +    // many threads and/or very large heap, eventually thread stack will
   1.864 +    // collide with heap.
   1.865 +    //
   1.866 +    // Here we try to prevent heap/stack collision by comparing current
   1.867 +    // stack bottom with the highest address that has been mmap'ed by JVM
   1.868 +    // plus a safety margin for memory maps created by native code.
   1.869 +    //
   1.870 +    // This feature can be disabled by setting ThreadSafetyMargin to 0
   1.871 +    //
   1.872 +    if (ThreadSafetyMargin > 0) {
   1.873 +      address stack_bottom = os::current_stack_base() - os::current_stack_size();
   1.874 +
   1.875 +      // not safe if our stack extends below the safety margin
   1.876 +      return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
   1.877 +    } else {
   1.878 +      return true;
   1.879 +    }
   1.880 +  } else {
   1.881 +    // Floating stack BsdThreads or NPTL:
   1.882 +    //   Unlike fixed stack BsdThreads, thread stacks are not MAP_FIXED. When
   1.883 +    //   there's not enough space left, pthread_create() will fail. If we come
   1.884 +    //   here, that means enough space has been reserved for stack.
   1.885 +    return true;
   1.886 +  }
   1.887 +#endif
   1.888 +}
   1.889 +
// Thread start routine for all newly created threads.
// Runs on the child thread; performs a monitor-based handshake with the
// parent in os::create_thread(): sets the OSThread state to ZOMBIE (abort)
// or INITIALIZED (success), then waits for os::start_thread() to change the
// state before running the Thread's payload.
static void *java_start(Thread *thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  // non floating stack BsdThreads needs extra check, see above
  if (!_thread_safety_check(thread)) {
    // notify parent thread: ZOMBIE tells the parent (blocked waiting for
    // the state to leave ALLOCATED) that the thread could not start.
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
    osthread->set_state(ZOMBIE);
    sync->notify_all();
    return NULL;
  }

#ifdef _ALLBSD_SOURCE
  // thread_id is pthread_id on BSD
  osthread->set_thread_id(::pthread_self());
#else
  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Bsd::gettid());

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }
#endif
  // initialize signal mask for this thread
  os::Bsd::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Bsd::init_thread_fpu_state();

  // handshaking with parent thread
  {
    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread()
    while (osthread->get_state() == INITIALIZED) {
      sync->wait(Mutex::_no_safepoint_check_flag);
    }
  }

  // call one more level start routine
  thread->run();

  return 0;
}
   1.954 +
// Create a new native thread for the given Thread object.
// Allocates an OSThread, configures pthread attributes (detached, stack
// size, guard size), starts the thread at java_start(), then waits on the
// startThread_lock monitor until the child reports INITIALIZED (success)
// or ZOMBIE (abort). Returns false on any failure, with all partially
// allocated state cleaned up. On success the thread is left suspended in
// INITIALIZED state and is released later by os::start_thread().
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // stack size
  if (os::Bsd::supports_variable_stack_size()) {
    // calculate stack size if it's not specified by caller
    if (stack_size == 0) {
      stack_size = os::Bsd::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize which default value can be
        // changed with the flag -Xss
        assert (JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    // Never go below the platform minimum stack size.
    stack_size = MAX2(stack_size, os::Bsd::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } else {
    // let pthread_create() pick the default value.
  }

#ifndef _ALLBSD_SOURCE
  // glibc guard page
  pthread_attr_setguardsize(&attr, os::Bsd::default_guard_size(thr_type));
#endif

  ThreadState state;

  {

#ifndef _ALLBSD_SOURCE
    // Serialize thread creation if we are running with fixed stack BsdThreads
    // (fixed-stack threads use MAP_FIXED stacks that can collide with the
    // heap; see _thread_safety_check above).
    bool lock = os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack();
    if (lock) {
      os::Bsd::createThread_lock()->lock_without_safepoint_check();
    }
#endif

    pthread_t tid;
    int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

    // Attributes are no longer needed once the thread exists (or failed).
    pthread_attr_destroy(&attr);

    if (ret != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode)) {
        perror("pthread_create()");
      }
      // Need to clean up stuff we've allocated so far
      thread->set_osthread(NULL);
      delete osthread;
#ifndef _ALLBSD_SOURCE
      if (lock) os::Bsd::createThread_lock()->unlock();
#endif
      return false;
    }

    // Store pthread info into the OSThread
    osthread->set_pthread_id(tid);

    // Wait until child thread is either initialized or aborted
    // (java_start moves the state out of ALLOCATED and notifies us).
    {
      Monitor* sync_with_child = osthread->startThread_lock();
      MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
      while ((state = osthread->get_state()) == ALLOCATED) {
        sync_with_child->wait(Mutex::_no_safepoint_check_flag);
      }
    }

#ifndef _ALLBSD_SOURCE
    if (lock) {
      os::Bsd::createThread_lock()->unlock();
    }
#endif
  }

  // Aborted due to thread limit being reached
  if (state == ZOMBIE) {
      thread->set_osthread(NULL);
      delete osthread;
      return false;
  }

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  assert(state == INITIALIZED, "race condition");
  return true;
}
  1.1077 +
  1.1078 +/////////////////////////////////////////////////////////////////////////////
  1.1079 +// attach existing thread
  1.1080 +
// bootstrap the main thread
// Must be called on the primordial (main) thread; delegates to
// create_attached_thread() since the main thread already exists.
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Bsd::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
  1.1086 +
// Attach an externally created (already running) native thread to the VM:
// allocate and populate its OSThread, initialize FPU state and the hotspot
// signal mask, and — for the initial thread on non-BSD builds — pre-expand
// the on-demand stack mapping up to the yellow zone. Returns false only if
// OSThread allocation fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
#ifdef _ALLBSD_SOURCE
  // On BSD the thread id is the pthread id.
  osthread->set_thread_id(::pthread_self());
#else
  // Elsewhere the thread id is the kernel thread id.
  osthread->set_thread_id(os::Bsd::gettid());
#endif
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Bsd::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

#ifndef _ALLBSD_SOURCE
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if (os::Bsd::is_initial_thread()) {
    // If current thread is initial thread, its stack is mapped on demand,
    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
    // the entire stack region to avoid SEGV in stack banging.
    // It is also useful to get around the heap-stack-gap problem on SuSE
    // kernel (see 4821821 for details). We first expand stack to the top
    // of yellow zone, then enable stack yellow zone (order is significant,
    // enabling yellow zone first will crash JVM on SuSE Bsd), so there
    // is no gap between the last two virtual memory regions.

    JavaThread *jt = (JavaThread *)thread;
    address addr = jt->stack_yellow_zone_base();
    assert(addr != NULL, "initialization problem?");
    assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");

    // expanding_stack flag tells the SEGV handler that any fault here is
    // expected stack growth, not an error.
    osthread->set_expanding_stack();
    os::Bsd::manually_expand_stack(jt, addr);
    osthread->clear_expanding_stack();
  }
#endif

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Bsd::hotspot_sigmask(thread);

  return true;
}
  1.1150 +
  1.1151 +void os::pd_start_thread(Thread* thread) {
  1.1152 +  OSThread * osthread = thread->osthread();
  1.1153 +  assert(osthread->get_state() != INITIALIZED, "just checking");
  1.1154 +  Monitor* sync_with_child = osthread->startThread_lock();
  1.1155 +  MutexLockerEx ml(sync_with_child, Mutex::_no_safepoint_check_flag);
  1.1156 +  sync_with_child->notify();
  1.1157 +}
  1.1158 +
  1.1159 +// Free Bsd resources related to the OSThread
  1.1160 +void os::free_thread(OSThread* osthread) {
  1.1161 +  assert(osthread != NULL, "osthread not set");
  1.1162 +
  1.1163 +  if (Thread::current()->osthread() == osthread) {
  1.1164 +    // Restore caller's signal mask
  1.1165 +    sigset_t sigmask = osthread->caller_sigmask();
  1.1166 +    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  1.1167 +   }
  1.1168 +
  1.1169 +  delete osthread;
  1.1170 +}
  1.1171 +
  1.1172 +//////////////////////////////////////////////////////////////////////////////
  1.1173 +// thread local storage
  1.1174 +
  1.1175 +int os::allocate_thread_local_storage() {
  1.1176 +  pthread_key_t key;
  1.1177 +  int rslt = pthread_key_create(&key, NULL);
  1.1178 +  assert(rslt == 0, "cannot allocate thread local storage");
  1.1179 +  return (int)key;
  1.1180 +}
  1.1181 +
  1.1182 +// Note: This is currently not used by VM, as we don't destroy TLS key
  1.1183 +// on VM exit.
  1.1184 +void os::free_thread_local_storage(int index) {
  1.1185 +  int rslt = pthread_key_delete((pthread_key_t)index);
  1.1186 +  assert(rslt == 0, "invalid index");
  1.1187 +}
  1.1188 +
  1.1189 +void os::thread_local_storage_at_put(int index, void* value) {
  1.1190 +  int rslt = pthread_setspecific((pthread_key_t)index, value);
  1.1191 +  assert(rslt == 0, "pthread_setspecific failed");
  1.1192 +}
  1.1193 +
// Return the current Thread* from thread-local storage. Exported with C
// linkage — presumably for use from generated code / debugging stubs;
// TODO confirm callers.
extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}
  1.1197 +
  1.1198 +//////////////////////////////////////////////////////////////////////////////
  1.1199 +// initial thread
  1.1200 +
  1.1201 +#ifndef _ALLBSD_SOURCE
  1.1202 +// Check if current thread is the initial thread, similar to Solaris thr_main.
  1.1203 +bool os::Bsd::is_initial_thread(void) {
  1.1204 +  char dummy;
  1.1205 +  // If called before init complete, thread stack bottom will be null.
  1.1206 +  // Can be called if fatal error occurs before initialization.
  1.1207 +  if (initial_thread_stack_bottom() == NULL) return false;
  1.1208 +  assert(initial_thread_stack_bottom() != NULL &&
  1.1209 +         initial_thread_stack_size()   != 0,
  1.1210 +         "os::init did not locate initial thread's stack region");
  1.1211 +  if ((address)&dummy >= initial_thread_stack_bottom() &&
  1.1212 +      (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size())
  1.1213 +       return true;
  1.1214 +  else return false;
  1.1215 +}
  1.1216 +
  1.1217 +// Find the virtual memory area that contains addr
  1.1218 +static bool find_vma(address addr, address* vma_low, address* vma_high) {
  1.1219 +  FILE *fp = fopen("/proc/self/maps", "r");
  1.1220 +  if (fp) {
  1.1221 +    address low, high;
  1.1222 +    while (!feof(fp)) {
  1.1223 +      if (fscanf(fp, "%p-%p", &low, &high) == 2) {
  1.1224 +        if (low <= addr && addr < high) {
  1.1225 +           if (vma_low)  *vma_low  = low;
  1.1226 +           if (vma_high) *vma_high = high;
  1.1227 +           fclose (fp);
  1.1228 +           return true;
  1.1229 +        }
  1.1230 +      }
  1.1231 +      for (;;) {
  1.1232 +        int ch = fgetc(fp);
  1.1233 +        if (ch == EOF || ch == (int)'\n') break;
  1.1234 +      }
  1.1235 +    }
  1.1236 +    fclose(fp);
  1.1237 +  }
  1.1238 +  return false;
  1.1239 +}
  1.1240 +
// Locate initial thread stack. This special handling of initial thread stack
// is needed because pthread_getattr_np() on most (all?) Bsd distros returns
// bogus value for initial thread.
// Sets _initial_thread_stack_size and _initial_thread_stack_bottom. The
// size comes from RLIMIT_STACK (capped); the top is found via
// __libc_stack_end, then /proc/self/stat, then the current SP as a last
// resort, refined through /proc/self/maps (find_vma). max_size, when
// non-zero, caps the recorded stack size.
void os::Bsd::capture_initial_stack(size_t max_size) {
  // stack size is the easy part, get it from RLIMIT_STACK
  size_t stack_size;
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);
  stack_size = rlim.rlim_cur;

  // 6308388: a bug in ld.so will relocate its own .data section to the
  //   lower end of primordial stack; reduce ulimit -s value a little bit
  //   so we won't install guard page on ld.so's data section.
  stack_size -= 2 * page_size();

  // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat
  //   7.1, in both cases we will get 2G in return value.
  // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0,
  //   SuSE 7.2, Debian) can not handle alternate signal stack correctly
  //   for initial thread if its stack size exceeds 6M. Cap it at 2M,
  //   in case other parts in glibc still assumes 2M max stack size.
  // FIXME: alt signal stack is gone, maybe we can relax this constraint?
#ifndef IA64
  if (stack_size > 2 * K * K) stack_size = 2 * K * K;
#else
  // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
  if (stack_size > 4 * K * K) stack_size = 4 * K * K;
#endif

  // Try to figure out where the stack base (top) is. This is harder.
  //
  // When an application is started, glibc saves the initial stack pointer in
  // a global variable "__libc_stack_end", which is then used by system
  // libraries. __libc_stack_end should be pretty close to stack top. The
  // variable is available since the very early days. However, because it is
  // a private interface, it could disappear in the future.
  //
  // Bsd kernel saves start_stack information in /proc/<pid>/stat. Similar
  // to __libc_stack_end, it is very close to stack top, but isn't the real
  // stack top. Note that /proc may not exist if VM is running as a chroot
  // program, so reading /proc/<pid>/stat could fail. Also the contents of
  // /proc/<pid>/stat could change in the future (though unlikely).
  //
  // We try __libc_stack_end first. If that doesn't work, look for
  // /proc/<pid>/stat. If neither of them works, we use current stack pointer
  // as a hint, which should work well in most cases.

  uintptr_t stack_start;

  // try __libc_stack_end first
  uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
  if (p && *p) {
    stack_start = *p;
  } else {
    // see if we can get the start_stack field from /proc/self/stat
    // The locals below mirror the stat fields in order; most are only
    // needed so sscanf can walk the line to reach start_stack (field 28).
    FILE *fp;
    int pid;
    char state;
    int ppid;
    int pgrp;
    int session;
    int nr;
    int tpgrp;
    unsigned long flags;
    unsigned long minflt;
    unsigned long cminflt;
    unsigned long majflt;
    unsigned long cmajflt;
    unsigned long utime;
    unsigned long stime;
    long cutime;
    long cstime;
    long prio;
    long nice;
    long junk;
    long it_real;
    uintptr_t start;
    uintptr_t vsize;
    intptr_t rss;
    uintptr_t rsslim;
    uintptr_t scodes;
    uintptr_t ecode;
    int i;

    // Figure what the primordial thread stack base is. Code is inspired
    // by email from Hans Boehm. /proc/self/stat begins with current pid,
    // followed by command name surrounded by parentheses, state, etc.
    char stat[2048];
    int statlen;

    fp = fopen("/proc/self/stat", "r");
    if (fp) {
      statlen = fread(stat, 1, 2047, fp);
      stat[statlen] = '\0';
      fclose(fp);

      // Skip pid and the command string. Note that we could be dealing with
      // weird command names, e.g. user could decide to rename java launcher
      // to "java 1.4.2 :)", then the stat file would look like
      //                1234 (java 1.4.2 :)) R ... ...
      // We don't really need to know the command string, just find the last
      // occurrence of ")" and then start parsing from there. See bug 4726580.
      char * s = strrchr(stat, ')');

      i = 0;
      if (s) {
        // Skip blank chars
        do s++; while (isspace(*s));

#define _UFM UINTX_FORMAT
#define _DFM INTX_FORMAT

        /*                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2 */
        /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8 */
        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
             &state,          /* 3  %c  */
             &ppid,           /* 4  %d  */
             &pgrp,           /* 5  %d  */
             &session,        /* 6  %d  */
             &nr,             /* 7  %d  */
             &tpgrp,          /* 8  %d  */
             &flags,          /* 9  %lu  */
             &minflt,         /* 10 %lu  */
             &cminflt,        /* 11 %lu  */
             &majflt,         /* 12 %lu  */
             &cmajflt,        /* 13 %lu  */
             &utime,          /* 14 %lu  */
             &stime,          /* 15 %lu  */
             &cutime,         /* 16 %ld  */
             &cstime,         /* 17 %ld  */
             &prio,           /* 18 %ld  */
             &nice,           /* 19 %ld  */
             &junk,           /* 20 %ld  */
             &it_real,        /* 21 %ld  */
             &start,          /* 22 UINTX_FORMAT */
             &vsize,          /* 23 UINTX_FORMAT */
             &rss,            /* 24 INTX_FORMAT  */
             &rsslim,         /* 25 UINTX_FORMAT */
             &scodes,         /* 26 UINTX_FORMAT */
             &ecode,          /* 27 UINTX_FORMAT */
             &stack_start);   /* 28 UINTX_FORMAT */
      }

#undef _UFM
#undef _DFM

      // Fields 3..28 inclusive is 26 conversions (28 - 2).
      if (i != 28 - 2) {
         assert(false, "Bad conversion from /proc/self/stat");
         // product mode - assume we are the initial thread, good luck in the
         // embedded case.
         warning("Can't detect initial thread stack location - bad conversion");
         stack_start = (uintptr_t) &rlim;
      }
    } else {
      // For some reason we can't open /proc/self/stat (for example, running on
      // FreeBSD with a Bsd emulator, or inside chroot), this should work for
      // most cases, so don't abort:
      warning("Can't detect initial thread stack location - no /proc/self/stat");
      stack_start = (uintptr_t) &rlim;
    }
  }

  // Now we have a pointer (stack_start) very close to the stack top, the
  // next thing to do is to figure out the exact location of stack top. We
  // can find out the virtual memory area that contains stack_start by
  // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
  // and its upper limit is the real stack top. (again, this would fail if
  // running inside chroot, because /proc may not exist.)

  uintptr_t stack_top;
  address low, high;
  if (find_vma((address)stack_start, &low, &high)) {
    // success, "high" is the true stack top. (ignore "low", because initial
    // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
    stack_top = (uintptr_t)high;
  } else {
    // failed, likely because /proc/self/maps does not exist
    warning("Can't detect initial thread stack location - find_vma failed");
    // best effort: stack_start is normally within a few pages below the real
    // stack top, use it as stack top, and reduce stack size so we won't put
    // guard page outside stack.
    stack_top = stack_start;
    stack_size -= 16 * page_size();
  }

  // stack_top could be partially down the page so align it
  stack_top = align_size_up(stack_top, page_size());

  if (max_size && stack_size > max_size) {
     _initial_thread_stack_size = max_size;
  } else {
     _initial_thread_stack_size = stack_size;
  }

  _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
  _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
}
  1.1438 +#endif
  1.1439 +
  1.1440 +////////////////////////////////////////////////////////////////////////////////
  1.1441 +// time support
  1.1442 +
  1.1443 +// Time since start-up in seconds to a fine granularity.
  1.1444 +// Used by VMSelfDestructTimer and the MemProfiler.
  1.1445 +double os::elapsedTime() {
  1.1446 +
  1.1447 +  return (double)(os::elapsed_counter()) * 0.000001;
  1.1448 +}
  1.1449 +
  1.1450 +jlong os::elapsed_counter() {
  1.1451 +  timeval time;
  1.1452 +  int status = gettimeofday(&time, NULL);
  1.1453 +  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
  1.1454 +}
  1.1455 +
  1.1456 +jlong os::elapsed_frequency() {
  1.1457 +  return (1000 * 1000);
  1.1458 +}
  1.1459 +
// XXX: For now, code this as if BSD does not support vtime.
bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

// With vtime unsupported, report wall-clock elapsed time instead of
// per-thread CPU time.
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}
  1.1468 +
  1.1469 +jlong os::javaTimeMillis() {
  1.1470 +  timeval time;
  1.1471 +  int status = gettimeofday(&time, NULL);
  1.1472 +  assert(status != -1, "bsd error");
  1.1473 +  return jlong(time.tv_sec) * 1000  +  jlong(time.tv_usec / 1000);
  1.1474 +}
  1.1475 +
  1.1476 +#ifndef CLOCK_MONOTONIC
  1.1477 +#define CLOCK_MONOTONIC (1)
  1.1478 +#endif
  1.1479 +
  1.1480 +#ifdef __APPLE__
// Darwin stub: no monotonic clock is configured here — presumably callers
// then fall back to the non-monotonic time sources above; TODO confirm.
void os::Bsd::clock_init() {
        // XXXDARWIN: Investigate replacement monotonic clock
}
  1.1484 +#elif defined(_ALLBSD_SOURCE)
  1.1485 +void os::Bsd::clock_init() {
  1.1486 +  struct timespec res;
  1.1487 +  struct timespec tp;
  1.1488 +  if (::clock_getres(CLOCK_MONOTONIC, &res) == 0 &&
  1.1489 +      ::clock_gettime(CLOCK_MONOTONIC, &tp)  == 0) {
  1.1490 +    // yes, monotonic clock is supported
  1.1491 +    _clock_gettime = ::clock_gettime;
  1.1492 +  }
  1.1493 +}
  1.1494 +#else
  1.1495 +void os::Bsd::clock_init() {
  1.1496 +  // we do dlopen's in this particular order due to bug in bsd
  1.1497 +  // dynamical loader (see 6348968) leading to crash on exit
  1.1498 +  void* handle = dlopen("librt.so.1", RTLD_LAZY);
  1.1499 +  if (handle == NULL) {
  1.1500 +    handle = dlopen("librt.so", RTLD_LAZY);
  1.1501 +  }
  1.1502 +
  1.1503 +  if (handle) {
  1.1504 +    int (*clock_getres_func)(clockid_t, struct timespec*) =
  1.1505 +           (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
  1.1506 +    int (*clock_gettime_func)(clockid_t, struct timespec*) =
  1.1507 +           (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
  1.1508 +    if (clock_getres_func && clock_gettime_func) {
  1.1509 +      // See if monotonic clock is supported by the kernel. Note that some
  1.1510 +      // early implementations simply return kernel jiffies (updated every
  1.1511 +      // 1/100 or 1/1000 second). It would be bad to use such a low res clock
  1.1512 +      // for nano time (though the monotonic property is still nice to have).
  1.1513 +      // It's fixed in newer kernels, however clock_getres() still returns
  1.1514 +      // 1/HZ. We check if clock_getres() works, but will ignore its reported
  1.1515 +      // resolution for now. Hopefully as people move to new kernels, this
  1.1516 +      // won't be a problem.
  1.1517 +      struct timespec res;
  1.1518 +      struct timespec tp;
  1.1519 +      if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
  1.1520 +          clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
  1.1521 +        // yes, monotonic clock is supported
  1.1522 +        _clock_gettime = clock_gettime_func;
  1.1523 +      } else {
  1.1524 +        // close librt if there is no monotonic clock
  1.1525 +        dlclose(handle);
  1.1526 +      }
  1.1527 +    }
  1.1528 +  }
  1.1529 +}
  1.1530 +#endif
  1.1531 +
#ifndef _ALLBSD_SOURCE
#ifndef SYS_clock_getres

// SYS_clock_getres is missing from the libc headers on some platforms;
// call the kernel directly via syscall(2) using hard-coded numbers for
// the architectures we know, otherwise disable the probe.
#if defined(IA32) || defined(AMD64)
#define SYS_clock_getres IA32_ONLY(266)  AMD64_ONLY(229)
#define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
#else
#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
#define sys_clock_getres(x,y)  -1
#endif

#else
#define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
#endif

// Probe per-thread CPU-time clocks: if pthread_getcpuclockid() is present
// and its clock has sub-second resolution, record the function pointer and
// set _supports_fast_thread_cpu_time. No-op unless UseBsdPosixThreadCPUClocks.
void os::Bsd::fast_thread_clock_init() {
  if (!UseBsdPosixThreadCPUClocks) {
    return;
  }
  clockid_t clockid;
  struct timespec tp;
  int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
      (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");

  // Switch to using fast clocks for thread cpu time if
  // the sys_clock_getres() returns 0 error code.
  // Note, that some kernels may support the current thread
  // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
  // returned by the pthread_getcpuclockid().
  // If the fast Posix clocks are supported then the sys_clock_getres()
  // must return at least tp.tv_sec == 0 which means a resolution
  // better than 1 sec. This is extra check for reliability.

  if(pthread_getcpuclockid_func &&
     pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
     sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {

    _supports_fast_thread_cpu_time = true;
    _pthread_getcpuclockid = pthread_getcpuclockid_func;
  }
}
#endif
  1.1574 +
  1.1575 +jlong os::javaTimeNanos() {
  1.1576 +  if (Bsd::supports_monotonic_clock()) {
  1.1577 +    struct timespec tp;
  1.1578 +    int status = Bsd::clock_gettime(CLOCK_MONOTONIC, &tp);
  1.1579 +    assert(status == 0, "gettime error");
  1.1580 +    jlong result = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
  1.1581 +    return result;
  1.1582 +  } else {
  1.1583 +    timeval time;
  1.1584 +    int status = gettimeofday(&time, NULL);
  1.1585 +    assert(status != -1, "bsd error");
  1.1586 +    jlong usecs = jlong(time.tv_sec) * (1000 * 1000) + jlong(time.tv_usec);
  1.1587 +    return 1000 * usecs;
  1.1588 +  }
  1.1589 +}
  1.1590 +
  1.1591 +void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  1.1592 +  if (Bsd::supports_monotonic_clock()) {
  1.1593 +    info_ptr->max_value = ALL_64_BITS;
  1.1594 +
  1.1595 +    // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
  1.1596 +    info_ptr->may_skip_backward = false;      // not subject to resetting or drifting
  1.1597 +    info_ptr->may_skip_forward = false;       // not subject to resetting or drifting
  1.1598 +  } else {
  1.1599 +    // gettimeofday - based on time in seconds since the Epoch thus does not wrap
  1.1600 +    info_ptr->max_value = ALL_64_BITS;
  1.1601 +
  1.1602 +    // gettimeofday is a real time clock so it skips
  1.1603 +    info_ptr->may_skip_backward = true;
  1.1604 +    info_ptr->may_skip_forward = true;
  1.1605 +  }
  1.1606 +
  1.1607 +  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
  1.1608 +}
  1.1609 +
  1.1610 +// Return the real, user, and system times in seconds from an
  1.1611 +// arbitrary fixed point in the past.
  1.1612 +bool os::getTimesSecs(double* process_real_time,
  1.1613 +                      double* process_user_time,
  1.1614 +                      double* process_system_time) {
  1.1615 +  struct tms ticks;
  1.1616 +  clock_t real_ticks = times(&ticks);
  1.1617 +
  1.1618 +  if (real_ticks == (clock_t) (-1)) {
  1.1619 +    return false;
  1.1620 +  } else {
  1.1621 +    double ticks_per_second = (double) clock_tics_per_sec;
  1.1622 +    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
  1.1623 +    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
  1.1624 +    *process_real_time = ((double) real_ticks) / ticks_per_second;
  1.1625 +
  1.1626 +    return true;
  1.1627 +  }
  1.1628 +}
  1.1629 +
  1.1630 +
  1.1631 +char * os::local_time_string(char *buf, size_t buflen) {
  1.1632 +  struct tm t;
  1.1633 +  time_t long_time;
  1.1634 +  time(&long_time);
  1.1635 +  localtime_r(&long_time, &t);
  1.1636 +  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
  1.1637 +               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
  1.1638 +               t.tm_hour, t.tm_min, t.tm_sec);
  1.1639 +  return buf;
  1.1640 +}
  1.1641 +
  1.1642 +struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
  1.1643 +  return localtime_r(clock, res);
  1.1644 +}
  1.1645 +
  1.1646 +////////////////////////////////////////////////////////////////////////////////
  1.1647 +// runtime exit support
  1.1648 +
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    // user-registered hook runs last, after VM-side cleanup above
    abort_hook();
  }

}
  1.1670 +
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// If dump_core is true, terminates via ::abort() (which raises SIGABRT);
// otherwise exits with status 1.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    // Non-product builds announce the aborting thread before dumping core.
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}
  1.1690 +
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // _exit() on BsdThreads only kills current thread
  // ::abort() raises SIGABRT, taking the whole process down.
  ::abort();
}
  1.1696 +
// unused on bsd for now.
// Intentionally a no-op on this platform.
void os::set_error_file(const char *logfile) {}
  1.1699 +
  1.1700 +
  1.1701 +// This method is a copy of JDK's sysGetLastErrorString
  1.1702 +// from src/solaris/hpi/src/system_md.c
  1.1703 +
  1.1704 +size_t os::lasterror(char *buf, size_t len) {
  1.1705 +
  1.1706 +  if (errno == 0)  return 0;
  1.1707 +
  1.1708 +  const char *s = ::strerror(errno);
  1.1709 +  size_t n = ::strlen(s);
  1.1710 +  if (n >= len) {
  1.1711 +    n = len - 1;
  1.1712 +  }
  1.1713 +  ::strncpy(buf, s, n);
  1.1714 +  buf[n] = '\0';
  1.1715 +  return n;
  1.1716 +}
  1.1717 +
  1.1718 +intx os::current_thread_id() { return (intx)pthread_self(); }
  1.1719 +int os::current_process_id() {
  1.1720 +
  1.1721 +  // Under the old bsd thread library, bsd gives each thread
  1.1722 +  // its own process id. Because of this each thread will return
  1.1723 +  // a different pid if this method were to return the result
  1.1724 +  // of getpid(2). Bsd provides no api that returns the pid
  1.1725 +  // of the launcher thread for the vm. This implementation
  1.1726 +  // returns a unique pid, the pid of the launcher thread
  1.1727 +  // that starts the vm 'process'.
  1.1728 +
  1.1729 +  // Under the NPTL, getpid() returns the same pid as the
  1.1730 +  // launcher thread rather than a unique pid per thread.
  1.1731 +  // Use gettid() if you want the old pre NPTL behaviour.
  1.1732 +
  1.1733 +  // if you are looking for the result of a call to getpid() that
  1.1734 +  // returns a unique pid for the calling thread, then look at the
  1.1735 +  // OSThread::thread_id() method in osThread_bsd.hpp file
  1.1736 +
  1.1737 +  return (int)(_initial_pid ? _initial_pid : getpid());
  1.1738 +}
  1.1739 +
// DLL functions

// Platform naming convention for JNI libraries:
// lib<name>.dylib on OS X, lib<name>.so elsewhere.
#define JNI_LIB_PREFIX "lib"
#ifdef __APPLE__
#define JNI_LIB_SUFFIX ".dylib"
#else
#define JNI_LIB_SUFFIX ".so"
#endif

// File extension for dynamically loaded libraries on this platform.
const char* os::dll_file_extension() { return JNI_LIB_SUFFIX; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
  1.1754 +
  1.1755 +static bool file_exists(const char* filename) {
  1.1756 +  struct stat statbuf;
  1.1757 +  if (filename == NULL || strlen(filename) == 0) {
  1.1758 +    return false;
  1.1759 +  }
  1.1760 +  return os::stat(filename, &statbuf) == 0;
  1.1761 +}
  1.1762 +
// Compose a platform library file name ("lib<fname><suffix>") into
// 'buffer'. If 'pname' contains the path separator it is treated as a
// search path: each element is tried in turn and the first existing file
// wins (the last candidate remains in 'buffer' if none exists).
void os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Quietly truncate on buffer overflow.  Should be an error.
  if (pnamelen + strlen(fname) + strlen(JNI_LIB_PREFIX) + strlen(JNI_LIB_SUFFIX) + 2 > buflen) {
      *buffer = '\0';
      return;
  }

  if (pnamelen == 0) {
    // No directory given: bare library name.
    snprintf(buffer, buflen, JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, fname);
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // Search-path case: try each element until an existing file is found.
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0 ; i < n ; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX,
          pelements[i], fname);
      if (file_exists(buffer)) {
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // Single directory prefix.
    snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, pname, fname);
  }
}
  1.1803 +
  1.1804 +const char* os::get_current_directory(char *buf, int buflen) {
  1.1805 +  return getcwd(buf, buflen);
  1.1806 +}
  1.1807 +
  1.1808 +// check if addr is inside libjvm[_g].so
  1.1809 +bool os::address_is_in_vm(address addr) {
  1.1810 +  static address libjvm_base_addr;
  1.1811 +  Dl_info dlinfo;
  1.1812 +
  1.1813 +  if (libjvm_base_addr == NULL) {
  1.1814 +    dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
  1.1815 +    libjvm_base_addr = (address)dlinfo.dli_fbase;
  1.1816 +    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  1.1817 +  }
  1.1818 +
  1.1819 +  if (dladdr((void *)addr, &dlinfo)) {
  1.1820 +    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  1.1821 +  }
  1.1822 +
  1.1823 +  return false;
  1.1824 +}
  1.1825 +
  1.1826 +bool os::dll_address_to_function_name(address addr, char *buf,
  1.1827 +                                      int buflen, int *offset) {
  1.1828 +  Dl_info dlinfo;
  1.1829 +
  1.1830 +  if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
  1.1831 +    if (buf != NULL) {
  1.1832 +      if(!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
  1.1833 +        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
  1.1834 +      }
  1.1835 +    }
  1.1836 +    if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
  1.1837 +    return true;
  1.1838 +  } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
  1.1839 +    if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
  1.1840 +       dlinfo.dli_fname, buf, buflen, offset) == Decoder::no_error) {
  1.1841 +       return true;
  1.1842 +    }
  1.1843 +  }
  1.1844 +
  1.1845 +  if (buf != NULL) buf[0] = '\0';
  1.1846 +  if (offset != NULL) *offset = -1;
  1.1847 +  return false;
  1.1848 +}
  1.1849 +
  1.1850 +#ifdef _ALLBSD_SOURCE
  1.1851 +// ported from solaris version
  1.1852 +bool os::dll_address_to_library_name(address addr, char* buf,
  1.1853 +                                     int buflen, int* offset) {
  1.1854 +  Dl_info dlinfo;
  1.1855 +
  1.1856 +  if (dladdr((void*)addr, &dlinfo)){
  1.1857 +     if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
  1.1858 +     if (offset) *offset = addr - (address)dlinfo.dli_fbase;
  1.1859 +     return true;
  1.1860 +  } else {
  1.1861 +     if (buf) buf[0] = '\0';
  1.1862 +     if (offset) *offset = -1;
  1.1863 +     return false;
  1.1864 +  }
  1.1865 +}
  1.1866 +#else
// Parameter/result carrier for address_to_library_name_callback() below.
struct _address_to_library_name {
  address addr;          // input : memory address
  size_t  buflen;        //         size of fname
  char*   fname;         // output: library name
  address base;          //         library base addr
};
  1.1873 +
// dl_iterate_phdr() callback: returns 1 (stops iteration) when
// 'data->addr' lies inside one of this object's PT_LOAD segments and the
// object has a name, filling in the name and base address; returns 0
// otherwise so iteration continues.
static int address_to_library_name_callback(struct dl_phdr_info *info,
                                            size_t size, void *data) {
  int i;
  bool found = false;
  address libbase = NULL;
  struct _address_to_library_name * d = (struct _address_to_library_name *)data;

  // iterate through all loadable segments
  for (i = 0; i < info->dlpi_phnum; i++) {
    address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
    if (info->dlpi_phdr[i].p_type == PT_LOAD) {
      // base address of a library is the lowest address of its loaded
      // segments.
      if (libbase == NULL || libbase > segbase) {
        libbase = segbase;
      }
      // see if 'addr' is within current segment
      if (segbase <= d->addr &&
          d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
        found = true;
      }
    }
  }

  // dlpi_name is NULL or empty if the ELF file is executable, return 0
  // so dll_address_to_library_name() can fall through to use dladdr() which
  // can figure out executable name from argv[0].
  if (found && info->dlpi_name && info->dlpi_name[0]) {
    d->base = libbase;
    if (d->fname) {
      jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
    }
    return 1;
  }
  return 0;
}
  1.1910 +
// Resolve 'addr' to the library containing it, preferring a program-header
// walk over dladdr() (see glibc bug note below); falls back to dladdr().
// On success buf holds the library name and *offset the distance from its
// base; on failure buf is cleared and *offset set to -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  Dl_info dlinfo;
  struct _address_to_library_name data;

  // There is a bug in old glibc dladdr() implementation that it could resolve
  // to wrong library name if the .so file has a base address != NULL. Here
  // we iterate through the program headers of all loaded libraries to find
  // out which library 'addr' really belongs to. This workaround can be
  // removed once the minimum requirement for glibc is moved to 2.3.x.
  data.addr = addr;
  data.fname = buf;
  data.buflen = buflen;
  data.base = NULL;
  int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);

  if (rslt) {
     // buf already contains library name
     if (offset) *offset = addr - data.base;
     return true;
  } else if (dladdr((void*)addr, &dlinfo)){
     if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
     if (offset) *offset = addr - (address)dlinfo.dli_fbase;
     return true;
  } else {
     if (buf) buf[0] = '\0';
     if (offset) *offset = -1;
     return false;
  }
}
  1.1941 +#endif
  1.1942 +
  1.1943 +  // Loads .dll/.so and
  1.1944 +  // in case of error it checks if .dll/.so was built for the
  1.1945 +  // same architecture as Hotspot is running on
  1.1946 +
  1.1947 +#ifdef __APPLE__
  1.1948 +void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
  1.1949 +  void * result= ::dlopen(filename, RTLD_LAZY);
  1.1950 +  if (result != NULL) {
  1.1951 +    // Successful loading
  1.1952 +    return result;
  1.1953 +  }
  1.1954 +
  1.1955 +  // Read system error message into ebuf
  1.1956 +  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  1.1957 +  ebuf[ebuflen-1]='\0';
  1.1958 +
  1.1959 +  return NULL;
  1.1960 +}
  1.1961 +#else
// Load a dynamic library; on failure returns NULL with a diagnostic in
// ebuf. After a failed dlopen() the library's ELF header is inspected to
// append a more specific cause (wrong endianness, word width, or
// architecture) to the dlerror() text.
void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
  void * result= ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Successful loading
    return result;
  }

  Elf32_Ehdr elf_head;

  // Read system error message into ebuf
  // It may or may not be overwritten below
  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  ebuf[ebuflen-1]='\0';
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  // Read just the ELF header; enough to classify the architecture.
  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
        (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  typedef struct {
    Elf32_Half  code;         // Actual value as defined in elf.h
    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
    char        elf_class;    // 32 or 64 bit
    char        endianess;    // MSB or LSB
    char*       name;         // String representation
  } arch_t;

  // Machine codes possibly missing from older elf.h headers.
  #ifndef EM_486
  #define EM_486          6               /* Intel 80486 */
  #endif

  #ifndef EM_MIPS_RS3_LE
  #define EM_MIPS_RS3_LE  10              /* MIPS */
  #endif

  #ifndef EM_PPC64
  #define EM_PPC64        21              /* PowerPC64 */
  #endif

  #ifndef EM_S390
  #define EM_S390         22              /* IBM System/390 */
  #endif

  #ifndef EM_IA_64
  #define EM_IA_64        50              /* HP/Intel IA-64 */
  #endif

  #ifndef EM_X86_64
  #define EM_X86_64       62              /* AMD x86-64 */
  #endif

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
    {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
    {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
    {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
    {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
    {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
    {EM_PARISC,      EM_PARISC,  ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
    {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"}
  };

  // Machine code of the architecture this VM was built for.
  #if  (defined IA32)
    static  Elf32_Half running_arch_code=EM_386;
  #elif   (defined AMD64)
    static  Elf32_Half running_arch_code=EM_X86_64;
  #elif  (defined IA64)
    static  Elf32_Half running_arch_code=EM_IA_64;
  #elif  (defined __sparc) && (defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARCV9;
  #elif  (defined __sparc) && (!defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARC;
  #elif  (defined __powerpc64__)
    static  Elf32_Half running_arch_code=EM_PPC64;
  #elif  (defined __powerpc__)
    static  Elf32_Half running_arch_code=EM_PPC;
  #elif  (defined ARM)
    static  Elf32_Half running_arch_code=EM_ARM;
  #elif  (defined S390)
    static  Elf32_Half running_arch_code=EM_S390;
  #elif  (defined ALPHA)
    static  Elf32_Half running_arch_code=EM_ALPHA;
  #elif  (defined MIPSEL)
    static  Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
  #elif  (defined PARISC)
    static  Elf32_Half running_arch_code=EM_PARISC;
  #elif  (defined MIPS)
    static  Elf32_Half running_arch_code=EM_MIPS;
  #elif  (defined M68K)
    static  Elf32_Half running_arch_code=EM_68K;
  #else
    #error Method os::dll_load requires that one of following is defined:\
         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
  #endif

  // Identify compatability class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index    = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name         = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
    "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed
    // we may still continue with reporting dlerror() message
    return NULL;
  }

  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
    return NULL;
  }

#ifndef S390
  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
    return NULL;
  }
#endif // !S390

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if ( lib_arch.name!=NULL ) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
        lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
        lib_arch.code,
        arch_array[running_arch_index].name);
    }
  }

  return NULL;
}
  1.2137 +#endif /* !__APPLE__ */
  1.2138 +
  1.2139 +// XXX: Do we need a lock around this as per Linux?
  1.2140 +void* os::dll_lookup(void* handle, const char* name) {
  1.2141 +  return dlsym(handle, name);
  1.2142 +}
  1.2143 +
  1.2144 +
  1.2145 +static bool _print_ascii_file(const char* filename, outputStream* st) {
  1.2146 +  int fd = ::open(filename, O_RDONLY);
  1.2147 +  if (fd == -1) {
  1.2148 +     return false;
  1.2149 +  }
  1.2150 +
  1.2151 +  char buf[32];
  1.2152 +  int bytes;
  1.2153 +  while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
  1.2154 +    st->print_raw(buf, bytes);
  1.2155 +  }
  1.2156 +
  1.2157 +  ::close(fd);
  1.2158 +
  1.2159 +  return true;
  1.2160 +}
  1.2161 +
// Print the list of loaded dynamic libraries to 'st'. Three strategies:
// dlinfo()/link map where RTLD_DI_LINKMAP exists, the dyld image list on
// OS X, and /proc/<pid>/maps otherwise.
void os::print_dll_info(outputStream *st) {
   st->print_cr("Dynamic libraries:");
#ifdef _ALLBSD_SOURCE
#ifdef RTLD_DI_LINKMAP
    Dl_info dli;
    void *handle;
    Link_map *map;
    Link_map *p;

    if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
        st->print_cr("Error: Cannot print dynamic libraries.");
        return;
    }
    handle = dlopen(dli.dli_fname, RTLD_LAZY);
    if (handle == NULL) {
        st->print_cr("Error: Cannot print dynamic libraries.");
        return;
    }
    dlinfo(handle, RTLD_DI_LINKMAP, &map);
    if (map == NULL) {
        st->print_cr("Error: Cannot print dynamic libraries.");
        return;
    }

    // Rewind to the head of the link map, then print every entry.
    while (map->l_prev != NULL)
        map = map->l_prev;

    while (map != NULL) {
        st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
        map = map->l_next;
    }

    dlclose(handle);
#elif defined(__APPLE__)
    uint32_t count;
    uint32_t i;

    count = _dyld_image_count();
    // Starts at 1, skipping image 0 (presumably the main executable --
    // dyld convention; confirm against dyld docs).
    for (i = 1; i < count; i++) {
        const char *name = _dyld_get_image_name(i);
        intptr_t slide = _dyld_get_image_vmaddr_slide(i);
        st->print_cr(PTR_FORMAT " \t%s", slide, name);
    }
#else
   st->print_cr("Error: Cannot print dynamic libraries.");
#endif
#else
   char fname[32];
   pid_t pid = os::Bsd::gettid();

   jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);

   if (!_print_ascii_file(fname, st)) {
     st->print("Can not get library information for pid = %d\n", pid);
   }
#endif
}
  1.2219 +
  1.2220 +
  1.2221 +void os::print_os_info(outputStream* st) {
  1.2222 +  st->print("OS:");
  1.2223 +
  1.2224 +  // Try to identify popular distros.
  1.2225 +  // Most Bsd distributions have /etc/XXX-release file, which contains
  1.2226 +  // the OS version string. Some have more than one /etc/XXX-release file
  1.2227 +  // (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
  1.2228 +  // so the order is important.
  1.2229 +  if (!_print_ascii_file("/etc/mandrake-release", st) &&
  1.2230 +      !_print_ascii_file("/etc/sun-release", st) &&
  1.2231 +      !_print_ascii_file("/etc/redhat-release", st) &&
  1.2232 +      !_print_ascii_file("/etc/SuSE-release", st) &&
  1.2233 +      !_print_ascii_file("/etc/turbobsd-release", st) &&
  1.2234 +      !_print_ascii_file("/etc/gentoo-release", st) &&
  1.2235 +      !_print_ascii_file("/etc/debian_version", st) &&
  1.2236 +      !_print_ascii_file("/etc/ltib-release", st) &&
  1.2237 +      !_print_ascii_file("/etc/angstrom-version", st)) {
  1.2238 +      st->print("Bsd");
  1.2239 +  }
  1.2240 +  st->cr();
  1.2241 +
  1.2242 +  // kernel
  1.2243 +  st->print("uname:");
  1.2244 +  struct utsname name;
  1.2245 +  uname(&name);
  1.2246 +  st->print(name.sysname); st->print(" ");
  1.2247 +  st->print(name.release); st->print(" ");
  1.2248 +  st->print(name.version); st->print(" ");
  1.2249 +  st->print(name.machine);
  1.2250 +  st->cr();
  1.2251 +
  1.2252 +#ifndef _ALLBSD_SOURCE
  1.2253 +  // Print warning if unsafe chroot environment detected
  1.2254 +  if (unsafe_chroot_detected) {
  1.2255 +    st->print("WARNING!! ");
  1.2256 +    st->print_cr(unstable_chroot_error);
  1.2257 +  }
  1.2258 +
  1.2259 +  // libc, pthread
  1.2260 +  st->print("libc:");
  1.2261 +  st->print(os::Bsd::glibc_version()); st->print(" ");
  1.2262 +  st->print(os::Bsd::libpthread_version()); st->print(" ");
  1.2263 +  if (os::Bsd::is_BsdThreads()) {
  1.2264 +     st->print("(%s stack)", os::Bsd::is_floating_stack() ? "floating" : "fixed");
  1.2265 +  }
  1.2266 +  st->cr();
  1.2267 +#endif
  1.2268 +
  1.2269 +  // rlimit
  1.2270 +  st->print("rlimit:");
  1.2271 +  struct rlimit rlim;
  1.2272 +
  1.2273 +  st->print(" STACK ");
  1.2274 +  getrlimit(RLIMIT_STACK, &rlim);
  1.2275 +  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  1.2276 +  else st->print("%uk", rlim.rlim_cur >> 10);
  1.2277 +
  1.2278 +  st->print(", CORE ");
  1.2279 +  getrlimit(RLIMIT_CORE, &rlim);
  1.2280 +  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  1.2281 +  else st->print("%uk", rlim.rlim_cur >> 10);
  1.2282 +
  1.2283 +  st->print(", NPROC ");
  1.2284 +  getrlimit(RLIMIT_NPROC, &rlim);
  1.2285 +  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  1.2286 +  else st->print("%d", rlim.rlim_cur);
  1.2287 +
  1.2288 +  st->print(", NOFILE ");
  1.2289 +  getrlimit(RLIMIT_NOFILE, &rlim);
  1.2290 +  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  1.2291 +  else st->print("%d", rlim.rlim_cur);
  1.2292 +
  1.2293 +#ifndef _ALLBSD_SOURCE
  1.2294 +  st->print(", AS ");
  1.2295 +  getrlimit(RLIMIT_AS, &rlim);
  1.2296 +  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  1.2297 +  else st->print("%uk", rlim.rlim_cur >> 10);
  1.2298 +  st->cr();
  1.2299 +
  1.2300 +  // load average
  1.2301 +  st->print("load average:");
  1.2302 +  double loadavg[3];
  1.2303 +  os::loadavg(loadavg, 3);
  1.2304 +  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  1.2305 +  st->cr();
  1.2306 +#endif
  1.2307 +}
  1.2308 +
// Platform-dependent CPU details; the shared os code prints the common part.
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}
  1.2312 +
// Print page size, physical/available memory and (non-BSD builds) swap usage.
void os::print_memory_info(outputStream* st) {

  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

#ifndef _ALLBSD_SOURCE
  // values in struct sysinfo are "unsigned long"
  struct sysinfo si;
  sysinfo(&si);
#endif

  st->print(", physical " UINT64_FORMAT "k",
            os::physical_memory() >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            os::available_memory() >> 10);
#ifndef _ALLBSD_SOURCE
  // Swap totals come straight from sysinfo(); mem_unit scales the counts
  // into bytes.
  st->print(", swap " UINT64_FORMAT "k",
            ((jlong)si.totalswap * si.mem_unit) >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            ((jlong)si.freeswap * si.mem_unit) >> 10);
#endif
  st->cr();

  // meminfo
  // NOTE(review): /proc/meminfo does not exist on plain BSD/OS X; the header
  // is printed unconditionally and _print_ascii_file then silently fails --
  // presumably intentional best-effort, but confirm.
  st->print("\n/proc/meminfo:\n");
  _print_ascii_file("/proc/meminfo", st);
  st->cr();
}
  1.2341 +
// Taken from /usr/include/bits/siginfo.h  Supposed to be architecture specific
// but they're the same for all the bsd arch that we support
// and they're the same for solaris but there's no common place to put this.
// Each table is indexed by si_code; entry 0 is a placeholder since valid
// si_code values for these signals start at 1 (see print_siginfo below,
// whose bounds checks must match the table lengths).
const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
                          "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
                          "ILL_COPROC", "ILL_BADSTK" };

const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
                          "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
                          "FPE_FLTINV", "FPE_FLTSUB", "FPE_FLTDEN" };

const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };

const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
  1.2356 +
// Decode a siginfo_t for the error report: signal name, errno text,
// symbolic si_code (for the four hardware signals) and faulting address.
void os::print_siginfo(outputStream* st, void* siginfo) {
  st->print("siginfo:");

  const int buflen = 100;
  char buf[buflen];
  siginfo_t *si = (siginfo_t*)siginfo;
  st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
  // strerror_r returns 0 on success; otherwise fall back to the raw number.
  if (si->si_errno != 0 && strerror_r(si->si_errno, buf, buflen) == 0) {
    st->print("si_errno=%s", buf);
  } else {
    st->print("si_errno=%d", si->si_errno);
  }
  const int c = si->si_code;
  assert(c > 0, "unexpected si_code");
  // The upper bounds below are the last valid index of the corresponding
  // name table defined above (ill_names has 9 entries, fpe_names 10, etc.).
  switch (si->si_signo) {
  case SIGILL:
    st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGFPE:
    st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGSEGV:
    st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGBUS:
    st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  default:
    st->print(", si_code=%d", si->si_code);
    // no si_addr
  }

  // A fault inside the CDS archive mapping usually means the backing file
  // became unreadable; add a hint rather than blaming the VM.
  if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive."   \
                " Mapped file inaccessible during execution, "      \
                " possible disk/network problem.");
    }
  }
  st->cr();
}
  1.2404 +
  1.2405 +
  1.2406 +static void print_signal_handler(outputStream* st, int sig,
  1.2407 +                                 char* buf, size_t buflen);
  1.2408 +
  1.2409 +void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  1.2410 +  st->print_cr("Signal Handlers:");
  1.2411 +  print_signal_handler(st, SIGSEGV, buf, buflen);
  1.2412 +  print_signal_handler(st, SIGBUS , buf, buflen);
  1.2413 +  print_signal_handler(st, SIGFPE , buf, buflen);
  1.2414 +  print_signal_handler(st, SIGPIPE, buf, buflen);
  1.2415 +  print_signal_handler(st, SIGXFSZ, buf, buflen);
  1.2416 +  print_signal_handler(st, SIGILL , buf, buflen);
  1.2417 +  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  1.2418 +  print_signal_handler(st, SR_signum, buf, buflen);
  1.2419 +  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  1.2420 +  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  1.2421 +  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  1.2422 +  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  1.2423 +}
  1.2424 +
// Cached result of os::jvm_path(); computed once per process.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so or libjvm_g.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Ask dladdr (via dll_address_to_library_name) which shared object
  // contains this very function -- that object is libjvm itself.
  char dli_fname[MAXPATHLEN];
  bool ret = dll_address_to_library_name(
                CAST_FROM_FN_PTR(address, os::jvm_path),
                dli_fname, sizeof(dli_fname), NULL);
  assert(ret != 0, "cannot locate libjvm");
  char *rp = realpath(dli_fname, buf);
  if (rp == NULL)
    return;

  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher.  Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).

    // Walk back over the last five '/'-separated components to find the
    // point where "/jre/lib/" would start.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so" or "libjvm_g.so".
        // After this, p is either "" or "_g" -- the debug-name suffix.
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");
        p = strstr(p, "_g") ? "_g" : "";

        rp = realpath(java_home_var, buf);
        if (rp == NULL)
          return;

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm[_g].so" instead of
          // "libjvm"debug_only("_g")".so" since for fastdebug version
          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
        } else {
          // Go back to path of .so
          rp = realpath(dli_fname, buf);
          if (rp == NULL)
            return;
        }
      }
    }
  }

  strcpy(saved_jvm_path, buf);
}
  1.2506 +
// JNI symbol-name decoration used when resolving native methods;
// this platform uses plain C symbol names.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
  1.2514 +
  1.2515 +////////////////////////////////////////////////////////////////////////////////
  1.2516 +// sun.misc.Signal support
  1.2517 +
  1.2518 +static volatile jint sigint_count = 0;
  1.2519 +
  1.2520 +static void
  1.2521 +UserHandler(int sig, void *siginfo, void *context) {
  1.2522 +  // 4511530 - sem_post is serialized and handled by the manager thread. When
  1.2523 +  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  1.2524 +  // don't want to flood the manager thread with sem_post requests.
  1.2525 +  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
  1.2526 +      return;
  1.2527 +
  1.2528 +  // Ctrl-C is pressed during error reporting, likely because the error
  1.2529 +  // handler fails to abort. Let VM die immediately.
  1.2530 +  if (sig == SIGINT && is_error_reported()) {
  1.2531 +     os::die();
  1.2532 +  }
  1.2533 +
  1.2534 +  os::signal_notify(sig);
  1.2535 +}
  1.2536 +
  1.2537 +void* os::user_handler() {
  1.2538 +  return CAST_FROM_FN_PTR(void*, UserHandler);
  1.2539 +}
  1.2540 +
// C-linkage typedefs for the two styles of POSIX signal handler.
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

// Install 'handler' for 'signal_number' via sigaction.
// Returns the previous handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  // Block all signals while the handler runs; restart interrupted syscalls.
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
  1.2560 +
// Deliver 'signal_number' to the current process.
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}
  1.2574 +
// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Bsd(POSIX) specific hand shaking semaphore.
// On Darwin POSIX unnamed semaphores are unavailable, so Mach semaphores
// are used instead; elsewhere plain sem_t works.
// NOTE(review): SEM_WAIT/SEM_POST carry a trailing ';' in their expansion,
// so call sites like "::SEM_POST(sig_sem);" expand to a double semicolon --
// harmless, but worth tidying.
#ifdef __APPLE__
static semaphore_t sig_sem;
#define SEM_INIT(sem, value)    semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value)
#define SEM_WAIT(sem)           semaphore_wait(sem);
#define SEM_POST(sem)           semaphore_signal(sem);
#else
static sem_t sig_sem;
#define SEM_INIT(sem, value)    sem_init(&sem, 0, value)
#define SEM_WAIT(sem)           sem_wait(&sem);
#define SEM_POST(sem)           sem_post(&sem);
#endif
  1.2590 +
// Platform part of signal-dispatch initialization: zero the per-signal
// counters and create the wakeup semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  ::SEM_INIT(sig_sem, 0);
}

// Called from signal handlers: record one occurrence of 'sig' and wake a
// waiter. Uses only an atomic increment and a semaphore post.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::SEM_POST(sig_sem);
}
  1.2603 +
// Scan pending_signals for a recorded signal and consume one occurrence.
// If 'wait' is false, returns -1 when nothing is pending; otherwise blocks
// on sig_sem (cooperating with the external-suspend protocol) and rescans.
static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg decrements the counter only if no other thread consumed it
      // first; on success this thread owns one occurrence of signal i.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ::SEM_WAIT(sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ::SEM_POST(sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
  1.2641 +
// Non-blocking poll for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending, then consume and return it.
int os::signal_wait() {
  return check_pending_signals(true);
}
  1.2649 +
  1.2650 +////////////////////////////////////////////////////////////////////////////////
  1.2651 +// Virtual Memory
  1.2652 +
  1.2653 +int os::vm_page_size() {
  1.2654 +  // Seems redundant as all get out
  1.2655 +  assert(os::Bsd::page_size() != -1, "must call os::init");
  1.2656 +  return os::Bsd::page_size();
  1.2657 +}
  1.2658 +
  1.2659 +// Solaris allocates memory by pages.
  1.2660 +int os::vm_allocation_granularity() {
  1.2661 +  assert(os::Bsd::page_size() != -1, "must call os::init");
  1.2662 +  return os::Bsd::page_size();
  1.2663 +}
  1.2664 +
// Rationale behind this function:
//  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
//  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
//  samples for JITted code. Here we create private executable mapping over the code cache
//  and then we can use standard (well, almost, as mapping can change) way to provide
//  info for the reporting script by storing timestamp and location of symbol
void bsd_wrap_code(char* base, size_t size) {
  // Monotonic counter so each wrap gets a unique temp-file name.
  static volatile jint cnt = 0;

  if (!UseOprofile) {
    return;
  }

  char buf[PATH_MAX + 1];
  int num = Atomic::add(1, &cnt);

  snprintf(buf, PATH_MAX + 1, "%s/hs-vm-%d-%d",
           os::get_temp_directory(), os::current_process_id(), num);
  unlink(buf);

  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    // Extend the file to 'size' bytes by seeking near the end and writing
    // one byte, then map it executable over the code cache. Best effort:
    // the mmap result is deliberately not checked.
    off_t rv = ::lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (::write(fd, "", 1) == 1) {
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    ::close(fd);
    // The mapping keeps the inode alive; unlinking hides the temp file.
    unlink(buf);
  }
}
  1.2700 +
// NOTE: Bsd kernel does not really reserve the pages for us.
//       All it does is to check if there are enough free pages
//       left at the time of mmap(). This could be a potential
//       problem.
// Make a previously reserved region usable (readable/writable, and
// executable if 'exec'). Returns true on success.
bool os::commit_memory(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
#ifdef __OpenBSD__
  // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
  return ::mprotect(addr, size, prot) == 0;
#else
  // Overmap the reserved range with a fresh anonymous mapping that has the
  // requested protection.
  uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                   MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
  return res != (uintptr_t) MAP_FAILED;
#endif
}
  1.2716 +
#ifndef _ALLBSD_SOURCE
// Define MAP_HUGETLB here so we can build HotSpot on old systems.
#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000
#endif

// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif
#endif

// Commit with an alignment hint: when UseHugeTLBFS is enabled and the hint
// exceeds the small page size, attempt a MAP_HUGETLB mapping (and report
// its success/failure directly); otherwise fall through to a plain commit.
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                       bool exec) {
#ifndef _ALLBSD_SOURCE
  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
    uintptr_t res =
      (uintptr_t) ::mmap(addr, size, prot,
                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
                         -1, 0);
    return res != (uintptr_t) MAP_FAILED;
  }
#endif

  return commit_memory(addr, size, exec);
}
  1.2744 +
// Advise the kernel to back this range with huge pages when the alignment
// hint suggests large-page-sized chunks. No-op on pure-BSD builds.
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
#ifndef _ALLBSD_SOURCE
  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
    // be supported or the memory may already be backed by huge pages.
    ::madvise(addr, bytes, MADV_HUGEPAGE);
  }
#endif
}

// Tell the kernel the pages are no longer needed; the mapping stays valid
// but the physical backing may be reclaimed.
void os::free_memory(char *addr, size_t bytes) {
  ::madvise(addr, bytes, MADV_DONTNEED);
}
  1.2758 +
// NUMA interface stubs: this port reports a single memory node, so the
// placement hooks are no-ops and the topology queries return trivial values.
void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

bool os::numa_topology_changed()   { return false; }

// Exactly one locality group.
size_t os::numa_get_groups_num() {
  return 1;
}

// All threads run in group 0.
int os::numa_get_group_id() {
  return 0;
}

// Report the single leaf group (id 0) if the caller's buffer has room.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}

// Per-page locality information is not available here.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// No page migration to detect; report the whole range as matching.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
  1.2790 +
#ifndef _ALLBSD_SOURCE
// Something to do with the numa-aware allocator needs these symbols
extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
extern "C" JNIEXPORT void numa_error(char *where) { }
extern "C" JNIEXPORT int fork1() { return fork(); }


// If we are running with libnuma version > 2, then we should
// be trying to use symbols with versions 1.1
// If we are running with earlier version, which did not have symbol versions,
// we should use the base version.
void* os::Bsd::libnuma_dlsym(void* handle, const char *name) {
  // Prefer the versioned symbol; fall back to the unversioned lookup.
  void *f = dlvsym(handle, name, "libnuma_1.1");
  if (f == NULL) {
    f = dlsym(handle, name);
  }
  return f;
}
  1.2809 +
// Resolve the libnuma entry points at runtime. Returns true only when
// sched_getcpu works, libnuma.so.1 loads, and numa_available() succeeds.
void* handle; // (see NOTE below)
bool os::Bsd::libnuma_init() {
  // sched_getcpu() should be in libc.
  set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
                                  dlsym(RTLD_DEFAULT, "sched_getcpu")));

  if (sched_getcpu() != -1) { // Does it work?
    void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
    if (handle != NULL) {
      set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
                                           libnuma_dlsym(handle, "numa_node_to_cpus")));
      set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
                                       libnuma_dlsym(handle, "numa_max_node")));
      set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
                                        libnuma_dlsym(handle, "numa_available")));
      set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
                                            libnuma_dlsym(handle, "numa_tonode_memory")));
      set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
                                            libnuma_dlsym(handle, "numa_interleave_memory")));


      if (numa_available() != -1) {
        set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
        // Create a cpu -> node mapping
        _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
        rebuild_cpu_to_node_map();
        return true;
      }
      // NOTE(review): on the failure path the dlopen handle is never
      // dlclose'd and the partially-set function pointers remain installed;
      // presumably harmless because callers gate on this returning true --
      // confirm.
    }
  }
  return false;
}
  1.2841 +
  1.2842 +// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
  1.2843 +// The table is later used in get_node_by_cpu().
  1.2844 +void os::Bsd::rebuild_cpu_to_node_map() {
  1.2845 +  const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
  1.2846 +                              // in libnuma (possible values are starting from 16,
  1.2847 +                              // and continuing up with every other power of 2, but less
  1.2848 +                              // than the maximum number of CPUs supported by kernel), and
  1.2849 +                              // is a subject to change (in libnuma version 2 the requirements
  1.2850 +                              // are more reasonable) we'll just hardcode the number they use
  1.2851 +                              // in the library.
  1.2852 +  const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
  1.2853 +
  1.2854 +  size_t cpu_num = os::active_processor_count();
  1.2855 +  size_t cpu_map_size = NCPUS / BitsPerCLong;
  1.2856 +  size_t cpu_map_valid_size =
  1.2857 +    MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
  1.2858 +
  1.2859 +  cpu_to_node()->clear();
  1.2860 +  cpu_to_node()->at_grow(cpu_num - 1);
  1.2861 +  size_t node_num = numa_get_groups_num();
  1.2862 +
  1.2863 +  unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
  1.2864 +  for (size_t i = 0; i < node_num; i++) {
  1.2865 +    if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
  1.2866 +      for (size_t j = 0; j < cpu_map_valid_size; j++) {
  1.2867 +        if (cpu_map[j] != 0) {
  1.2868 +          for (size_t k = 0; k < BitsPerCLong; k++) {
  1.2869 +            if (cpu_map[j] & (1UL << k)) {
  1.2870 +              cpu_to_node()->at_put(j * BitsPerCLong + k, i);
  1.2871 +            }
  1.2872 +          }
  1.2873 +        }
  1.2874 +      }
  1.2875 +    }
  1.2876 +  }
  1.2877 +  FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
  1.2878 +}
  1.2879 +
// Look up the NUMA node for 'cpu_id' in the table built by
// rebuild_cpu_to_node_map(); -1 if unknown or out of range.
int os::Bsd::get_node_by_cpu(int cpu_id) {
  if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
    return cpu_to_node()->at(cpu_id);
  }
  return -1;
}

// Storage for the lazily-resolved libnuma entry points (see libnuma_init).
GrowableArray<int>* os::Bsd::_cpu_to_node;
os::Bsd::sched_getcpu_func_t os::Bsd::_sched_getcpu;
os::Bsd::numa_node_to_cpus_func_t os::Bsd::_numa_node_to_cpus;
os::Bsd::numa_max_node_func_t os::Bsd::_numa_max_node;
os::Bsd::numa_available_func_t os::Bsd::_numa_available;
os::Bsd::numa_tonode_memory_func_t os::Bsd::_numa_tonode_memory;
os::Bsd::numa_interleave_memory_func_t os::Bsd::_numa_interleave_memory;
unsigned long* os::Bsd::_numa_all_nodes;
#endif
  1.2896 +
// Return committed pages to "reserved only" state: inaccessible and, via
// MAP_NORESERVE, without swap backing.
bool os::uncommit_memory(char* addr, size_t size) {
#ifdef __OpenBSD__
  // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
  return ::mprotect(addr, size, PROT_NONE) == 0;
#else
  uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
  return res  != (uintptr_t) MAP_FAILED;
#endif
}

// Guard pages are realized by simply committing the range; the protection
// is applied separately by the caller.
bool os::create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size);
}

// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them.  If not, just call uncommit_memory().
// NOTE(review): unlike the comment suggests, this implementation always
// uncommits and never munmaps -- presumably fine on this platform; confirm.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
  1.2917 +
// Highest end address of any region reserved through anon_mmap(); used as a
// hint for the upper bound of non-stack memory.
static address _highest_vm_reserved_address = NULL;

// If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
// at 'requested_addr'. If there are existing memory mappings at the same
// location, however, they will be overwritten. If 'fixed' is false,
// 'requested_addr' is only treated as a hint, the return value may or
// may not start from the requested address. Unlike Bsd mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
  char * addr;
  int flags;

  flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
  if (fixed) {
    assert((uintptr_t)requested_addr % os::Bsd::page_size() == 0, "unaligned address");
    flags |= MAP_FIXED;
  }

  // Map uncommitted pages PROT_READ and PROT_WRITE, change access
  // to PROT_EXEC if executable when we commit the page.
  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
                       flags, -1, 0);

  if (addr != MAP_FAILED) {
    // anon_mmap() should only get called during VM initialization,
    // don't need lock (actually we can skip locking even it can be called
    // from multiple threads, because _highest_vm_reserved_address is just a
    // hint about the upper limit of non-stack memory regions.)
    if ((address)addr + bytes > _highest_vm_reserved_address) {
      _highest_vm_reserved_address = (address)addr + bytes;
    }
  }

  return addr == MAP_FAILED ? NULL : addr;
}
  1.2953 +
// Don't update _highest_vm_reserved_address, because there might be memory
// regions above addr + size. If so, releasing a memory region only creates
// a hole in the address space, it doesn't help prevent heap-stack collision.
//
// Returns nonzero (true) on success, 0 on failure.
static int anon_munmap(char * addr, size_t size) {
  return ::munmap(addr, size) == 0;
}

// Reserve address space without committing it. 'requested_addr' non-NULL
// forces a MAP_FIXED reservation at that address.
char* os::reserve_memory(size_t bytes, char* requested_addr,
                         size_t alignment_hint) {
  return anon_mmap(requested_addr, bytes, (requested_addr != NULL));
}

// Give a reserved region back to the OS.
bool os::release_memory(char* addr, size_t size) {
  return anon_munmap(addr, size);
}
  1.2970 +
// Accessor for the reservation high-water mark maintained by anon_mmap().
static address highest_vm_reserved_address() {
  return _highest_vm_reserved_address;
}

// mprotect wrapper that page-aligns the start address and rounds the length
// up so the whole covered range is affected.
static bool bsd_mprotect(char* addr, size_t size, int prot) {
  // Bsd wants the mprotect address argument to be page aligned.
  char* bottom = (char*)align_size_down((intptr_t)addr, os::Bsd::page_size());

  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. Unaligned
  // 'addr' likely indicates problem in the VM (e.g. trying to change
  // protection of malloc'ed or statically allocated memory). Check the
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");

  size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
  return ::mprotect(bottom, size, prot) == 0;
}
  1.2989 +
  1.2990 +// Set protections specified
  1.2991 +bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
  1.2992 +                        bool is_committed) {
  1.2993 +  unsigned int p = 0;
  1.2994 +  switch (prot) {
  1.2995 +  case MEM_PROT_NONE: p = PROT_NONE; break;
  1.2996 +  case MEM_PROT_READ: p = PROT_READ; break;
  1.2997 +  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  1.2998 +  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  1.2999 +  default:
  1.3000 +    ShouldNotReachHere();
  1.3001 +  }
  1.3002 +  // is_committed is unused.
  1.3003 +  return bsd_mprotect(addr, bytes, p);
  1.3004 +}
  1.3005 +
  1.3006 +bool os::guard_memory(char* addr, size_t size) {
  1.3007 +  return bsd_mprotect(addr, size, PROT_NONE);
  1.3008 +}
  1.3009 +
  1.3010 +bool os::unguard_memory(char* addr, size_t size) {
  1.3011 +  return bsd_mprotect(addr, size, PROT_READ|PROT_WRITE);
  1.3012 +}
  1.3013 +
// Check whether the kernel can actually back an allocation with huge
// pages: mmap() one page with MAP_HUGETLB, then scan /proc/self/maps
// for a "hugepage" annotation on that mapping.  Returns true only when
// the huge-page mapping is confirmed.  When 'warn' is set, prints a
// warning on failure.  On pure BSD (_ALLBSD_SOURCE) the probe is
// compiled out and the function always returns false without warning.
bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
  bool result = false;
#ifndef _ALLBSD_SOURCE
  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
                  -1, 0);

  if (p != (void *) -1) {
    // We don't know if this really is a huge page or not.
    FILE *fp = fopen("/proc/self/maps", "r");
    if (fp) {
      while (!feof(fp)) {
        char chars[257];
        long x = 0;
        if (fgets(chars, sizeof(chars), fp)) {
          // Each maps line begins with "start-end ..."; match our probe
          // mapping by its start address, then look for the hugepage tag.
          if (sscanf(chars, "%lx-%*x", &x) == 1
              && x == (long)p) {
            if (strstr (chars, "hugepage")) {
              result = true;
              break;
            }
          }
        }
      }
      fclose(fp);
    }
    munmap (p, page_size);
    if (result)
      return true;
  }

  if (warn) {
    warning("HugeTLBFS is not supported by the operating system.");
  }
#endif

  return result;
}
  1.3052 +
  1.3053 +/*
  1.3054 +* Set the coredump_filter bits to include largepages in core dump (bit 6)
  1.3055 +*
  1.3056 +* From the coredump_filter documentation:
  1.3057 +*
  1.3058 +* - (bit 0) anonymous private memory
  1.3059 +* - (bit 1) anonymous shared memory
  1.3060 +* - (bit 2) file-backed private memory
  1.3061 +* - (bit 3) file-backed shared memory
  1.3062 +* - (bit 4) ELF header pages in file-backed private memory areas (it is
  1.3063 +*           effective only if the bit 2 is cleared)
  1.3064 +* - (bit 5) hugetlb private memory
  1.3065 +* - (bit 6) hugetlb shared memory
  1.3066 +*/
  1.3067 +static void set_coredump_filter(void) {
  1.3068 +  FILE *f;
  1.3069 +  long cdm;
  1.3070 +
  1.3071 +  if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
  1.3072 +    return;
  1.3073 +  }
  1.3074 +
  1.3075 +  if (fscanf(f, "%lx", &cdm) != 1) {
  1.3076 +    fclose(f);
  1.3077 +    return;
  1.3078 +  }
  1.3079 +
  1.3080 +  rewind(f);
  1.3081 +
  1.3082 +  if ((cdm & LARGEPAGES_BIT) == 0) {
  1.3083 +    cdm |= LARGEPAGES_BIT;
  1.3084 +    fprintf(f, "%#lx", cdm);
  1.3085 +  }
  1.3086 +
  1.3087 +  fclose(f);
  1.3088 +}
  1.3089 +
// Large page support

// Cached large page size in bytes; stays 0 until os::large_page_init()
// determines a value (and on platforms without large-page support).
static size_t _large_page_size = 0;
  1.3093 +
// One-time large-page configuration, called during VM startup.
// Reconciles UseLargePages/UseHugeTLBFS/UseSHM, determines
// _large_page_size (from -XX:LargePageSizeInBytes, /proc/meminfo, or a
// per-CPU compile-time default), and publishes it in _page_sizes[].
// The whole body is Linux-style probing and is compiled out on pure
// BSD (_ALLBSD_SOURCE), leaving large pages unconfigured there.
void os::large_page_init() {
#ifndef _ALLBSD_SOURCE
  if (!UseLargePages) {
    // Large pages disabled: force both mechanisms off and bail out.
    UseHugeTLBFS = false;
    UseSHM = false;
    return;
  }

  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
    // If UseLargePages is specified on the command line try both methods,
    // if it's default, then try only HugeTLBFS.
    if (FLAG_IS_DEFAULT(UseLargePages)) {
      UseHugeTLBFS = true;
    } else {
      UseHugeTLBFS = UseSHM = true;
    }
  }

  if (LargePageSizeInBytes) {
    // User-specified size wins unconditionally.
    _large_page_size = LargePageSizeInBytes;
  } else {
    // large_page_size on Bsd is used to round up heap size. x86 uses either
    // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
    // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
    // page as large as 256M.
    //
    // Here we try to figure out page size by parsing /proc/meminfo and looking
    // for a line with the following format:
    //    Hugepagesize:     2048 kB
    //
    // If we can't determine the value (e.g. /proc is not mounted, or the text
    // format has been changed), we'll use the largest page size supported by
    // the processor.

#ifndef ZERO
    _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
                       ARM_ONLY(2 * M) PPC_ONLY(4 * M);
#endif // ZERO

    FILE *fp = fopen("/proc/meminfo", "r");
    if (fp) {
      while (!feof(fp)) {
        int x = 0;
        char buf[16];
        // Accept the line only if the trailing unit is exactly " kB".
        if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
          if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
            _large_page_size = x * K;
            break;
          }
        } else {
          // skip to next line
          for (;;) {
            int ch = fgetc(fp);
            if (ch == EOF || ch == (int)'\n') break;
          }
        }
      }
      fclose(fp);
    }
  }

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);

  const size_t default_page_size = (size_t)Bsd::page_size();
  if (_large_page_size > default_page_size) {
    // Publish large page first so callers prefer it; list is 0-terminated.
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }
  // Keep HugeTLBFS only if the kernel probe confirms it works.
  UseHugeTLBFS = UseHugeTLBFS &&
                 Bsd::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);

  if (UseHugeTLBFS)
    UseSHM = false;

  UseLargePages = UseHugeTLBFS || UseSHM;

  set_coredump_filter();
#endif
}
  1.3175 +
  1.3176 +#ifndef _ALLBSD_SOURCE
  1.3177 +#ifndef SHM_HUGETLB
  1.3178 +#define SHM_HUGETLB 04000
  1.3179 +#endif
  1.3180 +#endif
  1.3181 +
// Reserve 'bytes' of large-page memory backed by a SysV shared memory
// segment, optionally attached at 'req_addr'.  Returns the attached
// address, or NULL on failure (optionally warning when large-page flags
// were set explicitly on the command line).  Only valid when
// UseLargePages && UseSHM.
char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
  // "exec" is passed in but not used.  Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseSHM, "only for SHM large pages");

  key_t key = IPC_PRIVATE;
  char *addr;

  // Warn only when the user explicitly requested large pages.
  bool warn_on_failure = UseLargePages &&
                        (!FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                        );
  char msg[128];

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap
#ifndef _ALLBSD_SOURCE
  int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
#else
  int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
#endif
  if (shmid == -1) {
     // Possible reasons for shmget failure:
     // 1. shmmax is too small for Java heap.
     //    > check shmmax value: cat /proc/sys/kernel/shmmax
     //    > increase shmmax value: echo "0xffffffff" > /proc/sys/kernel/shmmax
     // 2. not enough large page memory.
     //    > check available large pages: cat /proc/meminfo
     //    > increase amount of large pages:
     //          echo new_value > /proc/sys/vm/nr_hugepages
     //      Note 1: different Bsd may use different name for this property,
     //            e.g. on Redhat AS-3 it is "hugetlb_pool".
     //      Note 2: it's possible there's enough physical memory available but
     //            they are so fragmented after a long run that they can't
     //            coalesce into large pages. Try to reserve large pages when
     //            the system is still "fresh".
     if (warn_on_failure) {
       jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
       warning(msg);
     }
     return NULL;
  }

  // attach to the region
  addr = (char*)shmat(shmid, req_addr, 0);
  int err = errno;

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  if ((intptr_t)addr == -1) {
     if (warn_on_failure) {
       jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
       warning(msg);
     }
     return NULL;
  }

  return addr;
}
  1.3245 +
  1.3246 +bool os::release_memory_special(char* base, size_t bytes) {
  1.3247 +  // detaching the SHM segment will also delete it, see reserve_memory_special()
  1.3248 +  int rslt = shmdt(base);
  1.3249 +  return rslt == 0;
  1.3250 +}
  1.3251 +
  1.3252 +size_t os::large_page_size() {
  1.3253 +  return _large_page_size;
  1.3254 +}
  1.3255 +
  1.3256 +// HugeTLBFS allows application to commit large page memory on demand;
  1.3257 +// with SysV SHM the entire memory region must be allocated as shared
  1.3258 +// memory.
  1.3259 +bool os::can_commit_large_page_memory() {
  1.3260 +  return UseHugeTLBFS;
  1.3261 +}
  1.3262 +
  1.3263 +bool os::can_execute_large_page_memory() {
  1.3264 +  return UseHugeTLBFS;
  1.3265 +}
  1.3266 +
  1.3267 +// Reserve memory at an arbitrary address, only if that area is
  1.3268 +// available (and not reserved for something else).
  1.3269 +
  1.3270 +char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  1.3271 +  const int max_tries = 10;
  1.3272 +  char* base[max_tries];
  1.3273 +  size_t size[max_tries];
  1.3274 +  const size_t gap = 0x000000;
  1.3275 +
  1.3276 +  // Assert only that the size is a multiple of the page size, since
  1.3277 +  // that's all that mmap requires, and since that's all we really know
  1.3278 +  // about at this low abstraction level.  If we need higher alignment,
  1.3279 +  // we can either pass an alignment to this method or verify alignment
  1.3280 +  // in one of the methods further up the call chain.  See bug 5044738.
  1.3281 +  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
  1.3282 +
  1.3283 +  // Repeatedly allocate blocks until the block is allocated at the
  1.3284 +  // right spot. Give up after max_tries. Note that reserve_memory() will
  1.3285 +  // automatically update _highest_vm_reserved_address if the call is
  1.3286 +  // successful. The variable tracks the highest memory address every reserved
  1.3287 +  // by JVM. It is used to detect heap-stack collision if running with
  1.3288 +  // fixed-stack BsdThreads. Because here we may attempt to reserve more
  1.3289 +  // space than needed, it could confuse the collision detecting code. To
  1.3290 +  // solve the problem, save current _highest_vm_reserved_address and
  1.3291 +  // calculate the correct value before return.
  1.3292 +  address old_highest = _highest_vm_reserved_address;
  1.3293 +
  1.3294 +  // Bsd mmap allows caller to pass an address as hint; give it a try first,
  1.3295 +  // if kernel honors the hint then we can return immediately.
  1.3296 +  char * addr = anon_mmap(requested_addr, bytes, false);
  1.3297 +  if (addr == requested_addr) {
  1.3298 +     return requested_addr;
  1.3299 +  }
  1.3300 +
  1.3301 +  if (addr != NULL) {
  1.3302 +     // mmap() is successful but it fails to reserve at the requested address
  1.3303 +     anon_munmap(addr, bytes);
  1.3304 +  }
  1.3305 +
  1.3306 +  int i;
  1.3307 +  for (i = 0; i < max_tries; ++i) {
  1.3308 +    base[i] = reserve_memory(bytes);
  1.3309 +
  1.3310 +    if (base[i] != NULL) {
  1.3311 +      // Is this the block we wanted?
  1.3312 +      if (base[i] == requested_addr) {
  1.3313 +        size[i] = bytes;
  1.3314 +        break;
  1.3315 +      }
  1.3316 +
  1.3317 +      // Does this overlap the block we wanted? Give back the overlapped
  1.3318 +      // parts and try again.
  1.3319 +
  1.3320 +      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
  1.3321 +      if (top_overlap >= 0 && top_overlap < bytes) {
  1.3322 +        unmap_memory(base[i], top_overlap);
  1.3323 +        base[i] += top_overlap;
  1.3324 +        size[i] = bytes - top_overlap;
  1.3325 +      } else {
  1.3326 +        size_t bottom_overlap = base[i] + bytes - requested_addr;
  1.3327 +        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
  1.3328 +          unmap_memory(requested_addr, bottom_overlap);
  1.3329 +          size[i] = bytes - bottom_overlap;
  1.3330 +        } else {
  1.3331 +          size[i] = bytes;
  1.3332 +        }
  1.3333 +      }
  1.3334 +    }
  1.3335 +  }
  1.3336 +
  1.3337 +  // Give back the unused reserved pieces.
  1.3338 +
  1.3339 +  for (int j = 0; j < i; ++j) {
  1.3340 +    if (base[j] != NULL) {
  1.3341 +      unmap_memory(base[j], size[j]);
  1.3342 +    }
  1.3343 +  }
  1.3344 +
  1.3345 +  if (i < max_tries) {
  1.3346 +    _highest_vm_reserved_address = MAX2(old_highest, (address)requested_addr + bytes);
  1.3347 +    return requested_addr;
  1.3348 +  } else {
  1.3349 +    _highest_vm_reserved_address = old_highest;
  1.3350 +    return NULL;
  1.3351 +  }
  1.3352 +}
  1.3353 +
// Read up to nBytes from fd into buf, automatically retrying when the
// underlying ::read() is interrupted by a signal (EINTR).
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes));
}
  1.3357 +
// TODO-FIXME: reconcile Solaris' os::sleep with the bsd variation.
// Solaris uses poll(), bsd uses park().
// Poll() is likely a better choice, assuming that Thread.interrupt()
// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
// SIGSEGV, see 4355769.

// Nanoseconds per millisecond; used for sleep-time bookkeeping below.
const int NANOSECS_PER_MILLISECS = 1000000;
  1.3365 +
// Sleep for 'millis' milliseconds on behalf of 'thread' (which must be
// the calling thread).  When 'interruptible' is true, the sleep can be
// broken by Thread.interrupt() (returns OS_INTRPT) and cooperates with
// the external suspend protocol; otherwise the thread simply parks in
// slices until the time is consumed and OS_OK is returned.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(),  "thread consistency check");

  ParkEvent * const slp = thread->_SleepEvent ;
  slp->reset() ;
  OrderAccess::fence() ;

  if (interruptible) {
    jlong prevtime = javaTimeNanos();

    for (;;) {
      // Check for (and clear) pending interrupt before each park.
      if (os::is_interrupted(thread, true)) {
        return OS_INTRPT;
      }

      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
      }

      if(millis <= 0) {
        return OS_OK;
      }

      prevtime = newtime;

      {
        assert(thread->is_Java_thread(), "sanity check");
        JavaThread *jt = (JavaThread *) thread;
        ThreadBlockInVM tbivm(jt);
        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

        jt->set_suspend_equivalent();
        // cleared by handle_special_suspend_equivalent_condition() or
        // java_suspend_self() via check_and_wait_while_suspended()

        slp->park(millis);

        // were we externally suspended while we were waiting?
        jt->check_and_wait_while_suspended();
      }
    }
  } else {
    // Non-interruptible path: no interrupt checks, no suspend protocol.
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jlong prevtime = javaTimeNanos();

    for (;;) {
      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
      // the 1st iteration ...
      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISECS;
      }

      if(millis <= 0) break ;

      prevtime = newtime;
      slp->park(millis);
    }
    return OS_OK ;
  }
}
  1.3438 +
  1.3439 +int os::naked_sleep() {
  1.3440 +  // %% make the sleep time an integer flag. for now use 1 millisec.
  1.3441 +  return os::sleep(Thread::current(), 1, false);
  1.3442 +}
  1.3443 +
  1.3444 +// Sleep forever; naked call to OS-specific sleep; use with CAUTION
  1.3445 +void os::infinite_sleep() {
  1.3446 +  while (true) {    // sleep forever ...
  1.3447 +    ::sleep(100);   // ... 100 seconds at a time
  1.3448 +  }
  1.3449 +}
  1.3450 +
  1.3451 +// Used to convert frequent JVM_Yield() to nops
  1.3452 +bool os::dont_yield() {
  1.3453 +  return DontYieldALot;
  1.3454 +}
  1.3455 +
  1.3456 +void os::yield() {
  1.3457 +  sched_yield();
  1.3458 +}
  1.3459 +
  1.3460 +os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN ;}
  1.3461 +
  1.3462 +void os::yield_all(int attempts) {
  1.3463 +  // Yields to all threads, including threads with lower priorities
  1.3464 +  // Threads on Bsd are all with same priority. The Solaris style
  1.3465 +  // os::yield_all() with nanosleep(1ms) is not necessary.
  1.3466 +  sched_yield();
  1.3467 +}
  1.3468 +
  1.3469 +// Called from the tight loops to possibly influence time-sharing heuristics
  1.3470 +void os::loop_breaker(int attempts) {
  1.3471 +  os::yield_all(attempts);
  1.3472 +}
  1.3473 +
  1.3474 +////////////////////////////////////////////////////////////////////////////////
  1.3475 +// thread priority support
  1.3476 +
  1.3477 +// Note: Normal Bsd applications are run with SCHED_OTHER policy. SCHED_OTHER
  1.3478 +// only supports dynamic priority, static priority must be zero. For real-time
  1.3479 +// applications, Bsd supports SCHED_RR which allows static priority (1-99).
  1.3480 +// However, for large multi-threaded applications, SCHED_RR is not only slower
  1.3481 +// than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
  1.3482 +// of 5 runs - Sep 2005).
  1.3483 +//
  1.3484 +// The following code actually changes the niceness of kernel-thread/LWP. It
  1.3485 +// has an assumption that setpriority() only modifies one kernel-thread/LWP,
  1.3486 +// not the entire user process, and user level threads are 1:1 mapped to kernel
  1.3487 +// threads. It has always been the case, but could change in the future. For
  1.3488 +// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
  1.3489 +// It is only used when ThreadPriorityPolicy=1 and requires root privilege.
  1.3490 +
// Map Java thread priorities (index 0..MaxPriority) to OS priority
// values.  Three platform variants: generic BSD values, Mach high-level
// priorities on OS X, and nice-style values for the remaining
// (non-_ALLBSD_SOURCE) build.  Index 0 is a placeholder; Java
// priorities start at 1 (MinPriority).
#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__)
int os::java_to_os_priority[MaxPriority + 1] = {
  19,              // 0 Entry should never be used

   0,              // 1 MinPriority
   3,              // 2
   6,              // 3

   10,              // 4
   15,              // 5 NormPriority
   18,              // 6

   21,              // 7
   25,              // 8
   28,              // 9 NearMaxPriority

   31              // 10 MaxPriority
};
#elif defined(__APPLE__)
/* Using Mach high-level priority assignments */
int os::java_to_os_priority[MaxPriority + 1] = {
   0,              // 0 Entry should never be used (MINPRI_USER)

  27,              // 1 MinPriority
  28,              // 2
  29,              // 3

  30,              // 4
  31,              // 5 NormPriority (BASEPRI_DEFAULT)
  32,              // 6

  33,              // 7
  34,              // 8
  35,              // 9 NearMaxPriority

  36               // 10 MaxPriority
};
#else
int os::java_to_os_priority[MaxPriority + 1] = {
  19,              // 0 Entry should never be used

   4,              // 1 MinPriority
   3,              // 2
   2,              // 3

   1,              // 4
   0,              // 5 NormPriority
  -1,              // 6

  -2,              // 7
  -3,              // 8
  -4,              // 9 NearMaxPriority

  -5               // 10 MaxPriority
};
#endif
  1.3547 +
  1.3548 +static int prio_init() {
  1.3549 +  if (ThreadPriorityPolicy == 1) {
  1.3550 +    // Only root can raise thread priority. Don't allow ThreadPriorityPolicy=1
  1.3551 +    // if effective uid is not root. Perhaps, a more elegant way of doing
  1.3552 +    // this is to test CAP_SYS_NICE capability, but that will require libcap.so
  1.3553 +    if (geteuid() != 0) {
  1.3554 +      if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) {
  1.3555 +        warning("-XX:ThreadPriorityPolicy requires root privilege on Bsd");
  1.3556 +      }
  1.3557 +      ThreadPriorityPolicy = 0;
  1.3558 +    }
  1.3559 +  }
  1.3560 +  return 0;
  1.3561 +}
  1.3562 +
  1.3563 +OSReturn os::set_native_priority(Thread* thread, int newpri) {
  1.3564 +  if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) return OS_OK;
  1.3565 +
  1.3566 +#ifdef __OpenBSD__
  1.3567 +  // OpenBSD pthread_setprio starves low priority threads
  1.3568 +  return OS_OK;
  1.3569 +#elif defined(__FreeBSD__)
  1.3570 +  int ret = pthread_setprio(thread->osthread()->pthread_id(), newpri);
  1.3571 +#elif defined(__APPLE__) || defined(__NetBSD__)
  1.3572 +  struct sched_param sp;
  1.3573 +  int policy;
  1.3574 +  pthread_t self = pthread_self();
  1.3575 +
  1.3576 +  if (pthread_getschedparam(self, &policy, &sp) != 0)
  1.3577 +    return OS_ERR;
  1.3578 +
  1.3579 +  sp.sched_priority = newpri;
  1.3580 +  if (pthread_setschedparam(self, policy, &sp) != 0)
  1.3581 +    return OS_ERR;
  1.3582 +
  1.3583 +  return OS_OK;
  1.3584 +#else
  1.3585 +  int ret = setpriority(PRIO_PROCESS, thread->osthread()->thread_id(), newpri);
  1.3586 +  return (ret == 0) ? OS_OK : OS_ERR;
  1.3587 +#endif
  1.3588 +}
  1.3589 +
// Retrieve the current OS-level priority of 'thread' into *priority_ptr.
// When thread priorities are disabled, reports the value mapped from
// NormPriority instead.  Returns OS_OK on success, OS_ERR on failure.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if ( !UseThreadPriorities || ThreadPriorityPolicy == 0 ) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }

  // Clear errno so the -1-is-a-valid-priority ambiguity below can be
  // resolved (getpriority() may legitimately return -1).
  errno = 0;
#if defined(__OpenBSD__) || defined(__FreeBSD__)
  *priority_ptr = pthread_getprio(thread->osthread()->pthread_id());
#elif defined(__APPLE__) || defined(__NetBSD__)
  int policy;
  struct sched_param sp;

  pthread_getschedparam(pthread_self(), &policy, &sp);
  *priority_ptr = sp.sched_priority;
#else
  *priority_ptr = getpriority(PRIO_PROCESS, thread->osthread()->thread_id());
#endif
  return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
}
  1.3610 +
  1.3611 +// Hint to the underlying OS that a task switch would not be good.
  1.3612 +// Void return because it's a hint and can fail.
  1.3613 +void os::hint_no_preempt() {}
  1.3614 +
  1.3615 +////////////////////////////////////////////////////////////////////////////////
  1.3616 +// suspend/resume support
  1.3617 +
  1.3618 +//  the low-level signal-based suspend/resume support is a remnant from the
  1.3619 +//  old VM-suspension that used to be for java-suspension, safepoints etc,
  1.3620 +//  within hotspot. Now there is a single use-case for this:
  1.3621 +//    - calling get_thread_pc() on the VMThread by the flat-profiler task
  1.3622 +//      that runs in the watcher thread.
  1.3623 +//  The remaining code is greatly simplified from the more general suspension
  1.3624 +//  code that used to be used.
  1.3625 +//
  1.3626 +//  The protocol is quite simple:
  1.3627 +//  - suspend:
  1.3628 +//      - sends a signal to the target thread
  1.3629 +//      - polls the suspend state of the osthread using a yield loop
  1.3630 +//      - target thread signal handler (SR_handler) sets suspend state
  1.3631 +//        and blocks in sigsuspend until continued
  1.3632 +//  - resume:
  1.3633 +//      - sets target osthread state to continue
  1.3634 +//      - sends signal to end the sigsuspend loop in the SR_handler
  1.3635 +//
  1.3636 +//  Note that the SR_lock plays no role in this suspend/resume protocol.
  1.3637 +//
  1.3638 +
  1.3639 +static void resume_clear_context(OSThread *osthread) {
  1.3640 +  osthread->set_ucontext(NULL);
  1.3641 +  osthread->set_siginfo(NULL);
  1.3642 +
  1.3643 +  // notify the suspend action is completed, we have now resumed
  1.3644 +  osthread->sr.clear_suspended();
  1.3645 +}
  1.3646 +
  1.3647 +static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  1.3648 +  osthread->set_ucontext(context);
  1.3649 +  osthread->set_siginfo(siginfo);
  1.3650 +}
  1.3651 +
  1.3652 +//
  1.3653 +// Handler function invoked when a thread's execution is suspended or
  1.3654 +// resumed. We have to be careful that only async-safe functions are
  1.3655 +// called here (Note: most pthread functions are not async safe and
  1.3656 +// should be avoided.)
  1.3657 +//
  1.3658 +// Note: sigwait() is a more natural fit than sigsuspend() from an
  1.3659 +// interface point of view, but sigwait() prevents the signal hander
  1.3660 +// from being run. libpthread would get very confused by not having
  1.3661 +// its signal handlers run and prevents sigwait()'s use with the
  1.3662 +// mutex granting granting signal.
  1.3663 +//
  1.3664 +// Currently only ever called on the VMThread
  1.3665 +//
// Signal handler for SR_signum implementing the suspend/resume
// protocol.  'sig', 'siginfo', and 'context' come straight from the
// kernel's signal dispatch.  On SR_SUSPEND: save context, mark
// suspended, then block in sigsuspend() until the resume signal
// arrives.  On SR_CONTINUE: nothing to do beyond returning, which ends
// the sigsuspend() wait in the suspended frame below.
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread(), "Must be VMThread");
  // read current suspend action
  int action = osthread->sr.suspend_action();
  if (action == SR_SUSPEND) {
    suspend_save_context(osthread, siginfo, context);

    // Notify the suspend action is about to be completed. do_suspend()
    // waits until SR_SUSPENDED is set and then returns. We will wait
    // here for a resume signal and that completes the suspend-other
    // action. do_suspend/do_resume is always called as a pair from
    // the same thread - so there are no races

    // notify the caller
    osthread->sr.set_suspended();

    sigset_t suspend_set;  // signals for sigsuspend()

    // get current set of blocked signals and unblock resume signal
    pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
    sigdelset(&suspend_set, SR_signum);

    // wait here until we are resumed
    do {
      sigsuspend(&suspend_set);
      // ignore all returns until we get a resume signal
    } while (osthread->sr.suspend_action() != SR_CONTINUE);

    resume_clear_context(osthread);

  } else {
    assert(action == SR_CONTINUE, "unexpected sr action");
    // nothing special to do - just leave the handler
  }

  errno = old_errno;
}
  1.3709 +
  1.3710 +
  1.3711 +static int SR_initialize() {
  1.3712 +  struct sigaction act;
  1.3713 +  char *s;
  1.3714 +  /* Get signal number to use for suspend/resume */
  1.3715 +  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
  1.3716 +    int sig = ::strtol(s, 0, 10);
  1.3717 +    if (sig > 0 || sig < NSIG) {
  1.3718 +        SR_signum = sig;
  1.3719 +    }
  1.3720 +  }
  1.3721 +
  1.3722 +  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
  1.3723 +        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
  1.3724 +
  1.3725 +  sigemptyset(&SR_sigset);
  1.3726 +  sigaddset(&SR_sigset, SR_signum);
  1.3727 +
  1.3728 +  /* Set up signal handler for suspend/resume */
  1.3729 +  act.sa_flags = SA_RESTART|SA_SIGINFO;
  1.3730 +  act.sa_handler = (void (*)(int)) SR_handler;
  1.3731 +
  1.3732 +  // SR_signum is blocked by default.
  1.3733 +  // 4528190 - We also need to block pthread restart signal (32 on all
  1.3734 +  // supported Bsd platforms). Note that BsdThreads need to block
  1.3735 +  // this signal for all threads to work properly. So we don't have
  1.3736 +  // to use hard-coded signal number when setting up the mask.
  1.3737 +  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
  1.3738 +
  1.3739 +  if (sigaction(SR_signum, &act, 0) == -1) {
  1.3740 +    return -1;
  1.3741 +  }
  1.3742 +
  1.3743 +  // Save signal flag
  1.3744 +  os::Bsd::set_our_sigflags(SR_signum, act.sa_flags);
  1.3745 +  return 0;
  1.3746 +}
  1.3747 +
  1.3748 +static int SR_finalize() {
  1.3749 +  return 0;
  1.3750 +}
  1.3751 +
  1.3752 +
  1.3753 +// returns true on success and false on error - really an error is fatal
  1.3754 +// but this seems the normal response to library errors
  1.3755 +static bool do_suspend(OSThread* osthread) {
  1.3756 +  // mark as suspended and send signal
  1.3757 +  osthread->sr.set_suspend_action(SR_SUSPEND);
  1.3758 +  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  1.3759 +  assert_status(status == 0, status, "pthread_kill");
  1.3760 +
  1.3761 +  // check status and wait until notified of suspension
  1.3762 +  if (status == 0) {
  1.3763 +    for (int i = 0; !osthread->sr.is_suspended(); i++) {
  1.3764 +      os::yield_all(i);
  1.3765 +    }
  1.3766 +    osthread->sr.set_suspend_action(SR_NONE);
  1.3767 +    return true;
  1.3768 +  }
  1.3769 +  else {
  1.3770 +    osthread->sr.set_suspend_action(SR_NONE);
  1.3771 +    return false;
  1.3772 +  }
  1.3773 +}
  1.3774 +
  1.3775 +static void do_resume(OSThread* osthread) {
  1.3776 +  assert(osthread->sr.is_suspended(), "thread should be suspended");
  1.3777 +  osthread->sr.set_suspend_action(SR_CONTINUE);
  1.3778 +
  1.3779 +  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  1.3780 +  assert_status(status == 0, status, "pthread_kill");
  1.3781 +  // check status and wait unit notified of resumption
  1.3782 +  if (status == 0) {
  1.3783 +    for (int i = 0; osthread->sr.is_suspended(); i++) {
  1.3784 +      os::yield_all(i);
  1.3785 +    }
  1.3786 +  }
  1.3787 +  osthread->sr.set_suspend_action(SR_NONE);
  1.3788 +}
  1.3789 +
  1.3790 +////////////////////////////////////////////////////////////////////////////////
  1.3791 +// interrupt support
  1.3792 +
// Post an interrupt to 'thread': set its interrupted flag and unpark the
// events it may be blocked on. The caller must be the thread itself or
// hold Threads_lock (see assert), otherwise 'thread' could be deallocated
// while we use it.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
    "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  if (!osthread->interrupted()) {
    osthread->set_interrupted(true);
    // More than one thread can get here with the same value of osthread,
    // resulting in multiple notifications.  We do, however, want the store
    // to interrupted() to be visible to other threads before we execute unpark().
    OrderAccess::fence();
    // Wake the thread if it is blocked in its _SleepEvent.
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166. Unpark even if interrupt status already was set
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  // Also unpark the thread's generic ParkEvent, in case it is parked there.
  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}
  1.3817 +
  1.3818 +bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  1.3819 +  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
  1.3820 +    "possibility of dangling Thread pointer");
  1.3821 +
  1.3822 +  OSThread* osthread = thread->osthread();
  1.3823 +
  1.3824 +  bool interrupted = osthread->interrupted();
  1.3825 +
  1.3826 +  if (interrupted && clear_interrupted) {
  1.3827 +    osthread->set_interrupted(false);
  1.3828 +    // consider thread->_SleepEvent->reset() ... optional optimization
  1.3829 +  }
  1.3830 +
  1.3831 +  return interrupted;
  1.3832 +}
  1.3833 +
  1.3834 +///////////////////////////////////////////////////////////////////////////////////
  1.3835 +// signal handling (except suspend/resume)
  1.3836 +
  1.3837 +// This routine may be used by user applications as a "hook" to catch signals.
  1.3838 +// The user-defined signal handler must pass unrecognized signals to this
  1.3839 +// routine, and if it returns true (non-zero), then the signal handler must
  1.3840 +// return immediately.  If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
  1.3843 +//
  1.3844 +// If this routine returns false, it is OK to call it again.  This allows
  1.3845 +// the user-defined signal handler to perform checks either before or after
  1.3846 +// the VM performs its own checks.  Naturally, the user code would be making
  1.3847 +// a serious error if it tried to handle an exception (such as a null check
  1.3848 +// or breakpoint) that the VM was generating for its own correct operation.
  1.3849 +//
  1.3850 +// This routine may recognize any of the following kinds of signals:
  1.3851 +//    SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
  1.3852 +// It should be consulted by handlers for any of those signals.
  1.3853 +//
  1.3854 +// The caller of this routine must pass in the three arguments supplied
  1.3855 +// to the function referred to in the "sa_sigaction" (not the "sa_handler")
  1.3856 +// field of the structure passed to sigaction().  This routine assumes that
  1.3857 +// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
  1.3858 +//
  1.3859 +// Note that the VM will print warnings if it detects conflicting signal
  1.3860 +// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
  1.3861 +//
  1.3862 +extern "C" JNIEXPORT int
  1.3863 +JVM_handle_bsd_signal(int signo, siginfo_t* siginfo,
  1.3864 +                        void* ucontext, int abort_if_unrecognized);
  1.3865 +
// The VM-installed handler for the synchronous signals set up in
// install_signal_handlers(). Forwards to the shared dispatcher with
// abort_if_unrecognized == true, so unrecognized signals are fatal here.
void signalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");
  JVM_handle_bsd_signal(sig, info, uc, true);
}
  1.3870 +
  1.3871 +
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_bsd_signal, harmlessly.
bool os::Bsd::signal_handlers_are_installed = false;

// For signal-chaining
// sigact[sig] holds the handler that was installed before the VM's own;
// the corresponding bit in 'sigs' marks the entry as valid.
struct sigaction os::Bsd::sigact[MAXSIGNUM];
unsigned int os::Bsd::sigs = 0;
// True once libjsig's interposition entry points were found via dlsym.
bool os::Bsd::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
// libjsig's JVM_get_signal_action, or NULL when libjsig is not loaded.
get_signal_t os::Bsd::get_signal_action = NULL;
  1.3882 +
  1.3883 +struct sigaction* os::Bsd::get_chained_signal_action(int sig) {
  1.3884 +  struct sigaction *actp = NULL;
  1.3885 +
  1.3886 +  if (libjsig_is_loaded) {
  1.3887 +    // Retrieve the old signal handler from libjsig
  1.3888 +    actp = (*get_signal_action)(sig);
  1.3889 +  }
  1.3890 +  if (actp == NULL) {
  1.3891 +    // Retrieve the preinstalled signal handler from jvm
  1.3892 +    actp = get_preinstalled_handler(sig);
  1.3893 +  }
  1.3894 +
  1.3895 +  return actp;
  1.3896 +}
  1.3897 +
// Invoke the previously-installed handler described by 'actp' for 'sig'.
// Returns false when the old disposition was SIG_DFL (the VM should treat
// the signal itself rather than take the default action); returns true
// otherwise — including the SIG_IGN case, where nothing is called at all —
// meaning the signal is considered handled.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal while its handler runs, as the
      // kernel would when delivering it directly
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // SA_RESETHAND means a one-shot handler: emulate the kernel by
    // resetting the recorded disposition to SIG_DFL before the call.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
  1.3942 +
  1.3943 +bool os::Bsd::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  1.3944 +  bool chained = false;
  1.3945 +  // signal-chaining
  1.3946 +  if (UseSignalChaining) {
  1.3947 +    struct sigaction *actp = get_chained_signal_action(sig);
  1.3948 +    if (actp != NULL) {
  1.3949 +      chained = call_chained_handler(actp, sig, siginfo, context);
  1.3950 +    }
  1.3951 +  }
  1.3952 +  return chained;
  1.3953 +}
  1.3954 +
  1.3955 +struct sigaction* os::Bsd::get_preinstalled_handler(int sig) {
  1.3956 +  if ((( (unsigned int)1 << sig ) & sigs) != 0) {
  1.3957 +    return &sigact[sig];
  1.3958 +  }
  1.3959 +  return NULL;
  1.3960 +}
  1.3961 +
  1.3962 +void os::Bsd::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  1.3963 +  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  1.3964 +  sigact[sig] = oldAct;
  1.3965 +  sigs |= (unsigned int)1 << sig;
  1.3966 +}
  1.3967 +
// for diagnostic
// Records the sa_flags the VM used when installing its handler for each
// signal, so check_signal_handler()/print_signal_handler() can detect a
// later modification by user code.
int os::Bsd::sigflags[MAXSIGNUM];
  1.3970 +
  1.3971 +int os::Bsd::get_our_sigflags(int sig) {
  1.3972 +  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  1.3973 +  return sigflags[sig];
  1.3974 +}
  1.3975 +
  1.3976 +void os::Bsd::set_our_sigflags(int sig, int flags) {
  1.3977 +  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  1.3978 +  sigflags[sig] = flags;
  1.3979 +}
  1.3980 +
// Install the VM's handler for 'sig' (or reset it to SIG_DFL when
// set_installed is false). If a third-party handler is already present,
// behavior depends on the flags: leave it alone (AllowUserSignalHandlers),
// save it for chaining (UseSignalChaining), or abort the VM.
void os::Bsd::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  // Normalize both union members to a single comparable pointer.
  void* oldhand = oldAct.sa_sigaction
                ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
                : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;
  if (!set_installed) {
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  } else {
    sigAct.sa_sigaction = signalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read what we just replaced; it must match the handler observed at
  // the top, otherwise someone raced us installing a handler.
  void* oldhand2  = oldAct.sa_sigaction
                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
  1.4027 +
  1.4028 +// install signal handlers for signals that HotSpot needs to
  1.4029 +// handle in order to support Java-level exception handling.
  1.4030 +
// Install the VM's handlers for all synchronous signals HotSpot relies on.
// Idempotent: does nothing after the first call. When libjsig is preloaded,
// the installations are bracketed with JVM_begin/end_signal_setting so
// libjsig can record the VM's handlers for chaining.
void os::Bsd::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    // Probe for libjsig's interposition entry points via dlsym; their
    // presence means libjsig is loaded and chaining is in effect.
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers
      (*begin_signal_setting)();
    }

    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGXFSZ, true);

#if defined(__APPLE__)
    // In Mac OS X 10.4, CrashReporter will write a crash log for all 'fatal' signals, including
    // signals caught and handled by the JVM. To work around this, we reset the mach task
    // signal handler that's placed on our process by CrashReporter. This disables
    // CrashReporter-based reporting.
    //
    // This work-around is not necessary for 10.5+, as CrashReporter no longer intercedes
    // on caught fatal signals.
    //
    // Additionally, gdb installs both standard BSD signal handlers, and mach exception
    // handlers. By replacing the existing task exception handler, we disable gdb's mach
    // exception handling, while leaving the standard BSD signal handlers functional.
    kern_return_t kr;
    kr = task_set_exception_ports(mach_task_self(),
        EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
        MACH_PORT_NULL,
        EXCEPTION_STATE_IDENTITY,
        MACHINE_THREAD_STATE);

    assert(kr == KERN_SUCCESS, "could not set mach task signal handler");
#endif

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
    }
  }
}
  1.4102 +
  1.4103 +#ifndef _ALLBSD_SOURCE
  1.4104 +// This is the fastest way to get thread cpu time on Bsd.
  1.4105 +// Returns cpu time (user+sys) for any thread, not only for current.
  1.4106 +// POSIX compliant clocks are implemented in the kernels 2.6.16+.
  1.4107 +// It might work on 2.6.10+ with a special kernel/glibc patch.
  1.4108 +// For reference, please, see IEEE Std 1003.1-2004:
  1.4109 +//   http://www.unix.org/single_unix_specification
  1.4110 +
  1.4111 +jlong os::Bsd::fast_thread_cpu_time(clockid_t clockid) {
  1.4112 +  struct timespec tp;
  1.4113 +  int rc = os::Bsd::clock_gettime(clockid, &tp);
  1.4114 +  assert(rc == 0, "clock_gettime is expected to return 0 code");
  1.4115 +
  1.4116 +  return (tp.tv_sec * SEC_IN_NANOSECS) + tp.tv_nsec;
  1.4117 +}
  1.4118 +#endif
  1.4119 +
  1.4120 +/////
  1.4121 +// glibc on Bsd platform uses non-documented flag
  1.4122 +// to indicate, that some special sort of signal
  1.4123 +// trampoline is used.
  1.4124 +// We will never set this flag, and we should
  1.4125 +// ignore this flag in our diagnostic
  1.4126 +#ifdef SIGNIFICANT_SIGNAL_MASK
  1.4127 +#undef SIGNIFICANT_SIGNAL_MASK
  1.4128 +#endif
  1.4129 +#define SIGNIFICANT_SIGNAL_MASK (~0x04000000)
  1.4130 +
  1.4131 +static const char* get_signal_handler_name(address handler,
  1.4132 +                                           char* buf, int buflen) {
  1.4133 +  int offset;
  1.4134 +  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  1.4135 +  if (found) {
  1.4136 +    // skip directory names
  1.4137 +    const char *p1, *p2;
  1.4138 +    p1 = buf;
  1.4139 +    size_t len = strlen(os::file_separator());
  1.4140 +    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
  1.4141 +    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  1.4142 +  } else {
  1.4143 +    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  1.4144 +  }
  1.4145 +  return buf;
  1.4146 +}
  1.4147 +
// Print the current disposition of 'sig' to 'st': handler name (or
// SIG_DFL/SIG_IGN), sa_mask, sa_flags, and a warning when the flags of a
// VM-owned handler differ from what the VM originally installed.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  // See comment for SIGNIFICANT_SIGNAL_MASK define
  sa.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError during error reporting; if so,
  // report the VMError-recorded handler and flags instead.
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
  }

  st->print(", sa_flags="   PTR32_FORMAT, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler
    // check for flags, reset system-used one!
    if((int)sa.sa_flags != os::Bsd::get_our_sigflags(sig)) {
      st->print(
                ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Bsd::get_our_sigflags(sig));
    }
  }
  st->cr();
}
  1.4195 +
  1.4196 +
// Verify a signal's handler at most once: check_signal_done marks signals
// that have already been reported, short-circuiting subsequent checks.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Bsd::check_signal_handler(sig)

// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {

  // check_signals is cleared by install_signal_handlers() when libjsig or
  // user signal handlers are in play.
  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);


  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_bsd.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}
  1.4232 +
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Cached pointer to the C library's real sigaction(), resolved with
// dlsym(RTLD_DEFAULT, ...) so that an interposed (e.g. libjsig) version
// is bypassed.
static os_sigaction_t os_sigaction = NULL;

// Compare the handler currently installed for 'sig' (and its sa_flags)
// against what the VM expects; print a warning and mark the signal in
// check_signal_done on mismatch so the warning is emitted only once.
// Driven by os::run_periodic_checks() under CheckJNICalls.
void os::Bsd::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;


  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);


  // Ignore the undocumented glibc flag; see SIGNIFICANT_SIGNAL_MASK.
  act.sa_flags &= SIGNIFICANT_SIGNAL_MASK;

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  case INTERRUPT_SIGNAL:
    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      // Not a signal the VM cares about.
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Bsd::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Bsd::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Bsd::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
  1.4308 +
  1.4309 +extern void report_error(char* file_name, int line_no, char* title, char* format, ...);
  1.4310 +
  1.4311 +extern bool signal_name(int signo, char* buf, size_t len);
  1.4312 +
  1.4313 +const char* os::exception_name(int exception_code, char* buf, size_t size) {
  1.4314 +  if (0 < exception_code && exception_code <= SIGRTMAX) {
  1.4315 +    // signal
  1.4316 +    if (!signal_name(exception_code, buf, size)) {
  1.4317 +      jio_snprintf(buf, size, "SIG%d", exception_code);
  1.4318 +    }
  1.4319 +    return buf;
  1.4320 +  } else {
  1.4321 +    return NULL;
  1.4322 +  }
  1.4323 +}
  1.4324 +
// this is called _before_ most of the global arguments have been parsed
void os::init(void) {
  char dummy;   /* used to get a guess on initial stack address */
//  first_hrtime = gethrtime();

  // With BsdThreads the JavaMain thread pid (primordial thread)
  // is different than the pid of the java launcher thread.
  // So, on Bsd, the launcher thread pid is passed to the VM
  // via the sun.java.launcher.pid property.
  // Use this property instead of getpid() if it was correctly passed.
  // See bug 6351349.
  pid_t java_launcher_pid = (pid_t) Arguments::sun_java_launcher_pid();

  _initial_pid = (java_launcher_pid > 0) ? java_launcher_pid : getpid();

  clock_tics_per_sec = CLK_TCK;

  init_random(1234567);

  ThreadCritical::initialize();

  // Cache the OS page size; later init steps depend on it.
  Bsd::set_page_size(getpagesize());
  if (Bsd::page_size() == -1) {
    fatal(err_msg("os_bsd.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  }
  init_page_sizes((size_t) Bsd::page_size());

  Bsd::initialize_system_info();

  // main_thread points to the aboriginal thread
  Bsd::_main_thread = pthread_self();

  Bsd::clock_init();
  initial_time_count = os::elapsed_counter();

#ifdef __APPLE__
  // XXXDARWIN
  // Work around the unaligned VM callbacks in hotspot's
  // sharedRuntime. The callbacks don't use SSE2 instructions, and work on
  // Linux, Solaris, and FreeBSD. On Mac OS X, dyld (rightly so) enforces
  // alignment when doing symbol lookup. To work around this, we force early
  // binding of all symbols now, thus binding when alignment is known-good.
  _dyld_bind_fully_image_containing_address((const void *) &os::init);
#endif
}
  1.4371 +
// To install functions for atexit system call
extern "C" {
  // C-linkage trampoline registered with atexit() (see init_2) so the perf
  // memory region is released on normal process exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
  1.4378 +
  1.4379 +// this is called _after_ the global arguments have been parsed
  1.4380 +jint os::init_2(void)
  1.4381 +{
  1.4382 +#ifndef _ALLBSD_SOURCE
  1.4383 +  Bsd::fast_thread_clock_init();
  1.4384 +#endif
  1.4385 +
  1.4386 +  // Allocate a single page and mark it as readable for safepoint polling
  1.4387 +  address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  1.4388 +  guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" );
  1.4389 +
  1.4390 +  os::set_polling_page( polling_page );
  1.4391 +
  1.4392 +#ifndef PRODUCT
  1.4393 +  if(Verbose && PrintMiscellaneous)
  1.4394 +    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
  1.4395 +#endif
  1.4396 +
  1.4397 +  if (!UseMembar) {
  1.4398 +    address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  1.4399 +    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
  1.4400 +    os::set_memory_serialize_page( mem_serialize_page );
  1.4401 +
  1.4402 +#ifndef PRODUCT
  1.4403 +    if(Verbose && PrintMiscellaneous)
  1.4404 +      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
  1.4405 +#endif
  1.4406 +  }
  1.4407 +
  1.4408 +  os::large_page_init();
  1.4409 +
  1.4410 +  // initialize suspend/resume support - must do this before signal_sets_init()
  1.4411 +  if (SR_initialize() != 0) {
  1.4412 +    perror("SR_initialize failed");
  1.4413 +    return JNI_ERR;
  1.4414 +  }
  1.4415 +
  1.4416 +  Bsd::signal_sets_init();
  1.4417 +  Bsd::install_signal_handlers();
  1.4418 +
  1.4419 +  // Check minimum allowable stack size for thread creation and to initialize
  1.4420 +  // the java system classes, including StackOverflowError - depends on page
  1.4421 +  // size.  Add a page for compiler2 recursion in main thread.
  1.4422 +  // Add in 2*BytesPerWord times page size to account for VM stack during
  1.4423 +  // class initialization depending on 32 or 64 bit VM.
  1.4424 +  os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed,
  1.4425 +            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
  1.4426 +                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size());
  1.4427 +
  1.4428 +  size_t threadStackSizeInBytes = ThreadStackSize * K;
  1.4429 +  if (threadStackSizeInBytes != 0 &&
  1.4430 +      threadStackSizeInBytes < os::Bsd::min_stack_allowed) {
  1.4431 +        tty->print_cr("\nThe stack size specified is too small, "
  1.4432 +                      "Specify at least %dk",
  1.4433 +                      os::Bsd::min_stack_allowed/ K);
  1.4434 +        return JNI_ERR;
  1.4435 +  }
  1.4436 +
  1.4437 +  // Make the stack size a multiple of the page size so that
  1.4438 +  // the yellow/red zones can be guarded.
  1.4439 +  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
  1.4440 +        vm_page_size()));
  1.4441 +
  1.4442 +#ifndef _ALLBSD_SOURCE
  1.4443 +  Bsd::capture_initial_stack(JavaThread::stack_size_at_create());
  1.4444 +
  1.4445 +  Bsd::libpthread_init();
  1.4446 +  if (PrintMiscellaneous && (Verbose || WizardMode)) {
  1.4447 +     tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
  1.4448 +          Bsd::glibc_version(), Bsd::libpthread_version(),
  1.4449 +          Bsd::is_floating_stack() ? "floating stack" : "fixed stack");
  1.4450 +  }
  1.4451 +
  1.4452 +  if (UseNUMA) {
  1.4453 +    if (!Bsd::libnuma_init()) {
  1.4454 +      UseNUMA = false;
  1.4455 +    } else {
  1.4456 +      if ((Bsd::numa_max_node() < 1)) {
  1.4457 +        // There's only one node(they start from 0), disable NUMA.
  1.4458 +        UseNUMA = false;
  1.4459 +      }
  1.4460 +    }
  1.4461 +    // With SHM large pages we cannot uncommit a page, so there's not way
  1.4462 +    // we can make the adaptive lgrp chunk resizing work. If the user specified
  1.4463 +    // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
  1.4464 +    // disable adaptive resizing.
  1.4465 +    if (UseNUMA && UseLargePages && UseSHM) {
  1.4466 +      if (!FLAG_IS_DEFAULT(UseNUMA)) {
  1.4467 +        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
  1.4468 +          UseLargePages = false;
  1.4469 +        } else {
  1.4470 +          warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
  1.4471 +          UseAdaptiveSizePolicy = false;
  1.4472 +          UseAdaptiveNUMAChunkSizing = false;
  1.4473 +        }
  1.4474 +      } else {
  1.4475 +        UseNUMA = false;
  1.4476 +      }
  1.4477 +    }
  1.4478 +    if (!UseNUMA && ForceNUMA) {
  1.4479 +      UseNUMA = true;
  1.4480 +    }
  1.4481 +  }
  1.4482 +#endif
  1.4483 +
  1.4484 +  if (MaxFDLimit) {
  1.4485 +    // set the number of file descriptors to max. print out error
  1.4486 +    // if getrlimit/setrlimit fails but continue regardless.
  1.4487 +    struct rlimit nbr_files;
  1.4488 +    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
  1.4489 +    if (status != 0) {
  1.4490 +      if (PrintMiscellaneous && (Verbose || WizardMode))
  1.4491 +        perror("os::init_2 getrlimit failed");
  1.4492 +    } else {
  1.4493 +      nbr_files.rlim_cur = nbr_files.rlim_max;
  1.4494 +
  1.4495 +#ifdef __APPLE__
  1.4496 +      // Darwin returns RLIM_INFINITY for rlim_max, but fails with EINVAL if
  1.4497 +      // you attempt to use RLIM_INFINITY. As per setrlimit(2), OPEN_MAX must
  1.4498 +      // be used instead
  1.4499 +      nbr_files.rlim_cur = MIN(OPEN_MAX, nbr_files.rlim_cur);
  1.4500 +#endif
  1.4501 +
  1.4502 +      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
  1.4503 +      if (status != 0) {
  1.4504 +        if (PrintMiscellaneous && (Verbose || WizardMode))
  1.4505 +          perror("os::init_2 setrlimit failed");
  1.4506 +      }
  1.4507 +    }
  1.4508 +  }
  1.4509 +
  1.4510 +#ifndef _ALLBSD_SOURCE
  1.4511 +  // Initialize lock used to serialize thread creation (see os::create_thread)
  1.4512 +  Bsd::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));
  1.4513 +#endif
  1.4514 +
  1.4515 +  // at-exit methods are called in the reverse order of their registration.
  1.4516 +  // atexit functions are called on return from main or as a result of a
  1.4517 +  // call to exit(3C). There can be only 32 of these functions registered
  1.4518 +  // and atexit() does not set errno.
  1.4519 +
  1.4520 +  if (PerfAllowAtExitRegistration) {
  1.4521 +    // only register atexit functions if PerfAllowAtExitRegistration is set.
  1.4522 +    // atexit functions can be delayed until process exit time, which
  1.4523 +    // can be problematic for embedded VM situations. Embedded VMs should
  1.4524 +    // call DestroyJavaVM() to assure that VM resources are released.
  1.4525 +
  1.4526 +    // note: perfMemory_exit_helper atexit function may be removed in
  1.4527 +    // the future if the appropriate cleanup code can be added to the
  1.4528 +    // VM_Exit VMOperation's doit method.
  1.4529 +    if (atexit(perfMemory_exit_helper) != 0) {
  1.4530 +      warning("os::init2 atexit(perfMemory_exit_helper) failed");
  1.4531 +    }
  1.4532 +  }
  1.4533 +
  1.4534 +  // initialize thread priority policy
  1.4535 +  prio_init();
  1.4536 +
  1.4537 +  return JNI_OK;
  1.4538 +}
  1.4539 +
  1.4540 +// this is called at the end of vm_initialization
  1.4541 +void os::init_3(void) { }
  1.4542 +
  1.4543 +// Mark the polling page as unreadable
  1.4544 +void os::make_polling_page_unreadable(void) {
  1.4545 +  if( !guard_memory((char*)_polling_page, Bsd::page_size()) )
  1.4546 +    fatal("Could not disable polling page");
  1.4547 +};
  1.4548 +
  1.4549 +// Mark the polling page as readable
  1.4550 +void os::make_polling_page_readable(void) {
  1.4551 +  if( !bsd_mprotect((char *)_polling_page, Bsd::page_size(), PROT_READ)) {
  1.4552 +    fatal("Could not enable polling page");
  1.4553 +  }
  1.4554 +};
  1.4555 +
  1.4556 +int os::active_processor_count() {
  1.4557 +#ifdef _ALLBSD_SOURCE
  1.4558 +  return _processor_count;
  1.4559 +#else
  1.4560 +  // Bsd doesn't yet have a (official) notion of processor sets,
  1.4561 +  // so just return the number of online processors.
  1.4562 +  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
  1.4563 +  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
  1.4564 +  return online_cpus;
  1.4565 +#endif
  1.4566 +}
  1.4567 +
  1.4568 +bool os::distribute_processes(uint length, uint* distribution) {
  1.4569 +  // Not yet implemented.
  1.4570 +  return false;
  1.4571 +}
  1.4572 +
  1.4573 +bool os::bind_to_processor(uint processor_id) {
  1.4574 +  // Not yet implemented.
  1.4575 +  return false;
  1.4576 +}
  1.4577 +
  1.4578 +///
  1.4579 +
// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  // Left at its default value if the suspend fails or no context is found.
  ExtendedPC epc;

  OSThread* osthread = thread->osthread();
  if (do_suspend(osthread)) {
    if (osthread->ucontext() != NULL) {
      // Extract the program counter from the signal-saved context.
      epc = os::Bsd::ucontext_get_pc(osthread->ucontext());
    } else {
      // NULL context is unexpected, double-check this is the VMThread
      guarantee(thread->is_VM_thread(), "can only be called for VMThread");
    }
    // Always resume a successfully suspended target.
    do_resume(osthread);
  }
  // failure means pthread_kill failed for some reason - arguably this is
  // a fatal problem, but such problems are ignored elsewhere

  return epc;
}
  1.4604 +
// pthread_cond_timedwait() wrapper.  On pure BSD builds it is a straight
// pass-through; on the non-BSD (BsdThreads) path it additionally saves and
// restores the x87 FPU control word around the wait (see 6292965 below).
int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
{
#ifdef _ALLBSD_SOURCE
  return pthread_cond_timedwait(_cond, _mutex, _abstime);
#else
   if (is_NPTL()) {
      return pthread_cond_timedwait(_cond, _mutex, _abstime);
   } else {
#ifndef IA64
      // 6292965: BsdThreads pthread_cond_timedwait() resets FPU control
      // word back to default 64bit precision if condvar is signaled. Java
      // wants 53bit precision.  Save and restore current value.
      int fpu = get_fpu_control_word();
#endif // IA64
      int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
#ifndef IA64
      set_fpu_control_word(fpu);
#endif // IA64
      return status;
   }
#endif
}
  1.4627 +
  1.4628 +////////////////////////////////////////////////////////////////////////////////
  1.4629 +// debug support
  1.4630 +
  1.4631 +static address same_page(address x, address y) {
  1.4632 +  int page_bits = -os::vm_page_size();
  1.4633 +  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
  1.4634 +    return x;
  1.4635 +  else if (x > y)
  1.4636 +    return (address)(intptr_t(y) | ~page_bits) + 1;
  1.4637 +  else
  1.4638 +    return (address)(intptr_t(y) & page_bits);
  1.4639 +}
  1.4640 +
// Print a best-effort symbolic description of addr to st (nearest symbol
// plus offset, containing library, library base) and, when Verbose, a
// disassembly window around the address.  Returns true if dladdr() could
// attribute the address, false otherwise.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo)) {
    st->print(PTR_FORMAT ": ", addr);
    if (dlinfo.dli_sname != NULL) {
      // Nearest symbol name plus offset from the symbol's start address.
      st->print("%s+%#x", dlinfo.dli_sname,
                 addr - (intptr_t)dlinfo.dli_saddr);
    } else if (dlinfo.dli_fname) {
      // No symbol; report the offset from the containing library's base.
      st->print("<offset %#x>", addr - (intptr_t)dlinfo.dli_fbase);
    } else {
      st->print("<absolute address>");
    }
    if (dlinfo.dli_fname) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = same_page(addr-40, addr);
      address end   = same_page(addr+40, addr);
      // NOTE(review): dli_sname is the symbol *name* (a char*); using it as
      // the lower decode bound looks like it was meant to be dli_saddr, the
      // symbol's address — verify before changing (Verbose-only debug path).
      address       lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      Dl_info dlinfo2;
      // Don't decode past the start of the next symbol, if any.
      if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}
  1.4679 +
  1.4680 +////////////////////////////////////////////////////////////////////////////////
  1.4681 +// misc
  1.4682 +
  1.4683 +// This does not do anything on Bsd. This is basically a hook for being
  1.4684 +// able to use structured exception handling (thread-local exception filters)
  1.4685 +// on, e.g., Win32.
  1.4686 +void
  1.4687 +os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
  1.4688 +                         JavaCallArguments* args, Thread* thread) {
  1.4689 +  f(value, method, args, thread);
  1.4690 +}
  1.4691 +
  1.4692 +void os::print_statistics() {
  1.4693 +}
  1.4694 +
  1.4695 +int os::message_box(const char* title, const char* message) {
  1.4696 +  int i;
  1.4697 +  fdStream err(defaultStream::error_fd());
  1.4698 +  for (i = 0; i < 78; i++) err.print_raw("=");
  1.4699 +  err.cr();
  1.4700 +  err.print_raw_cr(title);
  1.4701 +  for (i = 0; i < 78; i++) err.print_raw("-");
  1.4702 +  err.cr();
  1.4703 +  err.print_raw_cr(message);
  1.4704 +  for (i = 0; i < 78; i++) err.print_raw("=");
  1.4705 +  err.cr();
  1.4706 +
  1.4707 +  char buf[16];
  1.4708 +  // Prevent process from exiting upon "read error" without consuming all CPU
  1.4709 +  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
  1.4710 +
  1.4711 +  return buf[0] == 'y' || buf[0] == 'Y';
  1.4712 +}
  1.4713 +
  1.4714 +int os::stat(const char *path, struct stat *sbuf) {
  1.4715 +  char pathbuf[MAX_PATH];
  1.4716 +  if (strlen(path) > MAX_PATH - 1) {
  1.4717 +    errno = ENAMETOOLONG;
  1.4718 +    return -1;
  1.4719 +  }
  1.4720 +  os::native_path(strcpy(pathbuf, path));
  1.4721 +  return ::stat(pathbuf, sbuf);
  1.4722 +}
  1.4723 +
  1.4724 +bool os::check_heap(bool force) {
  1.4725 +  return true;
  1.4726 +}
  1.4727 +
  1.4728 +int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
  1.4729 +  return ::vsnprintf(buf, count, format, args);
  1.4730 +}
  1.4731 +
  1.4732 +// Is a (classpath) directory empty?
  1.4733 +bool os::dir_is_empty(const char* path) {
  1.4734 +  DIR *dir = NULL;
  1.4735 +  struct dirent *ptr;
  1.4736 +
  1.4737 +  dir = opendir(path);
  1.4738 +  if (dir == NULL) return true;
  1.4739 +
  1.4740 +  /* Scan the directory */
  1.4741 +  bool result = true;
  1.4742 +  char buf[sizeof(struct dirent) + MAX_PATH];
  1.4743 +  while (result && (ptr = ::readdir(dir)) != NULL) {
  1.4744 +    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
  1.4745 +      result = false;
  1.4746 +    }
  1.4747 +  }
  1.4748 +  closedir(dir);
  1.4749 +  return result;
  1.4750 +}
  1.4751 +
  1.4752 +// This code originates from JDK's sysOpen and open64_w
  1.4753 +// from src/solaris/hpi/src/system_md.c
  1.4754 +
  1.4755 +#ifndef O_DELETE
  1.4756 +#define O_DELETE 0x10000
  1.4757 +#endif
  1.4758 +
  1.4759 +// Open a file. Unlink the file immediately after open returns
  1.4760 +// if the specified oflag has the O_DELETE flag set.
  1.4761 +// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
  1.4762 +
  1.4763 +int os::open(const char *path, int oflag, int mode) {
  1.4764 +
  1.4765 +  if (strlen(path) > MAX_PATH - 1) {
  1.4766 +    errno = ENAMETOOLONG;
  1.4767 +    return -1;
  1.4768 +  }
  1.4769 +  int fd;
  1.4770 +  int o_delete = (oflag & O_DELETE);
  1.4771 +  oflag = oflag & ~O_DELETE;
  1.4772 +
  1.4773 +  fd = ::open(path, oflag, mode);
  1.4774 +  if (fd == -1) return -1;
  1.4775 +
  1.4776 +  //If the open succeeded, the file might still be a directory
  1.4777 +  {
  1.4778 +    struct stat buf;
  1.4779 +    int ret = ::fstat(fd, &buf);
  1.4780 +    int st_mode = buf.st_mode;
  1.4781 +
  1.4782 +    if (ret != -1) {
  1.4783 +      if ((st_mode & S_IFMT) == S_IFDIR) {
  1.4784 +        errno = EISDIR;
  1.4785 +        ::close(fd);
  1.4786 +        return -1;
  1.4787 +      }
  1.4788 +    } else {
  1.4789 +      ::close(fd);
  1.4790 +      return -1;
  1.4791 +    }
  1.4792 +  }
  1.4793 +
  1.4794 +    /*
  1.4795 +     * All file descriptors that are opened in the JVM and not
  1.4796 +     * specifically destined for a subprocess should have the
  1.4797 +     * close-on-exec flag set.  If we don't set it, then careless 3rd
  1.4798 +     * party native code might fork and exec without closing all
  1.4799 +     * appropriate file descriptors (e.g. as we do in closeDescriptors in
  1.4800 +     * UNIXProcess.c), and this in turn might:
  1.4801 +     *
  1.4802 +     * - cause end-of-file to fail to be detected on some file
  1.4803 +     *   descriptors, resulting in mysterious hangs, or
  1.4804 +     *
  1.4805 +     * - might cause an fopen in the subprocess to fail on a system
  1.4806 +     *   suffering from bug 1085341.
  1.4807 +     *
  1.4808 +     * (Yes, the default setting of the close-on-exec flag is a Unix
  1.4809 +     * design flaw)
  1.4810 +     *
  1.4811 +     * See:
  1.4812 +     * 1085341: 32-bit stdio routines should support file descriptors >255
  1.4813 +     * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  1.4814 +     * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
  1.4815 +     */
  1.4816 +#ifdef FD_CLOEXEC
  1.4817 +    {
  1.4818 +        int flags = ::fcntl(fd, F_GETFD);
  1.4819 +        if (flags != -1)
  1.4820 +            ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  1.4821 +    }
  1.4822 +#endif
  1.4823 +
  1.4824 +  if (o_delete != 0) {
  1.4825 +    ::unlink(path);
  1.4826 +  }
  1.4827 +  return fd;
  1.4828 +}
  1.4829 +
  1.4830 +
  1.4831 +// create binary file, rewriting existing file if required
  1.4832 +int os::create_binary_file(const char* path, bool rewrite_existing) {
  1.4833 +  int oflags = O_WRONLY | O_CREAT;
  1.4834 +  if (!rewrite_existing) {
  1.4835 +    oflags |= O_EXCL;
  1.4836 +  }
  1.4837 +  return ::open(path, oflags, S_IREAD | S_IWRITE);
  1.4838 +}
  1.4839 +
  1.4840 +// return current position of file pointer
  1.4841 +jlong os::current_file_offset(int fd) {
  1.4842 +  return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
  1.4843 +}
  1.4844 +
  1.4845 +// move file pointer to the specified offset
  1.4846 +jlong os::seek_to_file_offset(int fd, jlong offset) {
  1.4847 +  return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
  1.4848 +}
  1.4849 +
  1.4850 +// This code originates from JDK's sysAvailable
  1.4851 +// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
  1.4852 +
  1.4853 +int os::available(int fd, jlong *bytes) {
  1.4854 +  jlong cur, end;
  1.4855 +  int mode;
  1.4856 +  struct stat buf;
  1.4857 +
  1.4858 +  if (::fstat(fd, &buf) >= 0) {
  1.4859 +    mode = buf.st_mode;
  1.4860 +    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
  1.4861 +      /*
  1.4862 +      * XXX: is the following call interruptible? If so, this might
  1.4863 +      * need to go through the INTERRUPT_IO() wrapper as for other
  1.4864 +      * blocking, interruptible calls in this file.
  1.4865 +      */
  1.4866 +      int n;
  1.4867 +      if (::ioctl(fd, FIONREAD, &n) >= 0) {
  1.4868 +        *bytes = n;
  1.4869 +        return 1;
  1.4870 +      }
  1.4871 +    }
  1.4872 +  }
  1.4873 +  if ((cur = ::lseek(fd, 0L, SEEK_CUR)) == -1) {
  1.4874 +    return 0;
  1.4875 +  } else if ((end = ::lseek(fd, 0L, SEEK_END)) == -1) {
  1.4876 +    return 0;
  1.4877 +  } else if (::lseek(fd, cur, SEEK_SET) == -1) {
  1.4878 +    return 0;
  1.4879 +  }
  1.4880 +  *bytes = end - cur;
  1.4881 +  return 1;
  1.4882 +}
  1.4883 +
  1.4884 +int os::socket_available(int fd, jint *pbytes) {
  1.4885 +   if (fd < 0)
  1.4886 +     return OS_OK;
  1.4887 +
  1.4888 +   int ret;
  1.4889 +
  1.4890 +   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  1.4891 +
  1.4892 +   //%% note ioctl can return 0 when successful, JVM_SocketAvailable
  1.4893 +   // is expected to return 0 on failure and 1 on success to the jdk.
  1.4894 +
  1.4895 +   return (ret == OS_ERR) ? 0 : 1;
  1.4896 +}
  1.4897 +
  1.4898 +// Map a block of memory.
  1.4899 +char* os::map_memory(int fd, const char* file_name, size_t file_offset,
  1.4900 +                     char *addr, size_t bytes, bool read_only,
  1.4901 +                     bool allow_exec) {
  1.4902 +  int prot;
  1.4903 +  int flags;
  1.4904 +
  1.4905 +  if (read_only) {
  1.4906 +    prot = PROT_READ;
  1.4907 +    flags = MAP_SHARED;
  1.4908 +  } else {
  1.4909 +    prot = PROT_READ | PROT_WRITE;
  1.4910 +    flags = MAP_PRIVATE;
  1.4911 +  }
  1.4912 +
  1.4913 +  if (allow_exec) {
  1.4914 +    prot |= PROT_EXEC;
  1.4915 +  }
  1.4916 +
  1.4917 +  if (addr != NULL) {
  1.4918 +    flags |= MAP_FIXED;
  1.4919 +  }
  1.4920 +
  1.4921 +  char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
  1.4922 +                                     fd, file_offset);
  1.4923 +  if (mapped_address == MAP_FAILED) {
  1.4924 +    return NULL;
  1.4925 +  }
  1.4926 +  return mapped_address;
  1.4927 +}
  1.4928 +
  1.4929 +
  1.4930 +// Remap a block of memory.
  1.4931 +char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
  1.4932 +                       char *addr, size_t bytes, bool read_only,
  1.4933 +                       bool allow_exec) {
  1.4934 +  // same as map_memory() on this OS
  1.4935 +  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
  1.4936 +                        allow_exec);
  1.4937 +}
  1.4938 +
  1.4939 +
  1.4940 +// Unmap a block of memory.
  1.4941 +bool os::unmap_memory(char* addr, size_t bytes) {
  1.4942 +  return munmap(addr, bytes) == 0;
  1.4943 +}
  1.4944 +
#ifndef _ALLBSD_SOURCE
// Forward declaration; defined later in this file.
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);

// Return the POSIX clock id measuring CPU time consumed by 'thread',
// suitable for the fast clock_gettime-based CPU-time path.
static clockid_t thread_cpu_clockid(Thread* thread) {
  pthread_t tid = thread->osthread()->pthread_id();
  clockid_t clockid;

  // Get thread clockid
  int rc = os::Bsd::pthread_getcpuclockid(tid, &clockid);
  assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code");
  return clockid;
}
#endif
  1.4958 +
  1.4959 +// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
  1.4960 +// are used by JVM M&M and JVMTI to get user+sys or user CPU time
  1.4961 +// of a thread.
  1.4962 +//
  1.4963 +// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
  1.4964 +// the fast estimate available on the platform.
  1.4965 +
  1.4966 +jlong os::current_thread_cpu_time() {
  1.4967 +#ifdef __APPLE__
  1.4968 +  return os::thread_cpu_time(Thread::current(), true /* user + sys */);
  1.4969 +#elif !defined(_ALLBSD_SOURCE)
  1.4970 +  if (os::Bsd::supports_fast_thread_cpu_time()) {
  1.4971 +    return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  1.4972 +  } else {
  1.4973 +    // return user + sys since the cost is the same
  1.4974 +    return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
  1.4975 +  }
  1.4976 +#endif
  1.4977 +}
  1.4978 +
  1.4979 +jlong os::thread_cpu_time(Thread* thread) {
  1.4980 +#ifndef _ALLBSD_SOURCE
  1.4981 +  // consistent with what current_thread_cpu_time() returns
  1.4982 +  if (os::Bsd::supports_fast_thread_cpu_time()) {
  1.4983 +    return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread));
  1.4984 +  } else {
  1.4985 +    return slow_thread_cpu_time(thread, true /* user + sys */);
  1.4986 +  }
  1.4987 +#endif
  1.4988 +}
  1.4989 +
  1.4990 +jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  1.4991 +#ifdef __APPLE__
  1.4992 +  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  1.4993 +#elif !defined(_ALLBSD_SOURCE)
  1.4994 +  if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) {
  1.4995 +    return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
  1.4996 +  } else {
  1.4997 +    return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
  1.4998 +  }
  1.4999 +#endif
  1.5000 +}
  1.5001 +
  1.5002 +jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  1.5003 +#ifdef __APPLE__
  1.5004 +  struct thread_basic_info tinfo;
  1.5005 +  mach_msg_type_number_t tcount = THREAD_INFO_MAX;
  1.5006 +  kern_return_t kr;
  1.5007 +  mach_port_t mach_thread;
  1.5008 +
  1.5009 +  mach_thread = pthread_mach_thread_np(thread->osthread()->thread_id());
  1.5010 +  kr = thread_info(mach_thread, THREAD_BASIC_INFO, (thread_info_t)&tinfo, &tcount);
  1.5011 +  if (kr != KERN_SUCCESS)
  1.5012 +    return -1;
  1.5013 +
  1.5014 +  if (user_sys_cpu_time) {
  1.5015 +    jlong nanos;
  1.5016 +    nanos = ((jlong) tinfo.system_time.seconds + tinfo.user_time.seconds) * (jlong)1000000000;
  1.5017 +    nanos += ((jlong) tinfo.system_time.microseconds + (jlong) tinfo.user_time.microseconds) * (jlong)1000;
  1.5018 +    return nanos;
  1.5019 +  } else {
  1.5020 +    return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000);
  1.5021 +  }
  1.5022 +#elif !defined(_ALLBSD_SOURCE)
  1.5023 +  if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) {
  1.5024 +    return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread));
  1.5025 +  } else {
  1.5026 +    return slow_thread_cpu_time(thread, user_sys_cpu_time);
  1.5027 +  }
  1.5028 +#endif
  1.5029 +}
  1.5030 +
#ifndef _ALLBSD_SOURCE
//
//  -1 on error.
//

// Fetch the CPU time (user+sys or user-only, in nanoseconds) consumed by
// 'thread' by parsing the /proc files inherited from the Linux port:
// first /proc/<tid>/cpu (older kernels), then /proc/<tid>/stat or
// /proc/self/task/<tid>/stat.  Returns -1 on any read/parse failure.
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  static bool proc_pid_cpu_avail = true;    // false once /proc/<tid>/cpu is seen missing
  static bool proc_task_unchecked = true;   // true until /proc/self/task is probed
  static const char *proc_stat_path = "/proc/%d/stat";
  pid_t  tid = thread->osthread()->thread_id();
  int i;
  char *s;
  char stat[2048];
  int statlen;
  char proc_name[64];
  int count;
  // NOTE(review): scanned with "%lu" below although declared long — the
  // format/type mismatch is inherited from the original code; verify
  // before changing.
  long sys_time, user_time;
  char string[64];
  char cdummy;
  int idummy;
  long ldummy;
  FILE *fp;

  // We first try accessing /proc/<pid>/cpu since this is faster to
  // process.  If this file is not present (bsd kernels 2.5 and above)
  // then we open /proc/<pid>/stat.
  if ( proc_pid_cpu_avail ) {
    sprintf(proc_name, "/proc/%d/cpu", tid);
    fp =  fopen(proc_name, "r");
    if ( fp != NULL ) {
      count = fscanf( fp, "%s %lu %lu\n", string, &user_time, &sys_time);
      fclose(fp);
      if ( count != 3 ) return -1;

      // Convert clock ticks to nanoseconds.
      if (user_sys_cpu_time) {
        return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
      } else {
        return (jlong)user_time * (1000000000 / clock_tics_per_sec);
      }
    }
    else proc_pid_cpu_avail = false;
  }

  // The /proc/<tid>/stat aggregates per-process usage on
  // new Bsd kernels 2.6+ where NPTL is supported.
  // The /proc/self/task/<tid>/stat still has the per-thread usage.
  // See bug 6328462.
  // There can be no directory /proc/self/task on kernels 2.4 with NPTL
  // and possibly in some other cases, so we check its availability.
  if (proc_task_unchecked && os::Bsd::is_NPTL()) {
    // This is executed only once
    proc_task_unchecked = false;
    fp = fopen("/proc/self/task", "r");
    if (fp != NULL) {
      proc_stat_path = "/proc/self/task/%d/stat";
      fclose(fp);
    }
  }

  sprintf(proc_name, proc_stat_path, tid);
  fp = fopen(proc_name, "r");
  if ( fp == NULL ) return -1;
  statlen = fread(stat, 1, 2047, fp);
  stat[statlen] = '\0';
  fclose(fp);

  // Skip pid and the command string. Note that we could be dealing with
  // weird command names, e.g. user could decide to rename java launcher
  // to "java 1.4.2 :)", then the stat file would look like
  //                1234 (java 1.4.2 :)) R ... ...
  // We don't really need to know the command string, just find the last
  // occurrence of ")" and then start parsing from there. See bug 4726580.
  s = strrchr(stat, ')');
  i = 0;
  if (s == NULL ) return -1;

  // Skip blank chars
  do s++; while (isspace(*s));

  // utime and stime are the last two fields scanned; the preceding
  // fields are read into dummies and discarded.
  count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
                 &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
                 &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
                 &user_time, &sys_time);
  if ( count != 13 ) return -1;
  // Convert clock ticks to nanoseconds.
  if (user_sys_cpu_time) {
    return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
  } else {
    return (jlong)user_time * (1000000000 / clock_tics_per_sec);
  }
}
#endif
  1.5122 +
  1.5123 +void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  1.5124 +  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  1.5125 +  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  1.5126 +  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  1.5127 +  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
  1.5128 +}
  1.5129 +
  1.5130 +void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  1.5131 +  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  1.5132 +  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  1.5133 +  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  1.5134 +  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
  1.5135 +}
  1.5136 +
  1.5137 +bool os::is_thread_cpu_time_supported() {
  1.5138 +#ifdef __APPLE__
  1.5139 +  return true;
  1.5140 +#elif defined(_ALLBSD_SOURCE)
  1.5141 +  return false;
  1.5142 +#else
  1.5143 +  return true;
  1.5144 +#endif
  1.5145 +}
  1.5146 +
  1.5147 +// System loadavg support.  Returns -1 if load average cannot be obtained.
  1.5148 +// Bsd doesn't yet have a (official) notion of processor sets,
  1.5149 +// so just return the system wide load average.
  1.5150 +int os::loadavg(double loadavg[], int nelem) {
  1.5151 +  return ::getloadavg(loadavg, nelem);
  1.5152 +}
  1.5153 +
  1.5154 +void os::pause() {
  1.5155 +  char filename[MAX_PATH];
  1.5156 +  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
  1.5157 +    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
  1.5158 +  } else {
  1.5159 +    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  1.5160 +  }
  1.5161 +
  1.5162 +  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  1.5163 +  if (fd != -1) {
  1.5164 +    struct stat buf;
  1.5165 +    ::close(fd);
  1.5166 +    while (::stat(filename, &buf) == 0) {
  1.5167 +      (void)::poll(NULL, 0, 100);
  1.5168 +    }
  1.5169 +  } else {
  1.5170 +    jio_fprintf(stderr,
  1.5171 +      "Could not open pause file '%s', continuing immediately.\n", filename);
  1.5172 +  }
  1.5173 +}
  1.5174 +
  1.5175 +
  1.5176 +// Refer to the comments in os_solaris.cpp park-unpark.
  1.5177 +//
  1.5178 +// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
  1.5179 +// hang indefinitely.  For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
  1.5180 +// For specifics regarding the bug see GLIBC BUGID 261237 :
  1.5181 +//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
  1.5182 +// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
  1.5183 +// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
  1.5184 +// is used.  (The simple C test-case provided in the GLIBC bug report manifests the
// hang).  The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
  1.5186 +// and monitorenter when we're using 1-0 locking.  All those operations may result in
  1.5187 +// calls to pthread_cond_timedwait().  Using LD_ASSUME_KERNEL to use an older version
  1.5188 +// of libpthread avoids the problem, but isn't practical.
  1.5189 +//
  1.5190 +// Possible remedies:
  1.5191 +//
  1.5192 +// 1.   Establish a minimum relative wait time.  50 to 100 msecs seems to work.
  1.5193 +//      This is palliative and probabilistic, however.  If the thread is preempted
  1.5194 +//      between the call to compute_abstime() and pthread_cond_timedwait(), more
  1.5195 +//      than the minimum period may have passed, and the abstime may be stale (in the
//      past) resulting in a hang.   Using this technique reduces the odds of a hang
  1.5197 +//      but the JVM is still vulnerable, particularly on heavily loaded systems.
  1.5198 +//
  1.5199 +// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
  1.5200 +//      of the usual flag-condvar-mutex idiom.  The write side of the pipe is set
  1.5201 +//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
  1.5202 +//      reduces to poll()+read().  This works well, but consumes 2 FDs per extant
  1.5203 +//      thread.
  1.5204 +//
  1.5205 +// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
  1.5206 +//      that manages timeouts.  We'd emulate pthread_cond_timedwait() by enqueuing
  1.5207 +//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
  1.5208 +//      This also works well.  In fact it avoids kernel-level scalability impediments
  1.5209 +//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
  1.5210 +//      timers in a graceful fashion.
  1.5211 +//
  1.5212 +// 4.   When the abstime value is in the past it appears that control returns
  1.5213 +//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
  1.5214 +//      Subsequent timedwait/wait calls may hang indefinitely.  Given that, we
  1.5215 +//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
  1.5216 +//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
  1.5217 +//      It may be possible to avoid reinitialization by checking the return
  1.5218 +//      value from pthread_cond_timedwait().  In addition to reinitializing the
  1.5219 +//      condvar we must establish the invariant that cond_signal() is only called
  1.5220 +//      within critical sections protected by the adjunct mutex.  This prevents
  1.5221 +//      cond_signal() from "seeing" a condvar that's in the midst of being
  1.5222 +//      reinitialized or that is corrupt.  Sadly, this invariant obviates the
  1.5223 +//      desirable signal-after-unlock optimization that avoids futile context switching.
  1.5224 +//
//      I'm also concerned that some versions of NPTL might allocate an auxiliary
  1.5226 +//      structure when a condvar is used or initialized.  cond_destroy()  would
  1.5227 +//      release the helper structure.  Our reinitialize-after-timedwait fix
  1.5228 +//      put excessive stress on malloc/free and locks protecting the c-heap.
  1.5229 +//
  1.5230 +// We currently use (4).  See the WorkAroundNTPLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
  1.5232 +// and only enabling the work-around for vulnerable environments.
  1.5233 +
  1.5234 +// utility to compute the abstime argument to timedwait:
  1.5235 +// millis is the relative timeout time
  1.5236 +// abstime will be the absolute timeout time
  1.5237 +// TODO: replace compute_abstime() with unpackTime()
  1.5238 +
  1.5239 +static struct timespec* compute_abstime(struct timespec* abstime, jlong millis) {
  1.5240 +  if (millis < 0)  millis = 0;
  1.5241 +  struct timeval now;
  1.5242 +  int status = gettimeofday(&now, NULL);
  1.5243 +  assert(status == 0, "gettimeofday");
  1.5244 +  jlong seconds = millis / 1000;
  1.5245 +  millis %= 1000;
  1.5246 +  if (seconds > 50000000) { // see man cond_timedwait(3T)
  1.5247 +    seconds = 50000000;
  1.5248 +  }
  1.5249 +  abstime->tv_sec = now.tv_sec  + seconds;
  1.5250 +  long       usec = now.tv_usec + millis * 1000;
  1.5251 +  if (usec >= 1000000) {
  1.5252 +    abstime->tv_sec += 1;
  1.5253 +    usec -= 1000000;
  1.5254 +  }
  1.5255 +  abstime->tv_nsec = usec * 1000;
  1.5256 +  return abstime;
  1.5257 +}
  1.5258 +
  1.5259 +
  1.5260 +// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
  1.5261 +// Conceptually TryPark() should be equivalent to park(0).
  1.5262 +
  1.5263 +int os::PlatformEvent::TryPark() {
  1.5264 +  for (;;) {
  1.5265 +    const int v = _Event ;
  1.5266 +    guarantee ((v == 0) || (v == 1), "invariant") ;
  1.5267 +    if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  1.5268 +  }
  1.5269 +}
  1.5270 +
void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  int v ;
  // Atomically decrement _Event.  A prior value of 1 means a permit was
  // available and we return without blocking; 0 means we must block below
  // (leaving _Event at -1 so unpark() knows a waiter is present).
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
     // Do this the hard way by blocking ...
     int status = pthread_mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     guarantee (_nParked == 0, "invariant") ;
     ++ _nParked ;
     // Re-check _Event under the mutex and loop: pthread_cond_wait() may
     // wake spuriously, so only exit once unpark() has raised _Event.
     while (_Event < 0) {
        status = pthread_cond_wait(_cond, _mutex);
        // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
        // Treat this the same as if the wait was interrupted
        if (status == ETIMEDOUT) { status = EINTR; }
        assert_status(status == 0 || status == EINTR, status, "cond_wait");
     }
     -- _nParked ;

    // In theory we could move the ST of 0 into _Event past the unlock(),
    // but then we'd need a MEMBAR after the ST.
    _Event = 0 ;
     status = pthread_mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee (_Event >= 0, "invariant") ;
}
  1.5304 +
// Timed variant of park(): blocks for at most 'millis' milliseconds.
// Returns OS_OK when the event was set (or a permit was already pending),
// OS_TIMEOUT when the wait expired without a set.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;

  int v ;
  // Atomically decrement _Event; a positive prior value is a pending
  // permit, letting us return immediately without taking the mutex.
  for (;;) {
      v = _Event ;
      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++_nParked ;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // The underlying Solaris implementation, cond_timedwait, admits
  // spurious/premature wakeups, but the JLS/JVM spec prevents the
  // JVM from making those visible to Java code.  As such, we must
  // filter out spurious wakeups.  We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = os::Bsd::safe_cond_timedwait(_cond, _mutex, &abst);
    // Reinitialize the condvar after a failed timedwait; see the
    // WorkAroundNPTLTimedWaitHang commentary earlier in this file.
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond);
      pthread_cond_init (_cond, NULL) ;
    }
    assert_status(status == 0 || status == EINTR ||
                  status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ;                 // previous semantics
    if (status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked ;
  // A non-negative _Event here means unpark() ran, i.e. we were set
  // rather than timed out.
  if (_Event >= 0) {
     ret = OS_OK;
  }
  _Event = 0 ;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant") ;
  return ret;
}
  1.5365 +
// Set the event: record a permit and wake the associated thread if it is
// blocked in park().  _Event transitions: 1 -> 1 (already set, fast exit),
// 0 -> 1 (permit recorded, nobody waiting), -1 -> 0 (waiter present,
// must be signalled below).
void os::PlatformEvent::unpark() {
  int v, AnyWaiters ;
  for (;;) {
      v = _Event ;
      if (v > 0) {
         // The LD of _Event could have reordered or be satisfied
         // by a read-aside from this processor's write buffer.
         // To avoid problems execute a barrier and then
         // ratify the value.
         OrderAccess::fence() ;
         if (_Event == v) return ;
         continue ;
      }
      if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }
  // v < 0 means a thread is (or is about to be) blocked in park().
  if (v < 0) {
     // Wait for the thread associated with the event to vacate
     int status = pthread_mutex_lock(_mutex);
     assert_status(status == 0, status, "mutex_lock");
     AnyWaiters = _nParked ;
     assert (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
     // Under the NPTL hang workaround we must signal while still holding
     // the mutex; clear AnyWaiters so we don't signal a second time below.
     if (AnyWaiters != 0 && WorkAroundNPTLTimedWaitHang) {
        AnyWaiters = 0 ;
        pthread_cond_signal (_cond);
     }
     status = pthread_mutex_unlock(_mutex);
     assert_status(status == 0, status, "mutex_unlock");
     if (AnyWaiters != 0) {
        status = pthread_cond_signal(_cond);
        assert_status(status == 0, status, "cond_signal");
     }
  }

  // Note that we signal() _after dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of  futile wakeups.  In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
}
  1.5405 +
  1.5406 +
  1.5407 +// JSR166
  1.5408 +// -------------------------------------------------------
  1.5409 +
  1.5410 +/*
  1.5411 + * The solaris and bsd implementations of park/unpark are fairly
  1.5412 + * conservative for now, but can be improved. They currently use a
 * mutex/condvar pair, plus a count.
  1.5414 + * Park decrements count if > 0, else does a condvar wait.  Unpark
  1.5415 + * sets count to 1 and signals condvar.  Only one thread ever waits
  1.5416 + * on the condvar. Contention seen when trying to park implies that someone
  1.5417 + * is unparking you, so don't wait. And spurious returns are fine, so there
  1.5418 + * is no need to track notifications.
  1.5419 + */
  1.5420 +
  1.5421 +
  1.5422 +#define NANOSECS_PER_SEC 1000000000
  1.5423 +#define NANOSECS_PER_MILLISEC 1000000
  1.5424 +#define MAX_SECS 100000000
  1.5425 +/*
  1.5426 + * This code is common to bsd and solaris and will be moved to a
  1.5427 + * common place in dolphin.
  1.5428 + *
  1.5429 + * The passed in time value is either a relative time in nanoseconds
  1.5430 + * or an absolute time in milliseconds. Either way it has to be unpacked
  1.5431 + * into suitable seconds and nanoseconds components and stored in the
  1.5432 + * given timespec structure.
  1.5433 + * Given time is a 64-bit value and the time_t used in the timespec is only
  1.5434 + * a signed-32-bit value (except on 64-bit Bsd) we have to watch for
  1.5435 + * overflow if times way in the future are given. Further on Solaris versions
  1.5436 + * prior to 10 there is a restriction (see cond_timedwait) that the specified
  1.5437 + * number of seconds, in abstime, is less than current_time  + 100,000,000.
  1.5438 + * As it will be 28 years before "now + 100000000" will overflow we can
  1.5439 + * ignore overflow and just impose a hard-limit on seconds using the value
  1.5440 + * of "now + 100,000,000". This places a limit on the timeout of about 3.17
  1.5441 + * years from "now".
  1.5442 + */
  1.5443 +
  1.5444 +static void unpackTime(struct timespec* absTime, bool isAbsolute, jlong time) {
  1.5445 +  assert (time > 0, "convertTime");
  1.5446 +
  1.5447 +  struct timeval now;
  1.5448 +  int status = gettimeofday(&now, NULL);
  1.5449 +  assert(status == 0, "gettimeofday");
  1.5450 +
  1.5451 +  time_t max_secs = now.tv_sec + MAX_SECS;
  1.5452 +
  1.5453 +  if (isAbsolute) {
  1.5454 +    jlong secs = time / 1000;
  1.5455 +    if (secs > max_secs) {
  1.5456 +      absTime->tv_sec = max_secs;
  1.5457 +    }
  1.5458 +    else {
  1.5459 +      absTime->tv_sec = secs;
  1.5460 +    }
  1.5461 +    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  1.5462 +  }
  1.5463 +  else {
  1.5464 +    jlong secs = time / NANOSECS_PER_SEC;
  1.5465 +    if (secs >= MAX_SECS) {
  1.5466 +      absTime->tv_sec = max_secs;
  1.5467 +      absTime->tv_nsec = 0;
  1.5468 +    }
  1.5469 +    else {
  1.5470 +      absTime->tv_sec = now.tv_sec + secs;
  1.5471 +      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
  1.5472 +      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
  1.5473 +        absTime->tv_nsec -= NANOSECS_PER_SEC;
  1.5474 +        ++absTime->tv_sec; // note: this must be <= max_secs
  1.5475 +      }
  1.5476 +    }
  1.5477 +  }
  1.5478 +  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  1.5479 +  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  1.5480 +  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  1.5481 +  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
  1.5482 +}
  1.5483 +
// Parker::park() -- the VM half of java.util.concurrent LockSupport.park().
// time == 0 waits indefinitely; otherwise 'time' is a relative timeout in
// nanoseconds, or, when isAbsolute, a deadline in milliseconds (decoded by
// unpackTime above).  Spurious returns are permitted (see the JSR166
// comment block above), so there is no wait loop here.
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
      _counter = 0 ;
      OrderAccess::fence();
      return ;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  struct timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }


  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also, check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;
  if (_counter > 0)  { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    // Fence so the permit consumption is visible before we return.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Bsd::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  if (time == 0) {
    // Untimed park: block until unpark() signals the condvar.
    status = pthread_cond_wait (_cond, _mutex) ;
  } else {
    status = os::Bsd::safe_cond_timedwait (_cond, _mutex, &absTime) ;
    // Reinitialize the condvar after a failed timedwait; see the
    // WorkAroundNPTLTimedWaitHang commentary earlier in this file.
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond) ;
      pthread_cond_init    (_cond, NULL);
    }
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  // Consume any permit regardless of why we woke (signal, timeout, EINTR).
  _counter = 0 ;
  status = pthread_mutex_unlock(_mutex) ;
  assert_status(status == 0, status, "invariant") ;
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}
  1.5575 +
  1.5576 +void Parker::unpark() {
  1.5577 +  int s, status ;
  1.5578 +  status = pthread_mutex_lock(_mutex);
  1.5579 +  assert (status == 0, "invariant") ;
  1.5580 +  s = _counter;
  1.5581 +  _counter = 1;
  1.5582 +  if (s < 1) {
  1.5583 +     if (WorkAroundNPTLTimedWaitHang) {
  1.5584 +        status = pthread_cond_signal (_cond) ;
  1.5585 +        assert (status == 0, "invariant") ;
  1.5586 +        status = pthread_mutex_unlock(_mutex);
  1.5587 +        assert (status == 0, "invariant") ;
  1.5588 +     } else {
  1.5589 +        status = pthread_mutex_unlock(_mutex);
  1.5590 +        assert (status == 0, "invariant") ;
  1.5591 +        status = pthread_cond_signal (_cond) ;
  1.5592 +        assert (status == 0, "invariant") ;
  1.5593 +     }
  1.5594 +  } else {
  1.5595 +    pthread_mutex_unlock(_mutex);
  1.5596 +    assert (status == 0, "invariant") ;
  1.5597 +  }
  1.5598 +}
  1.5599 +
  1.5600 +
  1.5601 +/* Darwin has no "environ" in a dynamic library. */
  1.5602 +#ifdef __APPLE__
  1.5603 +#include <crt_externs.h>
  1.5604 +#define environ (*_NSGetEnviron())
  1.5605 +#else
  1.5606 +extern char** environ;
  1.5607 +#endif
  1.5608 +
  1.5609 +// Run the specified command in a separate process. Return its exit value,
  1.5610 +// or -1 on failure (e.g. can't fork a new process).
  1.5611 +// Unlike system(), this function can be called from signal handler. It
  1.5612 +// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  const char * argv[4] = {"sh", "-c", cmd, NULL};

  // NOTE(review): the rationale below was inherited from the Linux port,
  // where a direct fork syscall was used to skip pthread_atfork handlers.
  // This port simply calls libc fork(); confirm that is adequate for the
  // BSD/Darwin threading libraries.
  pid_t pid = fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // NOTE(review): Linux-derived comment -- on LinuxThreads a raw execve
    // syscall was needed so pthread_kill_other_threads_np() would not kill
    // the parent's threads.  Here we call libc execve() directly; verify
    // this is safe on the targeted BSD variants.
    execve("/bin/sh", (char* const*)argv, environ);

    // execve failed; _exit() is async-signal-safe, unlike exit().
    _exit(-1);

  } else  {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit.  This returns immediately if
    // the child has already exited; retry on EINTR.
    while (waitpid(pid, &status, 0) < 0) {
        switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
        }
    }

    if (WIFEXITED(status)) {
       // The child exited normally; get its exit code.
       return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
       // The child exited because of a signal
       // The best value to return is 0x80 + signal number,
       // because that is what all Unix shells do, and because
       // it allows callers to distinguish between process exit and
       // process death by signal.
       return 0x80 + WTERMSIG(status);
    } else {
       // Unknown exit code; pass it through
       return status;
    }
  }
}
  1.5674 +
  1.5675 +// is_headless_jre()
  1.5676 +//
  1.5677 +// Test for the existence of libmawt in motif21 or xawt directories
  1.5678 +// in order to report if we are running in a headless jre
  1.5679 +//
  1.5680 +bool os::is_headless_jre() {
  1.5681 +    struct stat statbuf;
  1.5682 +    char buf[MAXPATHLEN];
  1.5683 +    char libmawtpath[MAXPATHLEN];
  1.5684 +    const char *xawtstr  = "/xawt/libmawt.so";
  1.5685 +    const char *motifstr = "/motif21/libmawt.so";
  1.5686 +    char *p;
  1.5687 +
  1.5688 +    // Get path to libjvm.so
  1.5689 +    os::jvm_path(buf, sizeof(buf));
  1.5690 +
  1.5691 +    // Get rid of libjvm.so
  1.5692 +    p = strrchr(buf, '/');
  1.5693 +    if (p == NULL) return false;
  1.5694 +    else *p = '\0';
  1.5695 +
  1.5696 +    // Get rid of client or server
  1.5697 +    p = strrchr(buf, '/');
  1.5698 +    if (p == NULL) return false;
  1.5699 +    else *p = '\0';
  1.5700 +
  1.5701 +    // check xawt/libmawt.so
  1.5702 +    strcpy(libmawtpath, buf);
  1.5703 +    strcat(libmawtpath, xawtstr);
  1.5704 +    if (::stat(libmawtpath, &statbuf) == 0) return false;
  1.5705 +
  1.5706 +    // check motif21/libmawt.so
  1.5707 +    strcpy(libmawtpath, buf);
  1.5708 +    strcat(libmawtpath, motifstr);
  1.5709 +    if (::stat(libmawtpath, &statbuf) == 0) return false;
  1.5710 +
  1.5711 +    return true;
  1.5712 +}

mercurial