src/os/solaris/vm/os_solaris.cpp

author       kvn
date         Sun, 26 Jul 2009 16:40:14 -0700
changeset    1329:665be97e8704
parent       1126:956304450e80
child        1552:95e9083cf4a7
permissions  -rw-r--r--

6863420: os::javaTimeNanos() go backward on Solaris x86
Summary: Use new atomic long load method Atomic::load() to load max_hrtime.
Reviewed-by: never, ysr, johnc, phh, dcubed, acorn
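
The change itself is in the gethrtime()-based helper behind os::javaTimeNanos(), which lies beyond this excerpt (the listing below stops near line 1458). The following is only a sketch of the pattern the summary describes, not the changeset's exact body; it assumes the Atomic::load(volatile jlong*) and Atomic::cmpxchg(jlong, volatile jlong*, jlong) overloads and the max_hrtime variable declared near line 455 of the listing:

    // Sketch: keep a published maximum so the reported time never moves backward.
    // On 32-bit x86 a plain jlong read of max_hrtime takes two loads and can be
    // torn, which is how the value could appear to go backward; Atomic::load()
    // performs the read as a single atomic 64-bit load.
    inline hrtime_t getTimeNanos() {
      hrtime_t now  = gethrtime();
      hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);  // atomic 64-bit load
      while (now > prev) {
        // Try to publish the new maximum; another thread may beat us to it.
        hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
        if (obsv == prev) return now;   // we published 'now'
        prev = obsv;                    // someone else advanced max_hrtime
      }
      return prev;                      // never less than the published maximum
    }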

     1 /*
     2  * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 // do not include  precompiled  header file
    26 # include "incls/_os_solaris.cpp.incl"
    28 // put OS-includes here
    29 # include <dlfcn.h>
    30 # include <errno.h>
    31 # include <link.h>
    32 # include <poll.h>
    33 # include <pthread.h>
    34 # include <pwd.h>
    35 # include <schedctl.h>
    36 # include <setjmp.h>
    37 # include <signal.h>
    38 # include <stdio.h>
    39 # include <alloca.h>
    40 # include <sys/filio.h>
    41 # include <sys/ipc.h>
    42 # include <sys/lwp.h>
    43 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
    44 # include <sys/mman.h>
    45 # include <sys/processor.h>
    46 # include <sys/procset.h>
    47 # include <sys/pset.h>
    48 # include <sys/resource.h>
    49 # include <sys/shm.h>
    50 # include <sys/socket.h>
    51 # include <sys/stat.h>
    52 # include <sys/systeminfo.h>
    53 # include <sys/time.h>
    54 # include <sys/times.h>
    55 # include <sys/types.h>
    56 # include <sys/wait.h>
    57 # include <sys/utsname.h>
    58 # include <thread.h>
    59 # include <unistd.h>
    60 # include <sys/priocntl.h>
    61 # include <sys/rtpriocntl.h>
    62 # include <sys/tspriocntl.h>
    63 # include <sys/iapriocntl.h>
    64 # include <sys/loadavg.h>
    65 # include <string.h>
    67 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
    68 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
    70 #define MAX_PATH (2 * K)
    72 // for timer info max values which include all bits
    73 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
    75 #ifdef _GNU_SOURCE
    76 // See bug #6514594
    77 extern "C" int madvise(caddr_t, size_t, int);
    78 extern "C"  int memcntl(caddr_t addr, size_t len, int cmd, caddr_t  arg,
    79      int attr, int mask);
    80 #endif //_GNU_SOURCE
    82 /*
    83   MPSS Changes Start.
    84   The JVM binary needs to be built and run on pre-Solaris 9
    85   systems, but the constants needed by MPSS are only in Solaris 9
    86   header files.  They are textually replicated here to allow
    87   building on earlier systems.  Once building on Solaris 8 is
    88   no longer a requirement, these #defines can be replaced by ordinary
    89   system .h inclusion.
    91   In earlier versions of the  JDK and Solaris, we used ISM for large pages.
    92   But ISM requires shared memory to achieve this and thus has many caveats.
    93   MPSS is fully transparent and is a cleaner way to get large pages.
    94   Although we still keep ISM for backward compatibility, as well as
    95   to give the opportunity to use large pages on older systems, it is
    96   recommended that MPSS be used for Solaris 9 and above.
    98 */
   100 #ifndef MC_HAT_ADVISE
   102 struct memcntl_mha {
   103   uint_t          mha_cmd;        /* command(s) */
   104   uint_t          mha_flags;
   105   size_t          mha_pagesize;
   106 };
   107 #define MC_HAT_ADVISE   7       /* advise hat map size */
   108 #define MHA_MAPSIZE_VA  0x1     /* set preferred page size */
   109 #define MAP_ALIGN       0x200   /* addr specifies alignment */
   111 #endif
   112 // MPSS Changes End.
   115 // Here are some liblgrp types from sys/lgrp_user.h to be able to
   116 // compile on older systems without this header file.
   118 #ifndef MADV_ACCESS_LWP
   119 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
   120 #endif
   121 #ifndef MADV_ACCESS_MANY
   122 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
   123 #endif
   125 #ifndef LGRP_RSRC_CPU
   126 # define LGRP_RSRC_CPU           0       /* CPU resources */
   127 #endif
   128 #ifndef LGRP_RSRC_MEM
   129 # define LGRP_RSRC_MEM           1       /* memory resources */
   130 #endif
   132 // Some more macros from sys/mman.h that are not present in Solaris 8.
   134 #ifndef MAX_MEMINFO_CNT
   135 /*
   136  * info_req request type definitions for meminfo
   137  * request types starting with MEMINFO_V are used for Virtual addresses
   138  * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
   139  * addresses
   140  */
   141 # define MEMINFO_SHIFT           16
   142 # define MEMINFO_MASK            (0xFF << MEMINFO_SHIFT)
   143 # define MEMINFO_VPHYSICAL       (0x01 << MEMINFO_SHIFT) /* get physical addr */
   144 # define MEMINFO_VLGRP           (0x02 << MEMINFO_SHIFT) /* get lgroup */
   145 # define MEMINFO_VPAGESIZE       (0x03 << MEMINFO_SHIFT) /* size of phys page */
   146 # define MEMINFO_VREPLCNT        (0x04 << MEMINFO_SHIFT) /* no. of replica */
   147 # define MEMINFO_VREPL           (0x05 << MEMINFO_SHIFT) /* physical replica */
   148 # define MEMINFO_VREPL_LGRP      (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
   149 # define MEMINFO_PLGRP           (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */
   151 /* maximum number of addresses meminfo() can process at a time */
   152 # define MAX_MEMINFO_CNT 256
   154 /* maximum number of request types */
   155 # define MAX_MEMINFO_REQ 31
   156 #endif
   158 // see thr_setprio(3T) for the basis of these numbers
   159 #define MinimumPriority 0
   160 #define NormalPriority  64
   161 #define MaximumPriority 127
   163 // Values for ThreadPriorityPolicy == 1
   164 int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
   165                                         80, 96, 112, 124, 127 };
   167 // System parameters used internally
   168 static clock_t clock_tics_per_sec = 100;
   170 // For diagnostics to print a message once. see run_periodic_checks
   171 static bool check_addr0_done = false;
   172 static sigset_t check_signal_done;
   173 static bool check_signals = true;
   175 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
   176 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
   178 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
   181 // "default" initializers for missing libc APIs
   182 extern "C" {
   183   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
   184   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
   186   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
   187   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
   188 }
   190 // "default" initializers for pthread-based synchronization
   191 extern "C" {
   192   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
   193   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
   194 }
   196 // Thread Local Storage
   197 // This is common to all Solaris platforms so it is defined here,
   198 // in this common file.
   199 // The declarations are in the os_cpu threadLS*.hpp files.
   200 //
   201 // Static member initialization for TLS
   202 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
   204 #ifndef PRODUCT
   205 #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
   207 int ThreadLocalStorage::_tcacheHit = 0;
   208 int ThreadLocalStorage::_tcacheMiss = 0;
   210 void ThreadLocalStorage::print_statistics() {
   211   int total = _tcacheMiss+_tcacheHit;
   212   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
   213                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
   214 }
   215 #undef _PCT
   216 #endif // PRODUCT
   218 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
   219                                                         int index) {
   220   Thread *thread = get_thread_slow();
   221   if (thread != NULL) {
   222     address sp = os::current_stack_pointer();
   223     guarantee(thread->_stack_base == NULL ||
   224               (sp <= thread->_stack_base &&
   225                  sp >= thread->_stack_base - thread->_stack_size) ||
   226                is_error_reported(),
   227               "sp must be inside of selected thread stack");
   229     thread->_self_raw_id = raw_id;  // mark for quick retrieval
   230     _get_thread_cache[ index ] = thread;
   231   }
   232   return thread;
   233 }
   236 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
   237 #define NO_CACHED_THREAD ((Thread*)all_zero)
   239 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
   241   // Store the new value before updating the cache to prevent a race
   242   // between get_thread_via_cache_slowly() and this store operation.
   243   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
   245   // Update thread cache with new thread if setting on thread create,
   246   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
   247   uintptr_t raw = pd_raw_thread_id();
   248   int ix = pd_cache_index(raw);
   249   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
   250 }
   252 void ThreadLocalStorage::pd_init() {
   253   for (int i = 0; i < _pd_cache_size; i++) {
   254     _get_thread_cache[i] = NO_CACHED_THREAD;
   255   }
   256 }
   258 // Invalidate all the caches (happens to be the same as pd_init).
   259 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
   261 #undef NO_CACHED_THREAD
   263 // END Thread Local Storage
   265 static inline size_t adjust_stack_size(address base, size_t size) {
   266   if ((ssize_t)size < 0) {
   267     // 4759953: Compensate for ridiculous stack size.
   268     size = max_intx;
   269   }
   270   if (size > (size_t)base) {
   271     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
   272     size = (size_t)base;
   273   }
   274   return size;
   275 }
   277 static inline stack_t get_stack_info() {
   278   stack_t st;
   279   int retval = thr_stksegment(&st);
   280   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
   281   assert(retval == 0, "incorrect return value from thr_stksegment");
   282   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
   283   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
   284   return st;
   285 }
   287 address os::current_stack_base() {
   288   int r = thr_main() ;
   289   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
   290   bool is_primordial_thread = r;
   292   // Workaround 4352906, avoid calls to thr_stksegment by
   293   // thr_main after the first one (it looks like we trash
   294   // some data, causing the value for ss_sp to be incorrect).
   295   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
   296     stack_t st = get_stack_info();
   297     if (is_primordial_thread) {
   298       // cache initial value of stack base
   299       os::Solaris::_main_stack_base = (address)st.ss_sp;
   300     }
   301     return (address)st.ss_sp;
   302   } else {
   303     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
   304     return os::Solaris::_main_stack_base;
   305   }
   306 }
   308 size_t os::current_stack_size() {
   309   size_t size;
   311   int r = thr_main() ;
   312   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
   313   if(!r) {
   314     size = get_stack_info().ss_size;
   315   } else {
   316     struct rlimit limits;
   317     getrlimit(RLIMIT_STACK, &limits);
   318     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
   319   }
   320   // base may not be page aligned
   321   address base = current_stack_base();
   322   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
   323   return (size_t)(base - bottom);
   324 }
   326 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
   327   return localtime_r(clock, res);
   328 }
   330 // interruptible infrastructure
   332 // setup_interruptible saves the thread state before going into an
   333 // interruptible system call.
   334 // The saved state is used to restore the thread to
   335 // its former state whether or not an interrupt is received.
   336 // Used by classloader os::read
   337 // hpi calls skip this layer and stay in _thread_in_native
   339 void os::Solaris::setup_interruptible(JavaThread* thread) {
   341   JavaThreadState thread_state = thread->thread_state();
   343   assert(thread_state != _thread_blocked, "Coming from the wrong thread");
   344   assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
   345   OSThread* osthread = thread->osthread();
   346   osthread->set_saved_interrupt_thread_state(thread_state);
   347   thread->frame_anchor()->make_walkable(thread);
   348   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
   349 }
   351 // Version of setup_interruptible() for threads that are already in
   352 // _thread_blocked. Used by os_sleep().
   353 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
   354   thread->frame_anchor()->make_walkable(thread);
   355 }
   357 JavaThread* os::Solaris::setup_interruptible() {
   358   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
   359   setup_interruptible(thread);
   360   return thread;
   361 }
   363 void os::Solaris::try_enable_extended_io() {
   364   typedef int (*enable_extended_FILE_stdio_t)(int, int);
   366   if (!UseExtendedFileIO) {
   367     return;
   368   }
   370   enable_extended_FILE_stdio_t enabler =
   371     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
   372                                          "enable_extended_FILE_stdio");
   373   if (enabler) {
   374     enabler(-1, -1);
   375   }
   376 }
   379 #ifdef ASSERT
   381 JavaThread* os::Solaris::setup_interruptible_native() {
   382   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
   383   JavaThreadState thread_state = thread->thread_state();
   384   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
   385   return thread;
   386 }
   388 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
   389   JavaThreadState thread_state = thread->thread_state();
   390   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
   391 }
   392 #endif
   394 // cleanup_interruptible reverses the effects of setup_interruptible
   395 // setup_interruptible_already_blocked() does not need any cleanup.
   397 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
   398   OSThread* osthread = thread->osthread();
   400   ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
   401 }
   403 // I/O interruption related counters called in _INTERRUPTIBLE
   405 void os::Solaris::bump_interrupted_before_count() {
   406   RuntimeService::record_interrupted_before_count();
   407 }
   409 void os::Solaris::bump_interrupted_during_count() {
   410   RuntimeService::record_interrupted_during_count();
   411 }
   413 static int _processors_online = 0;
   415          jint os::Solaris::_os_thread_limit = 0;
   416 volatile jint os::Solaris::_os_thread_count = 0;
   418 julong os::available_memory() {
   419   return Solaris::available_memory();
   420 }
   422 julong os::Solaris::available_memory() {
   423   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
   424 }
   426 julong os::Solaris::_physical_memory = 0;
   428 julong os::physical_memory() {
   429    return Solaris::physical_memory();
   430 }
   432 julong os::allocatable_physical_memory(julong size) {
   433 #ifdef _LP64
   434    return size;
   435 #else
   436    julong result = MIN2(size, (julong)3835*M);
   437    if (!is_allocatable(result)) {
   438      // Memory allocations will be aligned but the alignment
   439      // is not known at this point.  Alignments will
   440      // be at most LargePageSizeInBytes.  Protect
   441      // allocations from being aligned up to illegal
   442      // values; at this point 2G is illegal.
   443      julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes;
   444      result =  MIN2(size, reasonable_size);
   445    }
   446    return result;
   447 #endif
   448 }
   450 static hrtime_t first_hrtime = 0;
   451 static const hrtime_t hrtime_hz = 1000*1000*1000;
   452 const int LOCK_BUSY = 1;
   453 const int LOCK_FREE = 0;
   454 const int LOCK_INVALID = -1;
   455 static volatile hrtime_t max_hrtime = 0;
   456 static volatile int max_hrtime_lock = LOCK_FREE;     // Update counter with LSB as lock-in-progress
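        // Note, per the changeset summary above: max_hrtime is the published maximum
        // that keeps os::javaTimeNanos() monotonic, and it is read with the new
        // Atomic::load() so that 32-bit platforms do not observe a torn jlong value.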
   459 void os::Solaris::initialize_system_info() {
   460   _processor_count = sysconf(_SC_NPROCESSORS_CONF);
   461   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
   462   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
   463 }
   465 int os::active_processor_count() {
   466   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
   467   pid_t pid = getpid();
   468   psetid_t pset = PS_NONE;
   469   // Are we running in a processor set or is there any processor set around?
   470   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
   471     uint_t pset_cpus;
   472     // Query the number of cpus available to us.
   473     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
   474       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
   475       _processors_online = pset_cpus;
   476       return pset_cpus;
   477     }
   478   }
   479   // Otherwise return number of online cpus
   480   return online_cpus;
   481 }
   483 static bool find_processors_in_pset(psetid_t        pset,
   484                                     processorid_t** id_array,
   485                                     uint_t*         id_length) {
   486   bool result = false;
   487   // Find the number of processors in the processor set.
   488   if (pset_info(pset, NULL, id_length, NULL) == 0) {
   489     // Make up an array to hold their ids.
   490     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
   491     // Fill in the array with their processor ids.
   492     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
   493       result = true;
   494     }
   495   }
   496   return result;
   497 }
   499 // Callers of find_processors_online() must tolerate imprecise results --
   500 // the system configuration can change asynchronously because of DR
   501 // or explicit psradm operations.
   502 //
   503 // We also need to take care that the loop (below) terminates as the
   504 // number of processors online can change between the _SC_NPROCESSORS_ONLN
   505 // request and the loop that builds the list of processor ids.   Unfortunately
   506 // there's no reliable way to determine the maximum valid processor id,
   507 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
   508 // man pages, which claim the processor id set is "sparse, but
   509 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
   510 // exit the loop.
   511 //
   512 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
   513 // not available on S8.0.
   515 static bool find_processors_online(processorid_t** id_array,
   516                                    uint*           id_length) {
   517   const processorid_t MAX_PROCESSOR_ID = 100000 ;
   518   // Find the number of processors online.
   519   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
   520   // Make up an array to hold their ids.
   521   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
   522   // Processors need not be numbered consecutively.
   523   long found = 0;
   524   processorid_t next = 0;
   525   while (found < *id_length && next < MAX_PROCESSOR_ID) {
   526     processor_info_t info;
   527     if (processor_info(next, &info) == 0) {
   528       // NB, PI_NOINTR processors are effectively online ...
   529       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
   530         (*id_array)[found] = next;
   531         found += 1;
   532       }
   533     }
   534     next += 1;
   535   }
   536   if (found < *id_length) {
   537       // The loop above didn't identify the expected number of processors.
   538       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
   539       // and re-running the loop, above, but there's no guarantee of progress
   540       // if the system configuration is in flux.  Instead, we just return what
   541       // we've got.  Note that in the worst case find_processors_online() could
   542       // return an empty set.  (As a fall-back in the case of the empty set we
   543       // could just return the ID of the current processor).
   544       *id_length = found ;
   545   }
   547   return true;
   548 }
   550 static bool assign_distribution(processorid_t* id_array,
   551                                 uint           id_length,
   552                                 uint*          distribution,
   553                                 uint           distribution_length) {
   554   // We assume we can assign processorid_t's to uint's.
   555   assert(sizeof(processorid_t) == sizeof(uint),
   556          "can't convert processorid_t to uint");
   557   // Quick check to see if we won't succeed.
   558   if (id_length < distribution_length) {
   559     return false;
   560   }
   561   // Assign processor ids to the distribution.
   562   // Try to shuffle processors to distribute work across boards,
   563   // assuming 4 processors per board.
   564   const uint processors_per_board = ProcessDistributionStride;
   565   // Find the maximum processor id.
   566   processorid_t max_id = 0;
   567   for (uint m = 0; m < id_length; m += 1) {
   568     max_id = MAX2(max_id, id_array[m]);
   569   }
   570   // The next id, to limit loops.
   571   const processorid_t limit_id = max_id + 1;
   572   // Make up markers for available processors.
   573   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id);
   574   for (uint c = 0; c < limit_id; c += 1) {
   575     available_id[c] = false;
   576   }
   577   for (uint a = 0; a < id_length; a += 1) {
   578     available_id[id_array[a]] = true;
   579   }
   580   // Step by "boards", then by "slot", copying to "assigned".
   581   // NEEDS_CLEANUP: The assignment of processors should be stateful,
   582   //                remembering which processors have been assigned by
   583   //                previous calls, etc., so as to distribute several
   584   //                independent calls of this method.  It would be nice
   585   //                to have an API that would let us ask
   586   //                how many processes are bound to a processor,
   587   //                but we don't have that, either.
   588   //                In the short term, "board" is static so that
   589   //                subsequent distributions don't all start at board 0.
   590   static uint board = 0;
   591   uint assigned = 0;
   592   // Until we've found enough processors ....
   593   while (assigned < distribution_length) {
   594     // ... find the next available processor in the board.
   595     for (uint slot = 0; slot < processors_per_board; slot += 1) {
   596       uint try_id = board * processors_per_board + slot;
   597       if ((try_id < limit_id) && (available_id[try_id] == true)) {
   598         distribution[assigned] = try_id;
   599         available_id[try_id] = false;
   600         assigned += 1;
   601         break;
   602       }
   603     }
   604     board += 1;
   605     if (board * processors_per_board + 0 >= limit_id) {
   606       board = 0;
   607     }
   608   }
   609   if (available_id != NULL) {
   610     FREE_C_HEAP_ARRAY(bool, available_id);
   611   }
   612   return true;
   613 }
   615 bool os::distribute_processes(uint length, uint* distribution) {
   616   bool result = false;
   617   // Find the processor id's of all the available CPUs.
   618   processorid_t* id_array  = NULL;
   619   uint           id_length = 0;
   620   // There are some races between querying information and using it,
   621   // since processor sets can change dynamically.
   622   psetid_t pset = PS_NONE;
   623   // Are we running in a processor set?
   624   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
   625     result = find_processors_in_pset(pset, &id_array, &id_length);
   626   } else {
   627     result = find_processors_online(&id_array, &id_length);
   628   }
   629   if (result == true) {
   630     if (id_length >= length) {
   631       result = assign_distribution(id_array, id_length, distribution, length);
   632     } else {
   633       result = false;
   634     }
   635   }
   636   if (id_array != NULL) {
   637     FREE_C_HEAP_ARRAY(processorid_t, id_array);
   638   }
   639   return result;
   640 }
   642 bool os::bind_to_processor(uint processor_id) {
   643   // We assume that a processorid_t can be stored in a uint.
   644   assert(sizeof(uint) == sizeof(processorid_t),
   645          "can't convert uint to processorid_t");
   646   int bind_result =
   647     processor_bind(P_LWPID,                       // bind LWP.
   648                    P_MYID,                        // bind current LWP.
   649                    (processorid_t) processor_id,  // id.
   650                    NULL);                         // don't return old binding.
   651   return (bind_result == 0);
   652 }
   654 bool os::getenv(const char* name, char* buffer, int len) {
   655   char* val = ::getenv( name );
   656   if ( val == NULL
   657   ||   strlen(val) + 1  >  len ) {
   658     if (len > 0)  buffer[0] = 0; // return a null string
   659     return false;
   660   }
   661   strcpy( buffer, val );
   662   return true;
   663 }
   666 // Return true if the process is running with special (setuid/setgid) privileges.
   668 bool os::have_special_privileges() {
   669   static bool init = false;
   670   static bool privileges = false;
   671   if (!init) {
   672     privileges = (getuid() != geteuid()) || (getgid() != getegid());
   673     init = true;
   674   }
   675   return privileges;
   676 }
   679 static char* get_property(char* name, char* buffer, int buffer_size) {
   680   if (os::getenv(name, buffer, buffer_size)) {
   681     return buffer;
   682   }
   683   static char empty[] = "";
   684   return empty;
   685 }
   688 void os::init_system_properties_values() {
   689   char arch[12];
   690   sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
   692   // The next steps are taken in the product version:
   693   //
   694   // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
   695   // This library should be located at:
   696   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
   697   //
   698   // If "/jre/lib/" appears at the right place in the path, then we
   699   // assume libjvm[_g].so is installed in a JDK and we use this path.
   700   //
   701   // Otherwise exit with message: "Could not create the Java virtual machine."
   702   //
   703   // The following extra steps are taken in the debugging version:
   704   //
   705   // If "/jre/lib/" does NOT appear at the right place in the path
   706   // instead of exit check for $JAVA_HOME environment variable.
   707   //
   708   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
   709   // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
   710   // it looks like libjvm[_g].so is installed there
   711   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
   712   //
   713   // Otherwise exit.
   714   //
   715   // Important note: if the location of libjvm.so changes this
   716   // code needs to be changed accordingly.
   718   // The next few definitions allow the code to be verbatim:
   719 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
   720 #define free(p) FREE_C_HEAP_ARRAY(char, p)
   721 #define getenv(n) ::getenv(n)
   723 #define EXTENSIONS_DIR  "/lib/ext"
   724 #define ENDORSED_DIR    "/lib/endorsed"
   725 #define COMMON_DIR      "/usr/jdk/packages"
   727   {
   728     /* sysclasspath, java_home, dll_dir */
   729     {
   730         char *home_path;
   731         char *dll_path;
   732         char *pslash;
   733         char buf[MAXPATHLEN];
   734         os::jvm_path(buf, sizeof(buf));
   736         // Found the full path to libjvm.so.
   737         // Now cut the path to <java_home>/jre if we can.
   738         *(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
   739         pslash = strrchr(buf, '/');
   740         if (pslash != NULL)
   741             *pslash = '\0';           /* get rid of /{client|server|hotspot} */
   742         dll_path = malloc(strlen(buf) + 1);
   743         if (dll_path == NULL)
   744             return;
   745         strcpy(dll_path, buf);
   746         Arguments::set_dll_dir(dll_path);
   748         if (pslash != NULL) {
   749             pslash = strrchr(buf, '/');
   750             if (pslash != NULL) {
   751                 *pslash = '\0';       /* get rid of /<arch> */
   752                 pslash = strrchr(buf, '/');
   753                 if (pslash != NULL)
   754                     *pslash = '\0';   /* get rid of /lib */
   755             }
   756         }
   758         home_path = malloc(strlen(buf) + 1);
   759         if (home_path == NULL)
   760             return;
   761         strcpy(home_path, buf);
   762         Arguments::set_java_home(home_path);
   764         if (!set_boot_path('/', ':'))
   765             return;
   766     }
   768     /*
   769      * Where to look for native libraries
   770      */
   771     {
   772       // Use dlinfo() to determine the correct java.library.path.
   773       //
   774       // If we're launched by the Java launcher, and the user
   775       // does not set java.library.path explicitly on the commandline,
   776       // the Java launcher sets LD_LIBRARY_PATH for us and unsets
   777       // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
   778       // dlinfo returns LD_LIBRARY_PATH + crle settings (including
   779       // /usr/lib), which is exactly what we want.
   780       //
   781       // If the user does set java.library.path, it completely
   782       // overwrites this setting, and always has.
   783       //
   784       // If we're not launched by the Java launcher, we may
   785       // get here with any/all of the LD_LIBRARY_PATH[_32|64]
   786       // settings.  Again, dlinfo does exactly what we want.
   788       Dl_serinfo     _info, *info = &_info;
   789       Dl_serpath     *path;
   790       char*          library_path;
   791       char           *common_path;
   792       int            i;
   794       // determine search path count and required buffer size
   795       if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
   796         vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
   797       }
   799       // allocate new buffer and initialize
   800       info = (Dl_serinfo*)malloc(_info.dls_size);
   801       if (info == NULL) {
   802         vm_exit_out_of_memory(_info.dls_size,
   803                               "init_system_properties_values info");
   804       }
   805       info->dls_size = _info.dls_size;
   806       info->dls_cnt = _info.dls_cnt;
   808       // obtain search path information
   809       if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
   810         free(info);
   811         vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
   812       }
   814       path = &info->dls_serpath[0];
   816       // Note: Due to a legacy implementation, most of the library path
   817       // is set in the launcher.  This was to accommodate linking restrictions
   818       // on legacy Solaris implementations (which are no longer supported).
   819       // Eventually, all the library path setting will be done here.
   820       //
   821       // However, to prevent the proliferation of improperly built native
   822       // libraries, the new path component /usr/jdk/packages is added here.
   824       // Determine the actual CPU architecture.
   825       char cpu_arch[12];
   826       sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
   827 #ifdef _LP64
   828       // If we are a 64-bit vm, perform the following translations:
   829       //   sparc   -> sparcv9
   830       //   i386    -> amd64
   831       if (strcmp(cpu_arch, "sparc") == 0)
   832         strcat(cpu_arch, "v9");
   833       else if (strcmp(cpu_arch, "i386") == 0)
   834         strcpy(cpu_arch, "amd64");
   835 #endif
   837       // Construct the invariant part of ld_library_path. Note that the
   838       // space for the colon and the trailing null is provided by the
   839       // nulls included by the sizeof operator.
   840       size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
   841       common_path = malloc(bufsize);
   842       if (common_path == NULL) {
   843         free(info);
   844         vm_exit_out_of_memory(bufsize,
   845                               "init_system_properties_values common_path");
   846       }
   847       sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);
   849       // struct size is more than sufficient for the path components obtained
   850       // through the dlinfo() call, so only add additional space for the path
   851       // components explicitly added here.
   852       bufsize = info->dls_size + strlen(common_path);
   853       library_path = malloc(bufsize);
   854       if (library_path == NULL) {
   855         free(info);
   856         free(common_path);
   857         vm_exit_out_of_memory(bufsize,
   858                               "init_system_properties_values library_path");
   859       }
   860       library_path[0] = '\0';
   862       // Construct the desired Java library path from the linker's library
   863       // search path.
   864       //
   865       // For compatibility, it is optimal that we insert the additional path
   866       // components specific to the Java VM after those components specified
   867       // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
   868       // infrastructure.
   869       if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
   870         strcpy(library_path, common_path);
   871       } else {
   872         int inserted = 0;
   873         for (i = 0; i < info->dls_cnt; i++, path++) {
   874           uint_t flags = path->dls_flags & LA_SER_MASK;
   875           if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
   876             strcat(library_path, common_path);
   877             strcat(library_path, os::path_separator());
   878             inserted = 1;
   879           }
   880           strcat(library_path, path->dls_name);
   881           strcat(library_path, os::path_separator());
   882         }
   883         // eliminate trailing path separator
   884         library_path[strlen(library_path)-1] = '\0';
   885       }
   887       // happens before argument parsing - can't use a trace flag
   888       // tty->print_raw("init_system_properties_values: native lib path: ");
   889       // tty->print_raw_cr(library_path);
   891       // callee copies into its own buffer
   892       Arguments::set_library_path(library_path);
   894       free(common_path);
   895       free(library_path);
   896       free(info);
   897     }
   899     /*
   900      * Extensions directories.
   901      *
   902      * Note that the space for the colon and the trailing null is provided
   903      * by the nulls included by the sizeof operator (so actually one byte more
   904      * than necessary is allocated).
   905      */
   906     {
   907         char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
   908             sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
   909             sizeof(EXTENSIONS_DIR));
   910         sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
   911             Arguments::get_java_home());
   912         Arguments::set_ext_dirs(buf);
   913     }
   915     /* Endorsed standards default directory. */
   916     {
   917         char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
   918         sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
   919         Arguments::set_endorsed_dirs(buf);
   920     }
   921   }
   923 #undef malloc
   924 #undef free
   925 #undef getenv
   926 #undef EXTENSIONS_DIR
   927 #undef ENDORSED_DIR
   928 #undef COMMON_DIR
   930 }
   932 void os::breakpoint() {
   933   BREAKPOINT;
   934 }
   936 bool os::obsolete_option(const JavaVMOption *option)
   937 {
   938   if (!strncmp(option->optionString, "-Xt", 3)) {
   939     return true;
   940   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
   941     return true;
   942   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
   943     return true;
   944   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
   945     return true;
   946   }
   947   return false;
   948 }
   950 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
   951   address  stackStart  = (address)thread->stack_base();
   952   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
   953   if (sp < stackStart && sp >= stackEnd ) return true;
   954   return false;
   955 }
   957 extern "C" void breakpoint() {
   958   // use debugger to set breakpoint here
   959 }
   961 // Returns an estimate of the current stack pointer. Result must be guaranteed to
   962 // point into the calling thread's stack, and be no lower than the current stack
   963 // pointer.
   964 address os::current_stack_pointer() {
   965   volatile int dummy;
   966   address sp = (address)&dummy + 8;     // %%%% need to confirm if this is right
   967   return sp;
   968 }
   970 static thread_t main_thread;
   972 // Thread start routine for all new Java threads
   973 extern "C" void* java_start(void* thread_addr) {
   974   // Try to randomize the cache line index of hot stack frames.
   975   // This helps when threads with the same stack traces evict each other's
   976   // cache lines. The threads can be either from the same JVM instance, or
   977   // from different JVM instances. The benefit is especially true for
   978   // processors with hyperthreading technology.
   979   static int counter = 0;
   980   int pid = os::current_process_id();
   981   alloca(((pid ^ counter++) & 7) * 128);
   983   int prio;
   984   Thread* thread = (Thread*)thread_addr;
   985   OSThread* osthr = thread->osthread();
   987   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
   988   thread->_schedctl = (void *) schedctl_init () ;
   990   if (UseNUMA) {
   991     int lgrp_id = os::numa_get_group_id();
   992     if (lgrp_id != -1) {
   993       thread->set_lgrp_id(lgrp_id);
   994     }
   995   }
   997   // If the creator called set priority before we started,
   998   // we need to call set priority now that we have an lwp.
   999   // Get the priority from libthread and set the priority
  1000   // for the new Solaris lwp.
  1001   if ( osthr->thread_id() != -1 ) {
  1002     if ( UseThreadPriorities ) {
  1003       thr_getprio(osthr->thread_id(), &prio);
  1004       if (ThreadPriorityVerbose) {
  1005         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
  1006                       osthr->thread_id(), osthr->lwp_id(), prio );
  1007       }
  1008       os::set_native_priority(thread, prio);
  1009     }
  1010   } else if (ThreadPriorityVerbose) {
  1011     warning("Can't set priority in _start routine, thread id hasn't been set\n");
  1012   }
  1014   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
  1016   // initialize signal mask for this thread
  1017   os::Solaris::hotspot_sigmask(thread);
  1019   thread->run();
  1021   // One less thread is executing
  1022   // When the VMThread gets here, the main thread may have already exited
  1023   // which frees the CodeHeap containing the Atomic::dec code
  1024   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
  1025     Atomic::dec(&os::Solaris::_os_thread_count);
  1026   }
  1028   if (UseDetachedThreads) {
  1029     thr_exit(NULL);
  1030     ShouldNotReachHere();
  1031   }
  1032   return NULL;
  1033 }
  1035 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  1036   // Allocate the OSThread object
  1037   OSThread* osthread = new OSThread(NULL, NULL);
  1038   if (osthread == NULL) return NULL;
  1040   // Store info on the Solaris thread into the OSThread
  1041   osthread->set_thread_id(thread_id);
  1042   osthread->set_lwp_id(_lwp_self());
  1043   thread->_schedctl = (void *) schedctl_init () ;
  1045   if (UseNUMA) {
  1046     int lgrp_id = os::numa_get_group_id();
  1047     if (lgrp_id != -1) {
  1048       thread->set_lgrp_id(lgrp_id);
  1049     }
  1050   }
  1052   if ( ThreadPriorityVerbose ) {
  1053     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
  1054                   osthread->thread_id(), osthread->lwp_id() );
  1055   }
  1057   // Initial thread state is INITIALIZED, not SUSPENDED
  1058   osthread->set_state(INITIALIZED);
  1060   return osthread;
  1061 }
  1063 void os::Solaris::hotspot_sigmask(Thread* thread) {
  1065   //Save caller's signal mask
  1066   sigset_t sigmask;
  1067   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  1068   OSThread *osthread = thread->osthread();
  1069   osthread->set_caller_sigmask(sigmask);
  1071   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  1072   if (!ReduceSignalUsage) {
  1073     if (thread->is_VM_thread()) {
  1074       // Only the VM thread handles BREAK_SIGNAL ...
  1075       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
  1076     } else {
  1077       // ... all other threads block BREAK_SIGNAL
  1078       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
  1079       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
  1080     }
  1081   }
  1082 }
  1084 bool os::create_attached_thread(JavaThread* thread) {
  1085 #ifdef ASSERT
  1086   thread->verify_not_published();
  1087 #endif
  1088   OSThread* osthread = create_os_thread(thread, thr_self());
  1089   if (osthread == NULL) {
  1090      return false;
  1091   }
  1093   // Initial thread state is RUNNABLE
  1094   osthread->set_state(RUNNABLE);
  1095   thread->set_osthread(osthread);
  1097   // initialize signal mask for this thread
  1098   // and save the caller's signal mask
  1099   os::Solaris::hotspot_sigmask(thread);
  1101   return true;
  1102 }
  1104 bool os::create_main_thread(JavaThread* thread) {
  1105 #ifdef ASSERT
  1106   thread->verify_not_published();
  1107 #endif
  1108   if (_starting_thread == NULL) {
  1109     _starting_thread = create_os_thread(thread, main_thread);
  1110      if (_starting_thread == NULL) {
  1111         return false;
  1112      }
  1113   }
  1115   // The primordial thread is runnable from the start
  1116   _starting_thread->set_state(RUNNABLE);
  1118   thread->set_osthread(_starting_thread);
  1120   // initialize signal mask for this thread
  1121   // and save the caller's signal mask
  1122   os::Solaris::hotspot_sigmask(thread);
  1124   return true;
  1125 }
  1127 // _T2_libthread is true if we believe we are running with the newer
  1128 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
  1129 bool os::Solaris::_T2_libthread = false;
  1131 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  1132   // Allocate the OSThread object
  1133   OSThread* osthread = new OSThread(NULL, NULL);
  1134   if (osthread == NULL) {
  1135     return false;
  1136   }
  1138   if ( ThreadPriorityVerbose ) {
  1139     char *thrtyp;
  1140     switch ( thr_type ) {
  1141       case vm_thread:
  1142         thrtyp = (char *)"vm";
  1143         break;
  1144       case cgc_thread:
  1145         thrtyp = (char *)"cgc";
  1146         break;
  1147       case pgc_thread:
  1148         thrtyp = (char *)"pgc";
  1149         break;
  1150       case java_thread:
  1151         thrtyp = (char *)"java";
  1152         break;
  1153       case compiler_thread:
  1154         thrtyp = (char *)"compiler";
  1155         break;
  1156       case watcher_thread:
  1157         thrtyp = (char *)"watcher";
  1158         break;
  1159       default:
  1160         thrtyp = (char *)"unknown";
  1161         break;
  1162     }
  1163     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  1164   }
  1166   // Calculate stack size if it's not specified by caller.
  1167   if (stack_size == 0) {
  1168     // The default stack size 1M (2M for LP64).
  1169     stack_size = (BytesPerWord >> 2) * K * K;
  1171     switch (thr_type) {
  1172     case os::java_thread:
  1173       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
  1174       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
  1175       break;
  1176     case os::compiler_thread:
  1177       if (CompilerThreadStackSize > 0) {
  1178         stack_size = (size_t)(CompilerThreadStackSize * K);
  1179         break;
  1180       } // else fall through:
  1181         // use VMThreadStackSize if CompilerThreadStackSize is not defined
  1182     case os::vm_thread:
  1183     case os::pgc_thread:
  1184     case os::cgc_thread:
  1185     case os::watcher_thread:
  1186       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
  1187       break;
  1188     }
  1189   }
  1190   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
  1192   // Initial state is ALLOCATED but not INITIALIZED
  1193   osthread->set_state(ALLOCATED);
  1195   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
  1196     // We got lots of threads. Check if we still have some address space left.
  1197     // There needs to be at least 5Mb of unreserved address space. We check by
  1198     // trying to reserve some.
  1199     const size_t VirtualMemoryBangSize = 20*K*K;
  1200     char* mem = os::reserve_memory(VirtualMemoryBangSize);
  1201     if (mem == NULL) {
  1202       delete osthread;
  1203       return false;
  1204     } else {
  1205       // Release the memory again
  1206       os::release_memory(mem, VirtualMemoryBangSize);
  1207     }
  1208   }
  1210   // Setup osthread because the child thread may need it.
  1211   thread->set_osthread(osthread);
  1213   // Create the Solaris thread
  1214   // Use explicit THR_BOUND for the T2_libthread case, in case that
  1215   // assumption is not accurate; our alternate signal stack handling
  1216   // is based on it and requires bound threads.
  1217   thread_t tid = 0;
  1218   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
  1219                    | ((UseBoundThreads || os::Solaris::T2_libthread() ||
  1220                        (thr_type == vm_thread) ||
  1221                        (thr_type == cgc_thread) ||
  1222                        (thr_type == pgc_thread) ||
  1223                        (thr_type == compiler_thread && BackgroundCompilation)) ?
  1224                       THR_BOUND : 0);
  1225   int      status;
  1227   // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  1228   //
  1229   // On multiprocessor systems, libthread sometimes under-provisions our
  1230   // process with LWPs.  On a 30-way system, for instance, we could have
  1231   // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  1232   // to our process.  This can result in under-utilization of PEs.
  1233   // I suspect the problem is related to libthread's LWP
  1234   // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  1235   // upcall policy.
  1236   //
  1237   // The following code is palliative -- it attempts to ensure that our
  1238   // process has sufficient LWPs to take advantage of multiple PEs.
  1239   // Proper long-term cures include using user-level threads bound to LWPs
  1240   // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
  1241   // slight timing window with respect to sampling _os_thread_count, but
  1242   // the race is benign.  Also, we should periodically recompute
  1243   // _processors_online as the min of SC_NPROCESSORS_ONLN and
  1244   // the number of PEs in our partition.  You might be tempted to use
  1245   // THR_NEW_LWP here, but I'd recommend against it as that could
  1246   // result in undesirable growth of the libthread's LWP pool.
  1247   // The fix below isn't sufficient; for instance, it doesn't take into account
  1248   // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
  1249   //
  1250   // Some pathologies this scheme doesn't handle:
  1251   // *  Threads can block, releasing the LWPs.  The LWPs can age out.
  1252   //    When a large number of threads become ready again there aren't
  1253   //    enough LWPs available to service them.  This can occur when the
  1254   //    number of ready threads oscillates.
  1255   // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
  1256   //
  1257   // Finally, we should call thr_setconcurrency() periodically to refresh
  1258   // the LWP pool and thwart the LWP age-out mechanism.
  1259   // The "+3" term provides a little slop -- we want to slightly overprovision.
  1261   if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
  1262     if (!(flags & THR_BOUND)) {
  1263       thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
  1264     }
  1265   }
  1266   // Although this doesn't hurt, we should warn of undefined behavior
  1267   // when using unbound T1 threads with schedctl().  This should never
  1268   // happen, as the compiler and VM threads are always created bound
  1269   DEBUG_ONLY(
  1270       if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
  1271           (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
  1272           ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
  1273            (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
  1274          warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
  1275       }
  1276   );
  1279   // Mark that we don't have an lwp or thread id yet.
  1280   // In case we attempt to set the priority before the thread starts.
  1281   osthread->set_lwp_id(-1);
  1282   osthread->set_thread_id(-1);
  1284   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  1285   if (status != 0) {
  1286     if (PrintMiscellaneous && (Verbose || WizardMode)) {
  1287       perror("os::create_thread");
  1288     }
  1289     thread->set_osthread(NULL);
  1290     // Need to clean up stuff we've allocated so far
  1291     delete osthread;
  1292     return false;
  1293   }
  1295   Atomic::inc(&os::Solaris::_os_thread_count);
  1297   // Store info on the Solaris thread into the OSThread
  1298   osthread->set_thread_id(tid);
  1300   // Remember that we created this thread so we can set priority on it
  1301   osthread->set_vm_created();
  1303   // Set the default thread priority otherwise use NormalPriority
  1305   if ( UseThreadPriorities ) {
  1306      thr_setprio(tid, (DefaultThreadPriority == -1) ?
  1307                         java_to_os_priority[NormPriority] :
  1308                         DefaultThreadPriority);
  1309   }
  1311   // Initial thread state is INITIALIZED, not SUSPENDED
  1312   osthread->set_state(INITIALIZED);
  1314   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  1315   return true;
  1316 }
  1318 /* SIGJVM1 and SIGJVM2 are defined only on Solaris 10 and later.  Defining them
  1319  *  here allows builds on earlier Solaris versions to take advantage of the newly
  1320  *  reserved Solaris JVM signals: INTERRUPT_SIGNAL becomes SIGJVM1, ASYNC_SIGNAL
  1321  *  becomes SIGJVM2, and -XX:+UseAltSigs does nothing since these have no conflict.
  1322  */
  1323 #if !defined(SIGJVM1)
  1324 #define SIGJVM1 39
  1325 #define SIGJVM2 40
  1326 #endif
  1328 debug_only(static bool signal_sets_initialized = false);
  1329 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
  1330 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
  1331 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
  1333 bool os::Solaris::is_sig_ignored(int sig) {
  1334       struct sigaction oact;
  1335       sigaction(sig, (struct sigaction*)NULL, &oact);
  1336       void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
  1337                                      : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
  1338       if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
  1339            return true;
  1340       else
  1341            return false;
  1342 }
  1344 // Note: SIGRTMIN is a macro that calls sysconf() so it will
  1345 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
  1346 static bool isJVM1available() {
  1347   return SIGJVM1 < SIGRTMIN;
  1348 }
  1350 void os::Solaris::signal_sets_init() {
  1351   // Should also have an assertion stating we are still single-threaded.
  1352   assert(!signal_sets_initialized, "Already initialized");
  1353   // Fill in signals that are necessarily unblocked for all threads in
  1354   // the VM. Currently, we unblock the following signals:
  1355   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  1356   //                         by -Xrs (=ReduceSignalUsage));
  1357   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  1358   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  1359   // the dispositions or masks wrt these signals.
  1360   // Programs embedding the VM that want to use the above signals for their
  1361   // own purposes must, at this time, use the "-Xrs" option to prevent
  1362   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  1363   // (See bug 4345157, and other related bugs).
  1364   // In reality, though, unblocking these signals is really a nop, since
  1365   // these signals are not blocked by default.
  1366   sigemptyset(&unblocked_sigs);
  1367   sigemptyset(&allowdebug_blocked_sigs);
  1368   sigaddset(&unblocked_sigs, SIGILL);
  1369   sigaddset(&unblocked_sigs, SIGSEGV);
  1370   sigaddset(&unblocked_sigs, SIGBUS);
  1371   sigaddset(&unblocked_sigs, SIGFPE);
  1373   if (isJVM1available()) {
  1374     os::Solaris::set_SIGinterrupt(SIGJVM1);
  1375     os::Solaris::set_SIGasync(SIGJVM2);
  1376   } else if (UseAltSigs) {
  1377     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
  1378     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
  1379   } else {
  1380     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
  1381     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
  1384   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
  1385   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
  1387   if (!ReduceSignalUsage) {
  1388    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
  1389       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
  1390       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
  1392    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
  1393       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
  1394       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
  1396    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
  1397       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
  1398       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
  1401   // Fill in signals that are blocked by all but the VM thread.
  1402   sigemptyset(&vm_sigs);
  1403   if (!ReduceSignalUsage)
  1404     sigaddset(&vm_sigs, BREAK_SIGNAL);
  1405   debug_only(signal_sets_initialized = true);
  1407   // For diagnostics only used in run_periodic_checks
  1408   sigemptyset(&check_signal_done);
  1411 // These are signals that are unblocked while a thread is running Java.
  1412 // (For some reason, they get blocked by default.)
  1413 sigset_t* os::Solaris::unblocked_signals() {
  1414   assert(signal_sets_initialized, "Not initialized");
  1415   return &unblocked_sigs;
  1418 // These are the signals that are blocked while a (non-VM) thread is
  1419 // running Java. Only the VM thread handles these signals.
  1420 sigset_t* os::Solaris::vm_signals() {
  1421   assert(signal_sets_initialized, "Not initialized");
  1422   return &vm_sigs;
  1425 // These are signals that are blocked during cond_wait to allow debugger in
  1426 sigset_t* os::Solaris::allowdebug_blocked_signals() {
  1427   assert(signal_sets_initialized, "Not initialized");
  1428   return &allowdebug_blocked_sigs;
  1431 // First crack at OS-specific initialization, from inside the new thread.
  1432 void os::initialize_thread() {
  1433   int r = thr_main() ;
  1434   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  1435   if (r) {
  1436     JavaThread* jt = (JavaThread *)Thread::current();
  1437     assert(jt != NULL,"Sanity check");
  1438     size_t stack_size;
  1439     address base = jt->stack_base();
  1440     if (Arguments::created_by_java_launcher()) {
  1441       // Use 2MB to allow for Solaris 7 64 bit mode.
  1442       stack_size = JavaThread::stack_size_at_create() == 0
  1443         ? 2048*K : JavaThread::stack_size_at_create();
  1445       // There are rare cases when we may have already used more than
  1446       // the basic stack size allotment before this method is invoked.
  1447       // Attempt to allow for a normally sized java_stack.
  1448       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
  1449       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
  1450     } else {
  1451       // 6269555: If we were not created by a Java launcher, i.e. if we are
  1452       // running embedded in a native application, treat the primordial thread
  1453       // as much like a native attached thread as possible.  This means using
  1454       // the current stack size from thr_stksegment(), unless it is too large
  1455       // to reliably setup guard pages.  A reasonable max size is 8MB.
  1456       size_t current_size = current_stack_size();
  1457       // This should never happen, but just in case....
  1458       if (current_size == 0) current_size = 2 * K * K;
  1459       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
  1461     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
  1462     stack_size = (size_t)(base - bottom);
  1464     assert(stack_size > 0, "Stack size calculation problem");
  1466     if (stack_size > jt->stack_size()) {
  1467       NOT_PRODUCT(
  1468         struct rlimit limits;
  1469         getrlimit(RLIMIT_STACK, &limits);
  1470         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
  1471         assert(size >= jt->stack_size(), "Stack size problem in main thread");
  1473       tty->print_cr(
  1474         "Stack size of %d Kb exceeds current limit of %d Kb.\n"
  1475         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
  1476         "See limit(1) to increase the stack size limit.",
  1477         stack_size / K, jt->stack_size() / K);
  1478       vm_exit(1);
  1480     assert(jt->stack_size() >= stack_size,
  1481           "Attempt to map more stack than was allocated");
  1482     jt->set_stack_size(stack_size);
  1485    // 5/22/01: Right now alternate signal stacks do not handle
  1486    // throwing stack overflow exceptions, see bug 4463178
  1487    // Until a fix is found for this, T2 will NOT imply alternate signal
  1488    // stacks.
  1489    // If using T2 libthread threads, install an alternate signal stack.
  1490    // Because alternate stacks are associated with LWPs on Solaris (see
  1491    // sigaltstack(2)), if using UNBOUND threads, or if UseBoundThreads,
  1492    // we prefer to explicitly stack bang.
  1493    // If not using T2 libthread, but using UseBoundThreads, any threads
  1494    // (primordial thread, jni_attachCurrentThread) that we do not create
  1495    // probably are not bound, and therefore cannot have an alternate
  1496    // signal stack. Since our stack banging code is generated and
  1497    // is shared across threads, all threads must be bound to allow
  1498    // using alternate signal stacks.  The alternative is to interpose
  1499    // on _lwp_create to associate an alt sig stack with each LWP,
  1500    // and this could be a problem when the JVM is embedded.
  1501    // We would prefer to use alternate signal stacks with T2
  1502    // Since there is currently no accurate way to detect T2
  1503    // we do not. Assuming T2 when running T1 causes sig 11s or assertions
  1504    // on installing alternate signal stacks
  1507    // 05/09/03: removed alternate signal stack support for Solaris
  1508    // The alternate signal stack mechanism is no longer needed to
  1509    // handle stack overflow. This is now handled by allocating
  1510    // guard pages (red zone) and stackbanging.
  1511    // Initially the alternate signal stack mechanism was removed because
  1512    // it did not work with T1 libthread. Alternate
  1513    // signal stacks MUST have all threads bound to lwps. Applications
  1514    // can create their own threads and attach them without their being
  1515    // bound under T1. This is frequently the case for the primordial thread.
  1516    // If we were ever to reenable this mechanism we would need to
  1517    // use the dynamic check for T2 libthread.
  1519   os::Solaris::init_thread_fpu_state();
  1524 // Free Solaris resources related to the OSThread
  1525 void os::free_thread(OSThread* osthread) {
  1526   assert(osthread != NULL, "os::free_thread but osthread not set");
  1529   // We are told to free resources of the argument thread,
  1530   // but we can only really operate on the current thread.
  1531   // The main thread must take the VMThread down synchronously
  1532   // before the main thread exits and frees up CodeHeap
  1533   guarantee((Thread::current()->osthread() == osthread
  1534      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  1535   if (Thread::current()->osthread() == osthread) {
  1536     // Restore caller's signal mask
  1537     sigset_t sigmask = osthread->caller_sigmask();
  1538     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  1540   delete osthread;
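// Start a thread that was created suspended (see the INITIALIZED state above)
// by resuming it with thr_continue().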
  1543 void os::pd_start_thread(Thread* thread) {
  1544   int status = thr_continue(thread->osthread()->thread_id());
  1545   assert_status(status == 0, status, "thr_continue failed");
  1549 intx os::current_thread_id() {
  1550   return (intx)thr_self();
  1553 static pid_t _initial_pid = 0;
  1555 int os::current_process_id() {
  1556   return (int)(_initial_pid ? _initial_pid : getpid());
  1559 int os::allocate_thread_local_storage() {
  1560   // %%%       in Win32 this allocates a memory segment pointed to by a
  1561   //           register.  Dan Stein can implement a similar feature in
  1562   //           Solaris.  Alternatively, the VM can do the same thing
  1563   //           explicitly: malloc some storage and keep the pointer in a
  1564   //           register (which is part of the thread's context) (or keep it
  1565   //           in TLS).
  1566   // %%%       In current versions of Solaris, thr_self and TSD can
  1567   //           be accessed via short sequences of displaced indirections.
  1568   //           The value of thr_self is available as %g7(36).
  1569   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  1570   //           assuming that the current thread already has a value bound to k.
  1571   //           It may be worth experimenting with such access patterns,
  1572   //           and later having the parameters formally exported from a Solaris
  1573   //           interface.  I think, however, that it will be faster to
  1574   //           maintain the invariant that %g2 always contains the
  1575   //           JavaThread in Java code, and have stubs simply
  1576   //           treat %g2 as a caller-save register, preserving it in a %lN.
  1577   thread_key_t tk;
  1578   if (thr_keycreate( &tk, NULL ) )
  1579     fatal1("os::allocate_thread_local_storage: thr_keycreate failed (%s)", strerror(errno));
  1580   return int(tk);
  1583 void os::free_thread_local_storage(int index) {
  1584   // %%% don't think we need anything here
  1585   // if ( pthread_key_delete((pthread_key_t) tk) )
  1586   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
  1589 #define SMALLINT 32   // libthread's allocation for tsd_common is a version-specific
  1590                       // small number - the point is that NO swap space is available
  1591 void os::thread_local_storage_at_put(int index, void* value) {
  1592   // %%% this is used only in threadLocalStorage.cpp
  1593   if (thr_setspecific((thread_key_t)index, value)) {
  1594     if (errno == ENOMEM) {
  1595        vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
  1596     } else {
  1597       fatal1("os::thread_local_storage_at_put: thr_setspecific failed (%s)", strerror(errno));
  1599   } else {
  1600       ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  1604 // This function could be called before TLS is initialized, for example, when
  1605 // VM receives an async signal or when VM causes a fatal error during
  1606 // initialization. Return NULL if thr_getspecific() fails.
  1607 void* os::thread_local_storage_at(int index) {
  1608   // %%% this is used only in threadLocalStorage.cpp
  1609   void* r = NULL;
  1610   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
  1614 const int NANOSECS_PER_MILLISECS = 1000000;
  1615 // gethrtime can move backwards if read from one cpu and then a different cpu
  1616 // getTimeNanos is guaranteed to not move backward on Solaris
  1617 // A local spinloop is used because a CAS on an int is faster than
  1618 // a CAS on a 64-bit jlong. Also, Atomic::cmpxchg for jlong is not
  1619 // supported on sparc v8 or on pre-supports_cx8 Intel boxes.
  1620 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong
  1621 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
  1622 inline hrtime_t oldgetTimeNanos() {
  1623   int gotlock = LOCK_INVALID;
  1624   hrtime_t newtime = gethrtime();
  1626   for (;;) {
  1627 // grab lock for max_hrtime
  1628     int curlock = max_hrtime_lock;
  1629     if (curlock & LOCK_BUSY)  continue;
  1630     if ((gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE)) != LOCK_FREE) continue;
  1631     if (newtime > max_hrtime) {
  1632       max_hrtime = newtime;
  1633     } else {
  1634       newtime = max_hrtime;
  1636     // release lock
  1637     max_hrtime_lock = LOCK_FREE;
  1638     return newtime;
  1641 // gethrtime can move backwards if read from one cpu and then a different cpu
  1642 // getTimeNanos is guaranteed to not move backward on Solaris
  1643 inline hrtime_t getTimeNanos() {
  1644   if (VM_Version::supports_cx8()) {
  1645     const hrtime_t now = gethrtime();
  1646     // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
  1647     const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
  1648     if (now <= prev)  return prev;   // same or retrograde time;
  1649     const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  1650     assert(obsv >= prev, "invariant");   // Monotonicity
  1651     // If the CAS succeeded then we're done and return "now".
  1652     // If the CAS failed and the observed value "obsv" is >= now then
  1653     // we should return "obsv".  If the CAS failed and now > obsv > prev then
  1654     // some other thread raced this thread and installed a new value, in which case
  1655     // we could either (a) retry the entire operation, (b) retry trying to install "now",
  1656     // or (c) just return obsv.  We use (c).   No loop is required although in some cases
  1657     // we might discard a higher "now" value in deference to a slightly lower but freshly
  1658     // installed obs value.   That's entirely benign -- it admits no new orderings compared
  1659     // to (a) or (b) -- and greatly reduces coherence traffic.
  1660     // We might also condition (c) on the magnitude of the delta between obs and now.
  1661     // Avoiding excessive CAS operations to hot RW locations is critical.
  1662     // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
  1663     return (prev == obsv) ? now : obsv ;
  1664   } else {
  1665     return oldgetTimeNanos();
  1669 // Time since start-up in seconds to a fine granularity.
  1670 // Used by VMSelfDestructTimer and the MemProfiler.
  1671 double os::elapsedTime() {
  1672   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
  1675 jlong os::elapsed_counter() {
  1676   return (jlong)(getTimeNanos() - first_hrtime);
  1679 jlong os::elapsed_frequency() {
  1680    return hrtime_hz;
  1683 // Return the real, user, and system times in seconds from an
  1684 // arbitrary fixed point in the past.
  1685 bool os::getTimesSecs(double* process_real_time,
  1686                   double* process_user_time,
  1687                   double* process_system_time) {
  1688   struct tms ticks;
  1689   clock_t real_ticks = times(&ticks);
  1691   if (real_ticks == (clock_t) (-1)) {
  1692     return false;
  1693   } else {
  1694     double ticks_per_second = (double) clock_tics_per_sec;
  1695     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
  1696     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
  1697     // For consistency return the real time from getTimeNanos()
  1698     // converted to seconds.
  1699     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
  1701     return true;
  1705 bool os::supports_vtime() { return true; }
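// Enable micro-state accounting (PR_MSACCT) for this process by writing a
// PCSET control message to /proc/self/ctl.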
  1707 bool os::enable_vtime() {
  1708   int fd = open("/proc/self/ctl", O_WRONLY);
  1709   if (fd == -1)
  1710     return false;
  1712   long cmd[] = { PCSET, PR_MSACCT };
  1713   int res = write(fd, cmd, sizeof(long) * 2);
  1714   close(fd);
  1715   if (res != sizeof(long) * 2)
  1716     return false;
  1718   return true;
  1721 bool os::vtime_enabled() {
  1722   int fd = open("/proc/self/status", O_RDONLY);
  1723   if (fd == -1)
  1724     return false;
  1726   pstatus_t status;
  1727   int res = read(fd, (void*) &status, sizeof(pstatus_t));
  1728   close(fd);
  1729   if (res != sizeof(pstatus_t))
  1730     return false;
  1732   return status.pr_flags & PR_MSACCT;
  1735 double os::elapsedVTime() {
  1736   return (double)gethrvtime() / (double)hrtime_hz;
  1739 // Used internally for comparisons only
  1740 // getTimeMillis guaranteed to not move backwards on Solaris
  1741 jlong getTimeMillis() {
  1742   jlong nanotime = getTimeNanos();
  1743   return (jlong)(nanotime / NANOSECS_PER_MILLISECS);
  1746 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
  1747 jlong os::javaTimeMillis() {
  1748   timeval t;
  1749   if (gettimeofday( &t, NULL) == -1)
  1750     fatal1("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
  1751   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
  1754 jlong os::javaTimeNanos() {
  1755   return (jlong)getTimeNanos();
  1758 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  1759   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  1760   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  1761   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  1762   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
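// Format the current local time into 'buf' as "YYYY-MM-DD HH:MM:SS".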
  1765 char * os::local_time_string(char *buf, size_t buflen) {
  1766   struct tm t;
  1767   time_t long_time;
  1768   time(&long_time);
  1769   localtime_r(&long_time, &t);
  1770   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
  1771                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
  1772                t.tm_hour, t.tm_min, t.tm_sec);
  1773   return buf;
  1776 // Note: os::shutdown() might be called very early during initialization, or
  1777 // called from signal handler. Before adding something to os::shutdown(), make
  1778 // sure it is async-safe and can handle partially initialized VM.
  1779 void os::shutdown() {
  1781   // allow PerfMemory to attempt cleanup of any persistent resources
  1782   perfMemory_exit();
  1784   // needs to remove object in file system
  1785   AttachListener::abort();
  1787   // flush buffered output, finish log files
  1788   ostream_abort();
  1790   // Check for abort hook
  1791   abort_hook_t abort_hook = Arguments::abort_hook();
  1792   if (abort_hook != NULL) {
  1793     abort_hook();
  1797 // Note: os::abort() might be called very early during initialization, or
  1798 // called from signal handler. Before adding something to os::abort(), make
  1799 // sure it is async-safe and can handle partially initialized VM.
  1800 void os::abort(bool dump_core) {
  1801   os::shutdown();
  1802   if (dump_core) {
  1803 #ifndef PRODUCT
  1804     fdStream out(defaultStream::output_fd());
  1805     out.print_raw("Current thread is ");
  1806     char buf[16];
  1807     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
  1808     out.print_raw_cr(buf);
  1809     out.print_raw_cr("Dumping core ...");
  1810 #endif
  1811     ::abort(); // dump core (for debugging)
  1814   ::exit(1);
  1817 // Die immediately, no exit hook, no abort hook, no cleanup.
  1818 void os::die() {
  1819   _exit(-1);
  1822 // unused
  1823 void os::set_error_file(const char *logfile) {}
  1825 // DLL functions
  1827 const char* os::dll_file_extension() { return ".so"; }
  1829 const char* os::get_temp_directory() { return "/tmp/"; }
  1831 static bool file_exists(const char* filename) {
  1832   struct stat statbuf;
  1833   if (filename == NULL || strlen(filename) == 0) {
  1834     return false;
  1836   return os::stat(filename, &statbuf) == 0;
  1839 void os::dll_build_name(char* buffer, size_t buflen,
  1840                         const char* pname, const char* fname) {
  1841   // Copied from libhpi
  1842   const size_t pnamelen = pname ? strlen(pname) : 0;
  1844   // Quietly truncate on buffer overflow.  Should be an error.
  1845   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
  1846       *buffer = '\0';
  1847       return;
  1850   if (pnamelen == 0) {
  1851     snprintf(buffer, buflen, "lib%s.so", fname);
  1852   } else if (strchr(pname, *os::path_separator()) != NULL) {
  1853     int n;
  1854     char** pelements = split_path(pname, &n);
  1855     for (int i = 0 ; i < n ; i++) {
  1856       // really shouldn't be NULL but what the heck, check can't hurt
  1857       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
  1858         continue; // skip the empty path values
  1860       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
  1861       if (file_exists(buffer)) {
  1862         break;
  1865     // release the storage
  1866     for (int i = 0 ; i < n ; i++) {
  1867       if (pelements[i] != NULL) {
  1868         FREE_C_HEAP_ARRAY(char, pelements[i]);
  1871     if (pelements != NULL) {
  1872       FREE_C_HEAP_ARRAY(char*, pelements);
  1874   } else {
  1875     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
  1879 const char* os::get_current_directory(char *buf, int buflen) {
  1880   return getcwd(buf, buflen);
  1883 // check if addr is inside libjvm[_g].so
  1884 bool os::address_is_in_vm(address addr) {
  1885   static address libjvm_base_addr;
  1886   Dl_info dlinfo;
  1888   if (libjvm_base_addr == NULL) {
  1889     dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
  1890     libjvm_base_addr = (address)dlinfo.dli_fbase;
  1891     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  1894   if (dladdr((void *)addr, &dlinfo)) {
  1895     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  1898   return false;
  1901 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
  1902 static dladdr1_func_type dladdr1_func = NULL;
  1904 bool os::dll_address_to_function_name(address addr, char *buf,
  1905                                       int buflen, int * offset) {
  1906   Dl_info dlinfo;
  1908   // dladdr1_func was initialized in os::init()
  1909   if (dladdr1_func){
  1910       // yes, we have dladdr1
  1912       // Support for dladdr1 is checked at runtime; it may be
  1913       // available even if the vm is built on a machine that does
  1914       // not have dladdr1 support.  Make sure there is a value for
  1915       // RTLD_DL_SYMENT.
  1916       #ifndef RTLD_DL_SYMENT
  1917       #define RTLD_DL_SYMENT 1
  1918       #endif
  1919       Sym * info;
  1920       if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
  1921                        RTLD_DL_SYMENT)) {
  1922           if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
  1923           if (offset) *offset = addr - (address)dlinfo.dli_saddr;
  1925           // check if the returned symbol really covers addr
  1926           return ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr);
  1927       } else {
  1928           if (buf) buf[0] = '\0';
  1929           if (offset) *offset  = -1;
  1930           return false;
  1932   } else {
  1933       // no, only dladdr is available
  1934       if(dladdr((void *)addr, &dlinfo)) {
  1935           if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
  1936           if (offset) *offset = addr - (address)dlinfo.dli_saddr;
  1937           return true;
  1938       } else {
  1939           if (buf) buf[0] = '\0';
  1940           if (offset) *offset  = -1;
  1941           return false;
  1946 bool os::dll_address_to_library_name(address addr, char* buf,
  1947                                      int buflen, int* offset) {
  1948   Dl_info dlinfo;
  1950   if (dladdr((void*)addr, &dlinfo)){
  1951      if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
  1952      if (offset) *offset = addr - (address)dlinfo.dli_fbase;
  1953      return true;
  1954   } else {
  1955      if (buf) buf[0] = '\0';
  1956      if (offset) *offset = -1;
  1957      return false;
  1961 // Prints the names and full paths of all opened dynamic libraries
  1962 // for current process
  1963 void os::print_dll_info(outputStream * st) {
  1964     Dl_info dli;
  1965     void *handle;
  1966     Link_map *map;
  1967     Link_map *p;
  1969     st->print_cr("Dynamic libraries:"); st->flush();
  1971     if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
  1972         st->print_cr("Error: Cannot print dynamic libraries.");
  1973         return;
  1975     handle = dlopen(dli.dli_fname, RTLD_LAZY);
  1976     if (handle == NULL) {
  1977         st->print_cr("Error: Cannot print dynamic libraries.");
  1978         return;
  1980     dlinfo(handle, RTLD_DI_LINKMAP, &map);
  1981     if (map == NULL) {
  1982         st->print_cr("Error: Cannot print dynamic libraries.");
  1983         return;
  1986     while (map->l_prev != NULL)
  1987         map = map->l_prev;
  1989     while (map != NULL) {
  1990         st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
  1991         map = map->l_next;
  1994     dlclose(handle);
  1997   // Loads a .dll/.so and, in case of error,
  1998   // checks whether the .dll/.so was built for the
  1999   // same architecture as the one Hotspot is running on.
  2001 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
  2003   void * result= ::dlopen(filename, RTLD_LAZY);
  2004   if (result != NULL) {
  2005     // Successful loading
  2006     return result;
  2009   Elf32_Ehdr elf_head;
  2011   // Read system error message into ebuf
  2012   // It may or may not be overwritten below
  2013   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  2014   ebuf[ebuflen-1]='\0';
  2015   int diag_msg_max_length=ebuflen-strlen(ebuf);
  2016   char* diag_msg_buf=ebuf+strlen(ebuf);
  2018   if (diag_msg_max_length==0) {
  2019     // No more space in ebuf for additional diagnostics message
  2020     return NULL;
  2024   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
  2026   if (file_descriptor < 0) {
  2027     // Can't open library, report dlerror() message
  2028     return NULL;
  2031   bool failed_to_read_elf_head=
  2032     (sizeof(elf_head)!=
  2033         (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
  2035   ::close(file_descriptor);
  2036   if (failed_to_read_elf_head) {
  2037     // file i/o error - report dlerror() msg
  2038     return NULL;
  2041   typedef struct {
  2042     Elf32_Half  code;         // Actual value as defined in elf.h
  2043     Elf32_Half  compat_class; // Compatibility of archs in the VM's sense
  2044     char        elf_class;    // 32 or 64 bit
  2045     char        endianess;    // MSB or LSB
  2046     char*       name;         // String representation
  2047   } arch_t;
  2049   static const arch_t arch_array[]={
  2050     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
  2051     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
  2052     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
  2053     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
  2054     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
  2055     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
  2056     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
  2057     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
  2058     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}
  2059   };
  2061   #if  (defined IA32)
  2062     static  Elf32_Half running_arch_code=EM_386;
  2063   #elif   (defined AMD64)
  2064     static  Elf32_Half running_arch_code=EM_X86_64;
  2065   #elif  (defined IA64)
  2066     static  Elf32_Half running_arch_code=EM_IA_64;
  2067   #elif  (defined __sparc) && (defined _LP64)
  2068     static  Elf32_Half running_arch_code=EM_SPARCV9;
  2069   #elif  (defined __sparc) && (!defined _LP64)
  2070     static  Elf32_Half running_arch_code=EM_SPARC;
  2071   #elif  (defined __powerpc64__)
  2072     static  Elf32_Half running_arch_code=EM_PPC64;
  2073   #elif  (defined __powerpc__)
  2074     static  Elf32_Half running_arch_code=EM_PPC;
  2075   #else
  2076     #error Method os::dll_load requires that one of following is defined:\
  2077          IA32, AMD64, IA64, __sparc, __powerpc__
  2078   #endif
  2080   // Identify compatibility class for the VM's architecture and the library's architecture
  2081   // Obtain string descriptions for architectures
  2083   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  2084   int running_arch_index=-1;
  2086   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
  2087     if (running_arch_code == arch_array[i].code) {
  2088       running_arch_index    = i;
  2090     if (lib_arch.code == arch_array[i].code) {
  2091       lib_arch.compat_class = arch_array[i].compat_class;
  2092       lib_arch.name         = arch_array[i].name;
  2096   assert(running_arch_index != -1,
  2097     "Didn't find running architecture code (running_arch_code) in arch_array");
  2098   if (running_arch_index == -1) {
  2099     // Even though running architecture detection failed
  2100     // we may still continue with reporting dlerror() message
  2101     return NULL;
  2104   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
  2105     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
  2106     return NULL;
  2109   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
  2110     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
  2111     return NULL;
  2114   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
  2115     if ( lib_arch.name!=NULL ) {
  2116       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
  2117         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
  2118         lib_arch.name, arch_array[running_arch_index].name);
  2119     } else {
  2120       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
  2121       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
  2122         lib_arch.code,
  2123         arch_array[running_arch_index].name);
  2127   return NULL;
  2130 void* os::dll_lookup(void* handle, const char* name) {
  2131   return dlsym(handle, name);
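// Print the contents of an ASCII file to the given stream; return false if the
// file cannot be opened.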
  2135 bool _print_ascii_file(const char* filename, outputStream* st) {
  2136   int fd = open(filename, O_RDONLY);
  2137   if (fd == -1) {
  2138      return false;
  2141   char buf[32];
  2142   int bytes;
  2143   while ((bytes = read(fd, buf, sizeof(buf))) > 0) {
  2144     st->print_raw(buf, bytes);
  2147   close(fd);
  2149   return true;
  2152 void os::print_os_info(outputStream* st) {
  2153   st->print("OS:");
  2155   if (!_print_ascii_file("/etc/release", st)) {
  2156     st->print("Solaris");
  2158   st->cr();
  2160   // kernel
  2161   st->print("uname:");
  2162   struct utsname name;
  2163   uname(&name);
  2164   st->print(name.sysname); st->print(" ");
  2165   st->print(name.release); st->print(" ");
  2166   st->print(name.version); st->print(" ");
  2167   st->print(name.machine);
  2169   // libthread
  2170   if (os::Solaris::T2_libthread()) st->print("  (T2 libthread)");
  2171   else st->print("  (T1 libthread)");
  2172   st->cr();
  2174   // rlimit
  2175   st->print("rlimit:");
  2176   struct rlimit rlim;
  2178   st->print(" STACK ");
  2179   getrlimit(RLIMIT_STACK, &rlim);
  2180   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  2181   else st->print("%uk", rlim.rlim_cur >> 10);
  2183   st->print(", CORE ");
  2184   getrlimit(RLIMIT_CORE, &rlim);
  2185   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  2186   else st->print("%uk", rlim.rlim_cur >> 10);
  2188   st->print(", NOFILE ");
  2189   getrlimit(RLIMIT_NOFILE, &rlim);
  2190   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  2191   else st->print("%d", rlim.rlim_cur);
  2193   st->print(", AS ");
  2194   getrlimit(RLIMIT_AS, &rlim);
  2195   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  2196   else st->print("%uk", rlim.rlim_cur >> 10);
  2197   st->cr();
  2199   // load average
  2200   st->print("load average:");
  2201   double loadavg[3];
  2202   os::loadavg(loadavg, 3);
  2203   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  2204   st->cr();
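// Scan /proc/self/map and print a warning for any mapping at virtual address 0,
// since such a mapping can hide NULL pointer dereferences.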
  2208 static bool check_addr0(outputStream* st) {
  2209   jboolean status = false;
  2210   int fd = open("/proc/self/map",O_RDONLY);
  2211   if (fd >= 0) {
  2212     prmap_t p;
  2213     while(read(fd, &p, sizeof(p)) > 0) {
  2214       if (p.pr_vaddr == 0x0) {
  2215         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
  2216         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
  2217         st->print("Access:");
  2218         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
  2219         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
  2220         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
  2221         st->cr();
  2222         status = true;
  2224       close(fd);
  2227   return status;
  2230 void os::print_memory_info(outputStream* st) {
  2231   st->print("Memory:");
  2232   st->print(" %dk page", os::vm_page_size()>>10);
  2233   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  2234   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  2235   st->cr();
  2236   (void) check_addr0(st);
  2239 // Taken from /usr/include/sys/machsig.h.  Supposed to be architecture specific,
  2240 // but they're the same for all the Solaris architectures that we support.
  2241 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
  2242                           "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
  2243                           "ILL_COPROC", "ILL_BADSTK" };
  2245 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
  2246                           "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
  2247                           "FPE_FLTINV", "FPE_FLTSUB" };
  2249 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };
  2251 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
  2253 void os::print_siginfo(outputStream* st, void* siginfo) {
  2254   st->print("siginfo:");
  2256   const int buflen = 100;
  2257   char buf[buflen];
  2258   siginfo_t *si = (siginfo_t*)siginfo;
  2259   st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
  2260   char *err = strerror(si->si_errno);
  2261   if (si->si_errno != 0 && err != NULL) {
  2262     st->print("si_errno=%s", err);
  2263   } else {
  2264     st->print("si_errno=%d", si->si_errno);
  2266   const int c = si->si_code;
  2267   assert(c > 0, "unexpected si_code");
  2268   switch (si->si_signo) {
  2269   case SIGILL:
  2270     st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
  2271     st->print(", si_addr=" PTR_FORMAT, si->si_addr);
  2272     break;
  2273   case SIGFPE:
  2274     st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
  2275     st->print(", si_addr=" PTR_FORMAT, si->si_addr);
  2276     break;
  2277   case SIGSEGV:
  2278     st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
  2279     st->print(", si_addr=" PTR_FORMAT, si->si_addr);
  2280     break;
  2281   case SIGBUS:
  2282     st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
  2283     st->print(", si_addr=" PTR_FORMAT, si->si_addr);
  2284     break;
  2285   default:
  2286     st->print(", si_code=%d", si->si_code);
  2287     // no si_addr
  2290   if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
  2291       UseSharedSpaces) {
  2292     FileMapInfo* mapinfo = FileMapInfo::current_info();
  2293     if (mapinfo->is_in_shared_space(si->si_addr)) {
  2294       st->print("\n\nError accessing class data sharing archive."   \
  2295                 " Mapped file inaccessible during execution, "      \
  2296                 " possible disk/network problem.");
  2299   st->cr();
  2302 // Moved up from the signal handling group below, because we need them here for
  2303 // diagnostic prints.
  2304 #define OLDMAXSIGNUM 32
  2305 static int Maxsignum = 0;
  2306 static int *ourSigFlags = NULL;
  2308 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
  2310 int os::Solaris::get_our_sigflags(int sig) {
  2311   assert(ourSigFlags!=NULL, "signal data structure not initialized");
  2312   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  2313   return ourSigFlags[sig];
  2316 void os::Solaris::set_our_sigflags(int sig, int flags) {
  2317   assert(ourSigFlags!=NULL, "signal data structure not initialized");
  2318   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  2319   ourSigFlags[sig] = flags;
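// Return a printable name for a signal handler address: "library+0xoffset" if the
// address can be resolved to a loaded library, otherwise the raw address.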
  2323 static const char* get_signal_handler_name(address handler,
  2324                                            char* buf, int buflen) {
  2325   int offset;
  2326   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  2327   if (found) {
  2328     // skip directory names
  2329     const char *p1, *p2;
  2330     p1 = buf;
  2331     size_t len = strlen(os::file_separator());
  2332     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
  2333     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  2334   } else {
  2335     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  2337   return buf;
  2340 static void print_signal_handler(outputStream* st, int sig,
  2341                                   char* buf, size_t buflen) {
  2342   struct sigaction sa;
  2344   sigaction(sig, NULL, &sa);
  2346   st->print("%s: ", os::exception_name(sig, buf, buflen));
  2348   address handler = (sa.sa_flags & SA_SIGINFO)
  2349                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
  2350                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
  2352   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
  2353     st->print("SIG_DFL");
  2354   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
  2355     st->print("SIG_IGN");
  2356   } else {
  2357     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  2360   st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);
  2362   address rh = VMError::get_resetted_sighandler(sig);
  2363   // Maybe the handler was reset by VMError?
  2364   if (rh != NULL) {
  2365     handler = rh;
  2366     sa.sa_flags = VMError::get_resetted_sigflags(sig);
  2369   st->print(", sa_flags="   PTR32_FORMAT, sa.sa_flags);
  2371   // Check: is it our handler?
  2372   if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
  2373      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
  2374     // It is our signal handler
  2375     // check for flags
  2376     if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
  2377       st->print(
  2378         ", sa_flags were changed from " PTR32_FORMAT ", consider using jsig library",
  2379         os::Solaris::get_our_sigflags(sig));
  2382   st->cr();
  2385 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  2386   st->print_cr("Signal Handlers:");
  2387   print_signal_handler(st, SIGSEGV, buf, buflen);
  2388   print_signal_handler(st, SIGBUS , buf, buflen);
  2389   print_signal_handler(st, SIGFPE , buf, buflen);
  2390   print_signal_handler(st, SIGPIPE, buf, buflen);
  2391   print_signal_handler(st, SIGXFSZ, buf, buflen);
  2392   print_signal_handler(st, SIGILL , buf, buflen);
  2393   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  2394   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  2395   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  2396   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  2397   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  2398   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  2399   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  2400   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
  2403 static char saved_jvm_path[MAXPATHLEN] = { 0 };
  2405 // Find the full path to the current module, libjvm.so or libjvm_g.so
  2406 void os::jvm_path(char *buf, jint buflen) {
  2407   // Error checking.
  2408   if (buflen < MAXPATHLEN) {
  2409     assert(false, "must use a large-enough buffer");
  2410     buf[0] = '\0';
  2411     return;
  2413   // Lazy resolve the path to current module.
  2414   if (saved_jvm_path[0] != 0) {
  2415     strcpy(buf, saved_jvm_path);
  2416     return;
  2419   Dl_info dlinfo;
  2420   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  2421   assert(ret != 0, "cannot locate libjvm");
  2422   realpath((char *)dlinfo.dli_fname, buf);
  2424   if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) {
  2425     // Support for the gamma launcher.  Typical value for buf is
  2426     // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
  2427     // the right place in the string, then assume we are installed in a JDK and
  2428     // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
  2429     // up the path so it looks like libjvm.so is installed there (append a
  2430     // fake suffix hotspot/libjvm.so).
  2431     const char *p = buf + strlen(buf) - 1;
  2432     for (int count = 0; p > buf && count < 5; ++count) {
  2433       for (--p; p > buf && *p != '/'; --p)
  2434         /* empty */ ;
  2437     if (strncmp(p, "/jre/lib/", 9) != 0) {
  2438       // Look for JAVA_HOME in the environment.
  2439       char* java_home_var = ::getenv("JAVA_HOME");
  2440       if (java_home_var != NULL && java_home_var[0] != 0) {
  2441         char cpu_arch[12];
  2442         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
  2443 #ifdef _LP64
  2444         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
  2445         if (strcmp(cpu_arch, "sparc") == 0) {
  2446           strcat(cpu_arch, "v9");
  2447         } else if (strcmp(cpu_arch, "i386") == 0) {
  2448           strcpy(cpu_arch, "amd64");
  2450 #endif
  2451         // Check the current module name "libjvm.so" or "libjvm_g.so".
  2452         p = strrchr(buf, '/');
  2453         assert(strstr(p, "/libjvm") == p, "invalid library name");
  2454         p = strstr(p, "_g") ? "_g" : "";
  2456         realpath(java_home_var, buf);
  2457         sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
  2458         if (0 == access(buf, F_OK)) {
  2459           // Use current module name "libjvm[_g].so" instead of
  2460           // "libjvm"debug_only("_g")".so" since for fastdebug version
  2461           // we should have "libjvm.so" but debug_only("_g") adds "_g"!
  2462           // It is used when we are choosing the HPI library's name
  2463           // "libhpi[_g].so" in hpi::initialize_get_interface().
  2464           sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
  2465         } else {
  2466           // Go back to path of .so
  2467           realpath((char *)dlinfo.dli_fname, buf);
  2473   strcpy(saved_jvm_path, buf);
  2477 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  2478   // no prefix required, not even "_"
  2482 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  2483   // no suffix required
  2487 // sun.misc.Signal
  2489 extern "C" {
  2490   static void UserHandler(int sig, void *siginfo, void *context) {
  2491     // If Ctrl-C is pressed during error reporting, it is likely because the error
  2492     // handler failed to abort.  Let the VM die immediately.
  2493     if (sig == SIGINT && is_error_reported()) {
  2494        os::die();
  2497     os::signal_notify(sig);
  2498     // We do not need to reinstate the signal handler each time...
  2502 void* os::user_handler() {
  2503   return CAST_FROM_FN_PTR(void*, UserHandler);
  2506 extern "C" {
  2507   typedef void (*sa_handler_t)(int);
  2508   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
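// Install 'handler' for 'signal_number' via sigaction(); return the previous
// handler, or (void *)-1 if registration fails.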
  2511 void* os::signal(int signal_number, void* handler) {
  2512   struct sigaction sigAct, oldSigAct;
  2513   sigfillset(&(sigAct.sa_mask));
  2514   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  2515   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
  2517   if (sigaction(signal_number, &sigAct, &oldSigAct))
  2518     // -1 means registration failed
  2519     return (void *)-1;
  2521   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
  2524 void os::signal_raise(int signal_number) {
  2525   raise(signal_number);
  2528 /*
  2529  * The following code was moved from os.cpp to make this
  2530  * code platform specific, which it is by its very nature.
  2531  */
  2533 // a counter for each possible signal value
  2534 static int Sigexit = 0;
  2535 static int Maxlibjsigsigs;
  2536 static jint *pending_signals = NULL;
  2537 static int *preinstalled_sigs = NULL;
  2538 static struct sigaction *chainedsigactions = NULL;
  2539 static sema_t sig_sem;
  2540 typedef int (*version_getting_t)();
  2541 version_getting_t os::Solaris::get_libjsig_version = NULL;
  2542 static int libjsigversion = 0;
  2544 int os::sigexitnum_pd() {
  2545   assert(Sigexit > 0, "signal memory not yet initialized");
  2546   return Sigexit;
  2549 void os::Solaris::init_signal_mem() {
  2550   // Initialize signal structures
  2551   Maxsignum = SIGRTMAX;
  2552   Sigexit = Maxsignum+1;
  2553   assert(Maxsignum >0, "Unable to obtain max signal number");
  2555   Maxlibjsigsigs = Maxsignum;
  2557   // pending_signals has one int per signal
  2558   // The additional signal is for SIGEXIT - exit signal to signal_thread
  2559   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1));
  2560   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
  2562   if (UseSignalChaining) {
  2563      chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
  2564        * (Maxsignum + 1));
  2565      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
  2566      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1));
  2567      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
  2569   ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ));
  2570   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
  2573 void os::signal_init_pd() {
  2574   int ret;
  2576   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
  2577   assert(ret == 0, "sema_init() failed");
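// Record one more pending occurrence of 'signal_number' and post the semaphore
// to wake up a waiter in check_pending_signals().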
  2580 void os::signal_notify(int signal_number) {
  2581   int ret;
  2583   Atomic::inc(&pending_signals[signal_number]);
  2584   ret = ::sema_post(&sig_sem);
  2585   assert(ret == 0, "sema_post() failed");
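// Atomically claim one pending occurrence of any signal and return its number.
// If wait_for_signal is true, block on the semaphore (cooperating with thread
// suspension) until a signal is pending; otherwise return -1 when none is pending.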
  2588 static int check_pending_signals(bool wait_for_signal) {
  2589   int ret;
  2590   while (true) {
  2591     for (int i = 0; i < Sigexit + 1; i++) {
  2592       jint n = pending_signals[i];
  2593       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
  2594         return i;
  2597     if (!wait_for_signal) {
  2598       return -1;
  2600     JavaThread *thread = JavaThread::current();
  2601     ThreadBlockInVM tbivm(thread);
  2603     bool threadIsSuspended;
  2604     do {
  2605       thread->set_suspend_equivalent();
  2606       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  2607       while((ret = ::sema_wait(&sig_sem)) == EINTR)
  2609       assert(ret == 0, "sema_wait() failed");
  2611       // were we externally suspended while we were waiting?
  2612       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
  2613       if (threadIsSuspended) {
  2614         //
  2615         // The semaphore has been incremented, but while we were waiting
  2616         // another thread suspended us. We don't want to continue running
  2617         // while suspended because that would surprise the thread that
  2618         // suspended us.
  2619         //
  2620         ret = ::sema_post(&sig_sem);
  2621         assert(ret == 0, "sema_post() failed");
  2623         thread->java_suspend_self();
  2625     } while (threadIsSuspended);
  2629 int os::signal_lookup() {
  2630   return check_pending_signals(false);
  2633 int os::signal_wait() {
  2634   return check_pending_signals(true);
  2637 ////////////////////////////////////////////////////////////////////////////////
  2638 // Virtual Memory
  2640 static int page_size = -1;
  2642 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
  2643 // clear this var if support is not available.
  2644 static bool has_map_align = true;
  2646 int os::vm_page_size() {
  2647   assert(page_size != -1, "must call os::init");
  2648   return page_size;
  2651 // Solaris allocates memory by pages.
  2652 int os::vm_allocation_granularity() {
  2653   assert(page_size != -1, "must call os::init");
  2654   return page_size;
  2657 bool os::commit_memory(char* addr, size_t bytes, bool exec) {
  2658   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  2659   size_t size = bytes;
  2660   return
  2661      NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  2664 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
  2665                        bool exec) {
  2666   if (commit_memory(addr, bytes, exec)) {
  2667     if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
  2668       // If the large page size has been set and the VM
  2669       // is using large pages, use the large page size
  2670       // if it is smaller than the alignment hint. This is
  2671       // a case where the VM wants to use a larger alignment size
  2672       // for its own reasons but still want to use large pages
  2673       // (which is what matters to setting the mpss range.
  2674       size_t page_size = 0;
  2675       if (large_page_size() < alignment_hint) {
  2676         assert(UseLargePages, "Expected to be here for large page use only");
  2677         page_size = large_page_size();
  2678       } else {
  2679         // If the alignment hint is less than the large page
  2680         // size, the VM wants a particular alignment (thus the hint)
  2681         // for internal reasons.  Try to set the mpss range using
  2682         // the alignment_hint.
  2683         page_size = alignment_hint;
  2685       // Since this is a hint, ignore any failures.
  2686       (void)Solaris::set_mpss_range(addr, bytes, page_size);
  2688     return true;
  2690   return false;
  2693 // Uncommit the pages in a specified region.
  2694 void os::free_memory(char* addr, size_t bytes) {
  2695   if (madvise(addr, bytes, MADV_FREE) < 0) {
  2696     debug_only(warning("MADV_FREE failed."));
  2697     return;
  2701 // Change the page size in a given range.
  2702 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  2703   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  2704   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  2705   Solaris::set_mpss_range(addr, bytes, alignment_hint);
  2708 // Tell the OS to make the range local to the first-touching LWP
  2709 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  2710   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  2711   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
  2712     debug_only(warning("MADV_ACCESS_LWP failed."));
  2716 // Tell the OS that this range would be accessed from different LWPs.
  2717 void os::numa_make_global(char *addr, size_t bytes) {
  2718   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  2719   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
  2720     debug_only(warning("MADV_ACCESS_MANY failed."));
  2724 // Get the number of the locality groups.
  2725 size_t os::numa_get_groups_num() {
  2726   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
  2727   return n != -1 ? n : 1;
  2730 // Get a list of leaf locality groups. A leaf lgroup is a group that
  2731 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
  2732 // board. An LWP is assigned to one of these groups upon creation.
  2733 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  2734    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
  2735      ids[0] = 0;
  2736      return 1;
  2738    int result_size = 0, top = 1, bottom = 0, cur = 0;
  2739    for (int k = 0; k < size; k++) {
  2740      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
  2741                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
  2742      if (r == -1) {
  2743        ids[0] = 0;
  2744        return 1;
  2746      if (!r) {
  2747        // That's a leaf node.
  2748        assert (bottom <= cur, "Sanity check");
  2749        // Check if the node has memory
  2750        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
  2751                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
  2752          ids[bottom++] = ids[cur];
  2755      top += r;
  2756      cur++;
  2758    if (bottom == 0) {
  2759      // Handle the situation when the OS reports no memory available.
  2760      // Assume UMA architecture.
  2761      ids[0] = 0;
  2762      return 1;
  2764    return bottom;
  2767 // Detect the topology change. Typically happens during CPU plugging-unplugging.
  2768 bool os::numa_topology_changed() {
  2769   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
  2770   if (is_stale != -1 && is_stale) {
  2771     Solaris::lgrp_fini(Solaris::lgrp_cookie());
  2772     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
  2773     assert(c != 0, "Failure to initialize LGRP API");
  2774     Solaris::set_lgrp_cookie(c);
  2775     return true;
  2777   return false;
  2780 // Get the group id of the current LWP.
  2781 int os::numa_get_group_id() {
  2782   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
  2783   if (lgrp_id == -1) {
  2784     return 0;
  2786   const int size = os::numa_get_groups_num();
  2787   int *ids = (int*)alloca(size * sizeof(int));
  2789   // Get the ids of all lgroups with memory; r is the count.
  2790   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
  2791                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  2792   if (r <= 0) {
  2793     return 0;
  2795   return ids[os::random() % r];
  2798 // Request information about the page.
  2799 bool os::get_page_info(char *start, page_info* info) {
  2800   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  2801   uint64_t addr = (uintptr_t)start;
  2802   uint64_t outdata[2];
  2803   uint_t validity = 0;
  2805   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
  2806     return false;
  2809   info->size = 0;
  2810   info->lgrp_id = -1;
  2812   if ((validity & 1) != 0) {
  2813     if ((validity & 2) != 0) {
  2814       info->lgrp_id = outdata[0];
  2816     if ((validity & 4) != 0) {
  2817       info->size = outdata[1];
  2819     return true;
  2821   return false;
  2824 // Scan the pages from start to end until a page different than
  2825 // the one described in the info parameter is encountered.
  2826 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  2827   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  2828   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  2829   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
  2830   uint_t validity[MAX_MEMINFO_CNT];
  2832   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  2833   uint64_t p = (uint64_t)start;
  2834   while (p < (uint64_t)end) {
  2835     addrs[0] = p;
  2836     size_t addrs_count = 1;
  2837     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
  2838       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
  2839       addrs_count++;
  2842     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
  2843       return NULL;
  2846     size_t i = 0;
  2847     for (; i < addrs_count; i++) {
  2848       if ((validity[i] & 1) != 0) {
  2849         if ((validity[i] & 4) != 0) {
  2850           if (outdata[types * i + 1] != page_expected->size) {
  2851             break;
  2853         } else
  2854           if (page_expected->size != 0) {
  2855             break;
  2858         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
  2859           if (outdata[types * i] != page_expected->lgrp_id) {
  2860             break;
  2863       } else {
  2864         return NULL;
  2868     if (i != addrs_count) {
  2869       if ((validity[i] & 2) != 0) {
  2870         page_found->lgrp_id = outdata[types * i];
  2871       } else {
  2872         page_found->lgrp_id = -1;
  2874       if ((validity[i] & 4) != 0) {
  2875         page_found->size = outdata[types * i + 1];
  2876       } else {
  2877         page_found->size = 0;
  2879       return (char*)addrs[i];
  2882     p = addrs[addrs_count - 1] + page_size;
  2884   return end;
  2887 bool os::uncommit_memory(char* addr, size_t bytes) {
  2888   size_t size = bytes;
  2889   // Map uncommitted pages PROT_NONE so we fail early if we touch an
  2890   // uncommitted page. Otherwise, the read/write might succeed if we
  2891   // have enough swap space to back the physical page.
  2892   return
  2893     NULL != Solaris::mmap_chunk(addr, size,
  2894                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
  2895                                 PROT_NONE);
  2898 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  2899   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
  2901   if (b == MAP_FAILED) {
  2902     return NULL;
  2904   return b;
  2907 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  2908   char* addr = requested_addr;
  2909   int flags = MAP_PRIVATE | MAP_NORESERVE;
  2911   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
  2913   if (fixed) {
  2914     flags |= MAP_FIXED;
  2915   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
  2916     flags |= MAP_ALIGN;
  2917     addr = (char*) alignment_hint;
  2920   // Map uncommitted pages PROT_NONE so we fail early if we touch an
  2921   // uncommitted page. Otherwise, the read/write might succeed if we
  2922   // have enough swap space to back the physical page.
  2923   return mmap_chunk(addr, bytes, flags, PROT_NONE);
  2926 char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  2927   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
  2929   guarantee(requested_addr == NULL || requested_addr == addr,
  2930             "OS failed to return requested mmap address.");
  2931   return addr;
  2934 // Reserve memory at an arbitrary address, only if that area is
  2935 // available (and not reserved for something else).
  2937 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  2938   const int max_tries = 10;
  2939   char* base[max_tries];
  2940   size_t size[max_tries];
  2942   // Solaris adds a gap between mmap'ed regions.  The size of the gap
  2943   // is dependent on the requested size and the MMU.  Our initial gap
  2944   // value here is just a guess and will be corrected later.
  2945   bool had_top_overlap = false;
  2946   bool have_adjusted_gap = false;
  2947   size_t gap = 0x400000;
  2949   // Assert only that the size is a multiple of the page size, since
  2950   // that's all that mmap requires, and since that's all we really know
  2951   // about at this low abstraction level.  If we need higher alignment,
  2952   // we can either pass an alignment to this method or verify alignment
  2953   // in one of the methods further up the call chain.  See bug 5044738.
  2954   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
  2956   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  2957   // Give it a try, if the kernel honors the hint we can return immediately.
  2958   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
  2959   volatile int err = errno;
  2960   if (addr == requested_addr) {
  2961     return addr;
  2962   } else if (addr != NULL) {
  2963     unmap_memory(addr, bytes);
  2966   if (PrintMiscellaneous && Verbose) {
  2967     char buf[256];
  2968     buf[0] = '\0';
  2969     if (addr == NULL) {
  2970       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
  2972     warning("attempt_reserve_memory_at: couldn't reserve %d bytes at "
  2973             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
  2974             "%s", bytes, requested_addr, addr, buf);
  2977   // Address hint method didn't work.  Fall back to the old method.
  2978   // In theory, once SNV becomes our oldest supported platform, this
  2979   // code will no longer be needed.
  2980   //
  2981   // Repeatedly allocate blocks until the block is allocated at the
  2982   // right spot. Give up after max_tries.
  2983   int i;
  2984   for (i = 0; i < max_tries; ++i) {
  2985     base[i] = reserve_memory(bytes);
  2987     if (base[i] != NULL) {
  2988       // Is this the block we wanted?
  2989       if (base[i] == requested_addr) {
  2990         size[i] = bytes;
  2991         break;
  2994       // check that the gap value is right
  2995       if (had_top_overlap && !have_adjusted_gap) {
  2996         size_t actual_gap = base[i-1] - base[i] - bytes;
  2997         if (gap != actual_gap) {
  2998           // adjust the gap value and retry the last 2 allocations
  2999           assert(i > 0, "gap adjustment code problem");
  3000           have_adjusted_gap = true;  // adjust the gap only once, just in case
  3001           gap = actual_gap;
  3002           if (PrintMiscellaneous && Verbose) {
  3003             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
  3005           unmap_memory(base[i], bytes);
  3006           unmap_memory(base[i-1], size[i-1]);
  3007           i-=2;
  3008           continue;
  3012       // Does this overlap the block we wanted? Give back the overlapped
  3013       // parts and try again.
  3014       //
  3015       // There is still a bug in this code: if top_overlap == bytes,
  3016       // the overlap is offset from requested region by the value of gap.
  3017       // In this case giving back the overlapped part will not work,
  3018       // because we'll give back the entire block at base[i] and
  3019       // therefore the subsequent allocation will not generate a new gap.
  3020       // This could be fixed with a new algorithm that used larger
  3021       // or variable size chunks to find the requested region -
  3022       // but such a change would introduce additional complications.
  3023       // It's rare enough that the planets align for this bug,
  3024       // so we'll just wait for a fix for 6204603/5003415 which
  3025 //       will provide an mmap flag to allow us to avoid this business.
  3027       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
  3028       if (top_overlap >= 0 && top_overlap < bytes) {
  3029         had_top_overlap = true;
  3030         unmap_memory(base[i], top_overlap);
  3031         base[i] += top_overlap;
  3032         size[i] = bytes - top_overlap;
  3033       } else {
  3034         size_t bottom_overlap = base[i] + bytes - requested_addr;
  3035         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
  3036           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
  3037             warning("attempt_reserve_memory_at: possible alignment bug");
  3039           unmap_memory(requested_addr, bottom_overlap);
  3040           size[i] = bytes - bottom_overlap;
  3041         } else {
  3042           size[i] = bytes;
  3048   // Give back the unused reserved pieces.
  3050   for (int j = 0; j < i; ++j) {
  3051     if (base[j] != NULL) {
  3052       unmap_memory(base[j], size[j]);
  3056   return (i < max_tries) ? requested_addr : NULL;
  3059 bool os::release_memory(char* addr, size_t bytes) {
  3060   size_t size = bytes;
  3061   return munmap(addr, size) == 0;
  3064 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  3065   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
  3066          "addr must be page aligned");
  3067   int retVal = mprotect(addr, bytes, prot);
  3068   return retVal == 0;
  3071 // Protect memory (Used to pass readonly pages through
  3072 // JNI GetArray<type>Elements with empty arrays.)
  3073 // Also, used for serialization page and for compressed oops null pointer
  3074 // checking.
  3075 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
  3076                         bool is_committed) {
  3077   unsigned int p = 0;
  3078   switch (prot) {
  3079   case MEM_PROT_NONE: p = PROT_NONE; break;
  3080   case MEM_PROT_READ: p = PROT_READ; break;
  3081   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  3082   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  3083   default:
  3084     ShouldNotReachHere();
  3086   // is_committed is unused.
  3087   return solaris_mprotect(addr, bytes, p);
  3090 // guard_memory and unguard_memory only happen within stack guard pages.
  3091 // Since ISM pertains only to the heap, guard and unguard memory should not
  3092 // happen with an ISM region.
  3093 bool os::guard_memory(char* addr, size_t bytes) {
  3094   return solaris_mprotect(addr, bytes, PROT_NONE);
  3097 bool os::unguard_memory(char* addr, size_t bytes) {
  3098   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
  3101 // Large page support
  3103 // UseLargePages is the master flag to enable/disable large page memory.
  3104 // UseMPSS and UseISM are supported for compatibility reasons. Their combined
  3105 // effects can be described in the following table:
  3106 //
  3107 // UseLargePages UseMPSS UseISM
  3108 //    false         *       *   => UseLargePages is the master switch, turning
  3109 //                                 it off will turn off both UseMPSS and
  3110 //                                 UseISM. VM will not use large page memory
  3111 //                                 regardless of the settings of UseMPSS/UseISM.
  3112 //     true      false    false => Unless a future Solaris provides another
  3113 //                                 mechanism to use large page memory, this
  3114 //                                 combination is equivalent to -UseLargePages,
  3115 //                                 VM will not use large page memory
  3116 //     true      true     false => JVM will use MPSS for large page memory.
  3117 //                                 This is the default behavior.
  3118 //     true      false    true  => JVM will use ISM for large page memory.
  3119 //     true      true     true  => JVM will use ISM if it is available.
  3120 //                                 Otherwise, JVM will fall back to MPSS.
  3121 //                                 Because ISM is now available on all
  3122 //                                 supported Solaris versions, this combination
  3123 //                                 is equivalent to +UseISM -UseMPSS.
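       //
       // For example (illustrative command lines only; the behavior follows the
       // table above):
       //    -XX:-UseLargePages                 no large page memory at all
       //    -XX:+UseLargePages                 MPSS with the default flag values
       //    -XX:+UseLargePages -XX:+UseISM     ISM; MPSS is turned off for
       //                                       compatibility with old JDK behavior
       //    -XX:+UseLargePages -XX:LargePageSizeInBytes=4m
       //                                       also request a specific page size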
  3125 typedef int (*getpagesizes_func_type) (size_t[], int);
  3126 static size_t _large_page_size = 0;
  3128 bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
  3129   // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address
  3130   // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc
  3131   // can support multiple page sizes.
  3133   // Don't bother to probe page size because getpagesizes() comes with MPSS.
  3134   // ISM is only recommended on old Solaris where there is no MPSS support.
  3135   // Simply choose a conservative value as default.
  3136   *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
  3137                SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M);
  3139   // ISM is available on all supported Solaris versions
  3140   return true;
  3143 // Insertion sort for small arrays (descending order).
  3144 static void insertion_sort_descending(size_t* array, int len) {
  3145   for (int i = 0; i < len; i++) {
  3146     size_t val = array[i];
  3147     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
  3148       size_t tmp = array[key];
  3149       array[key] = array[key - 1];
  3150       array[key - 1] = tmp;
  3155 bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
  3156   getpagesizes_func_type getpagesizes_func =
  3157     CAST_TO_FN_PTR(getpagesizes_func_type, dlsym(RTLD_DEFAULT, "getpagesizes"));
  3158   if (getpagesizes_func == NULL) {
  3159     if (warn) {
  3160       warning("MPSS is not supported by the operating system.");
  3162     return false;
  3165   const unsigned int usable_count = VM_Version::page_size_count();
  3166   if (usable_count == 1) {
  3167     return false;
  3170   // Fill the array of page sizes.
  3171   int n = getpagesizes_func(_page_sizes, page_sizes_max);
  3172   assert(n > 0, "Solaris bug?");
  3173   if (n == page_sizes_max) {
  3174     // Add a sentinel value (necessary only if the array was completely filled
  3175     // since it is static (zeroed at initialization)).
  3176     _page_sizes[--n] = 0;
  3177     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  3179   assert(_page_sizes[n] == 0, "missing sentinel");
  3181   if (n == 1) return false;     // Only one page size available.
  3183   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  3184   // select up to usable_count elements.  First sort the array, find the first
  3185   // acceptable value, then copy the usable sizes to the top of the array and
  3186   // trim the rest.  Make sure to include the default page size :-).
  3187   //
  3188   // A better policy could get rid of the 4M limit by taking the sizes of the
  3189   // important VM memory regions (java heap and possibly the code cache) into
  3190   // account.
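         //
         // A sketch with made-up numbers: if getpagesizes() returned
         // {8K, 64K, 512K, 4M, 32M, 256M}, usable_count were 4, and
         // LargePageSizeInBytes were left at its default, the descending sort
         // plus the 4M limit would leave {4M, 512K, 64K, 8K}, with the base
         // page size as the smallest usable entry and a trailing 0 sentinel.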
  3191   insertion_sort_descending(_page_sizes, n);
  3192   const size_t size_limit =
  3193     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  3194   int beg;
  3195   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  3196   const int end = MIN2((int)usable_count, n) - 1;
  3197   for (int cur = 0; cur < end; ++cur, ++beg) {
  3198     _page_sizes[cur] = _page_sizes[beg];
  3200   _page_sizes[end] = vm_page_size();
  3201   _page_sizes[end + 1] = 0;
  3203   if (_page_sizes[end] > _page_sizes[end - 1]) {
  3204     // Default page size is not the smallest; sort again.
  3205     insertion_sort_descending(_page_sizes, end + 1);
  3207   *page_size = _page_sizes[0];
  3209   return true;
  3212 bool os::large_page_init() {
  3213   if (!UseLargePages) {
  3214     UseISM = false;
  3215     UseMPSS = false;
  3216     return false;
  3219   // print a warning if any large page related flag is specified on command line
  3220   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
  3221                          !FLAG_IS_DEFAULT(UseISM)               ||
  3222                          !FLAG_IS_DEFAULT(UseMPSS)              ||
  3223                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  3224   UseISM = UseISM &&
  3225            Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
  3226   if (UseISM) {
  3227     // ISM disables MPSS to be compatible with old JDK behavior
  3228     UseMPSS = false;
  3229     _page_sizes[0] = _large_page_size;
  3230     _page_sizes[1] = vm_page_size();
  3233   UseMPSS = UseMPSS &&
  3234             Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
  3236   UseLargePages = UseISM || UseMPSS;
  3237   return UseLargePages;
  3240 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
  3241   // Signal to the OS that we want large pages for addresses
  3242   // in the range [start, start + bytes).
  3243   struct memcntl_mha mpss_struct;
  3244   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  3245   mpss_struct.mha_pagesize = align;
  3246   mpss_struct.mha_flags = 0;
  3247   if (memcntl(start, bytes, MC_HAT_ADVISE,
  3248               (caddr_t) &mpss_struct, 0, 0) < 0) {
  3249     debug_only(warning("Attempt to use MPSS failed."));
  3250     return false;
  3252   return true;
  3255 char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
  3256   // "exec" is passed in but not used.  Creating the shared image for
  3257   // the code cache doesn't have an SHM_X executable permission to check.
  3258   assert(UseLargePages && UseISM, "only for ISM large pages");
  3260   size_t size = bytes;
  3261   char* retAddr = NULL;
  3262   int shmid;
  3263   key_t ismKey;
  3265   bool warn_on_failure = UseISM &&
  3266                         (!FLAG_IS_DEFAULT(UseLargePages)         ||
  3267                          !FLAG_IS_DEFAULT(UseISM)                ||
  3268                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
  3269                         );
  3270   char msg[128];
  3272   ismKey = IPC_PRIVATE;
  3274   // Create a large shared memory region to attach to based on size.
  3275   // Currently, size is the total size of the heap
  3276   shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
  3277   if (shmid == -1){
  3278      if (warn_on_failure) {
  3279        jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
  3280        warning(msg);
  3282      return NULL;
  3285   // Attach to the region
  3286   retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
  3287   int err = errno;
  3289   // Remove shmid. If shmat() is successful, the actual shared memory segment
  3290   // will be deleted when it's detached by shmdt() or when the process
  3291   // terminates. If shmat() is not successful this will remove the shared
  3292   // segment immediately.
  3293   shmctl(shmid, IPC_RMID, NULL);
  3295   if (retAddr == (char *) -1) {
  3296     if (warn_on_failure) {
  3297       jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
  3298       warning(msg);
  3300     return NULL;
  3303   return retAddr;
  3306 bool os::release_memory_special(char* base, size_t bytes) {
  3307   // detaching the SHM segment will also delete it, see reserve_memory_special()
  3308   int rslt = shmdt(base);
  3309   return rslt == 0;
  3312 size_t os::large_page_size() {
  3313   return _large_page_size;
  3316 // MPSS allows the application to commit large page memory on demand; with ISM
  3317 // the entire memory region must be allocated as shared memory.
  3318 bool os::can_commit_large_page_memory() {
  3319   return UseISM ? false : true;
  3322 bool os::can_execute_large_page_memory() {
  3323   return UseISM ? false : true;
  3326 static int os_sleep(jlong millis, bool interruptible) {
  3327   const jlong limit = INT_MAX;
  3328   jlong prevtime;
  3329   int res;
  3331   while (millis > limit) {
  3332     if ((res = os_sleep(limit, interruptible)) != OS_OK)
  3333       return res;
  3334     millis -= limit;
  3337   // Restart interrupted polls with new parameters until the proper delay
  3338   // has been completed.
  3340   prevtime = getTimeMillis();
  3342   while (millis > 0) {
  3343     jlong newtime;
  3345     if (!interruptible) {
  3346       // Following assert fails for os::yield_all:
  3347       // assert(!thread->is_Java_thread(), "must not be java thread");
  3348       res = poll(NULL, 0, millis);
  3349     } else {
  3350       JavaThread *jt = JavaThread::current();
  3352       INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
  3353         os::Solaris::clear_interrupted);
  3356     // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
  3357     // Thread.interrupt().
  3359     if((res == OS_ERR) && (errno == EINTR)) {
  3360       newtime = getTimeMillis();
  3361       assert(newtime >= prevtime, "time moving backwards");
  3362     /* Doing prevtime and newtime in microseconds doesn't help precision,
  3363        and trying to round up to avoid lost milliseconds can result in a
  3364        too-short delay. */
  3365       millis -= newtime - prevtime;
  3366       if(millis <= 0)
  3367         return OS_OK;
  3368       prevtime = newtime;
  3369     } else
  3370       return res;
  3373   return OS_OK;
  3376 // Read calls from inside the vm need to perform state transitions
  3377 size_t os::read(int fd, void *buf, unsigned int nBytes) {
  3378   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
  3381 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  3382   assert(thread == Thread::current(),  "thread consistency check");
  3384   // TODO-FIXME: this should be removed.
  3385   // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  3386   // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  3387   // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  3388   // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  3389   // is fooled into believing that the system is making progress. In the code below we block the
  3390 // watcher thread while a safepoint is in progress so that it would not appear as though the
  3391   // system is making progress.
  3392   if (!Solaris::T2_libthread() &&
  3393       thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
  3394     // We now try to acquire the threads lock. Since this lock is held by the VM thread during
  3395     // the entire safepoint, the watcher thread will line up here during the safepoint.
  3396     Threads_lock->lock_without_safepoint_check();
  3397     Threads_lock->unlock();
  3400   if (thread->is_Java_thread()) {
  3401     // This is a JavaThread so we honor the _thread_blocked protocol
  3402     // even for sleeps of 0 milliseconds. This was originally done
  3403     // as a workaround for bug 4338139. However, now we also do it
  3404     // to honor the suspend-equivalent protocol.
  3406     JavaThread *jt = (JavaThread *) thread;
  3407     ThreadBlockInVM tbivm(jt);
  3409     jt->set_suspend_equivalent();
  3410     // cleared by handle_special_suspend_equivalent_condition() or
  3411     // java_suspend_self() via check_and_wait_while_suspended()
  3413     int ret_code;
  3414     if (millis <= 0) {
  3415       thr_yield();
  3416       ret_code = 0;
  3417     } else {
  3418       // The original sleep() implementation did not create an
  3419       // OSThreadWaitState helper for sleeps of 0 milliseconds.
  3420       // I'm preserving that decision for now.
  3421       OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
  3423       ret_code = os_sleep(millis, interruptible);
  3426     // were we externally suspended while we were waiting?
  3427     jt->check_and_wait_while_suspended();
  3429     return ret_code;
  3432   // non-JavaThread from this point on:
  3434   if (millis <= 0) {
  3435     thr_yield();
  3436     return 0;
  3439   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  3441   return os_sleep(millis, interruptible);
  3444 int os::naked_sleep() {
  3445   // %% make the sleep time an integer flag. for now use 1 millisec.
  3446   return os_sleep(1, false);
  3449 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
  3450 void os::infinite_sleep() {
  3451   while (true) {    // sleep forever ...
  3452     ::sleep(100);   // ... 100 seconds at a time
  3456 // Used to convert frequent JVM_Yield() to nops
  3457 bool os::dont_yield() {
  3458   if (DontYieldALot) {
  3459     static hrtime_t last_time = 0;
  3460     hrtime_t diff = getTimeNanos() - last_time;
  3462     if (diff < DontYieldALotInterval * 1000000)
  3463       return true;
  3465     last_time += diff;
  3467     return false;
  3469   else {
  3470     return false;
  3474 // Caveat: Solaris os::yield() causes a thread-state transition whereas
  3475 // the linux and win32 implementations do not.  This should be checked.
  3477 void os::yield() {
  3478   // Yields to all threads with same or greater priority
  3479   os::sleep(Thread::current(), 0, false);
  3482 // Note that yield semantics are defined by the scheduling class to which
  3483 // the thread currently belongs.  Typically, yield will _not_ yield to
  3484 // other equal or higher priority threads that reside on the dispatch queues
  3485 // of other CPUs.
  3487 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
  3490 // On Solaris we found that yield_all doesn't always yield to all other threads.
  3491 // There have been cases where there is a thread ready to execute but it doesn't
  3492 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
  3493 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
  3494 // SIGWAITING signal which will cause a new lwp to be created. So we count the
  3495 // number of times yield_all is called in the one loop and increase the sleep
  3496 // time after 8 attempts. If this fails too we increase the concurrency level
  3497 // so that the starving thread would get an lwp.
  3499 void os::yield_all(int attempts) {
  3500   // Yields to all threads, including threads with lower priorities
  3501   if (attempts == 0) {
  3502     os::sleep(Thread::current(), 1, false);
  3503   } else {
  3504     int iterations = attempts % 30;
  3505     if (iterations == 0 && !os::Solaris::T2_libthread()) {
  3506       // thr_setconcurrency and _getconcurrency make sense only under T1.
  3507       int noofLWPS = thr_getconcurrency();
  3508       if (noofLWPS < (Threads::number_of_threads() + 2)) {
  3509         thr_setconcurrency(thr_getconcurrency() + 1);
  3511     } else if (iterations < 25) {
  3512       os::sleep(Thread::current(), 1, false);
  3513     } else {
  3514       os::sleep(Thread::current(), 10, false);
  3519 // Called from the tight loops to possibly influence time-sharing heuristics
  3520 void os::loop_breaker(int attempts) {
  3521   os::yield_all(attempts);
  3525 // Interface for setting lwp priorities.  If we are using T2 libthread,
  3526 // which forces the use of BoundThreads, or we manually set UseBoundThreads,
  3527 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
  3528 // function is meaningless in this mode, so we must adjust the real lwp's priority.
  3529 // The routines below implement the getting and setting of lwp priorities.
  3530 //
  3531 // Note: There are three priority scales used on Solaris.  Java priorities,
  3532 //       which range from 1 to 10, the libthread "thr_setprio" scale, which ranges
  3533 //       from 0 to 127, and the current scheduling class of the process we
  3534 //       are running in.  This is typically from -60 to +60.
  3535 //       The setting of the lwp priorities is done after a call to thr_setprio,
  3536 //       so Java priorities are mapped to libthread priorities and we map from
  3537 //       the latter to lwp priorities.  We don't keep priorities stored in
  3538 //       Java priorities since some of our worker threads want to set priorities
  3539 //       higher than all Java threads.
  3540 //
  3541 // For related information:
  3542 // (1)  man -s 2 priocntl
  3543 // (2)  man -s 4 priocntl
  3544 // (3)  man dispadmin
  3545 // =    librt.so
  3546 // =    libthread/common/rtsched.c - thrp_setlwpprio().
  3547 // =    ps -cL <pid> ... to validate priority.
  3548 // =    sched_get_priority_min and _max
  3549 //              pthread_create
  3550 //              sched_setparam
  3551 //              pthread_setschedparam
  3552 //
  3553 // Assumptions:
  3554 // +    We assume that all threads in the process belong to the same
  3555 //              scheduling class.   I.e., a homogeneous process.
  3556 // +    Must be root or in the IA group to change the "interactive" attribute.
  3557 //              Priocntl() will fail silently.  The only indication of failure is when
  3558 //              we read-back the value and notice that it hasn't changed.
  3559 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
  3560 // +    For RT, change timeslice as well.  Invariant:
  3561 //              constant "priority integral"
  3562 //              Konst == TimeSlice * (60-Priority)
  3563 //              Given a priority, compute appropriate timeslice.
  3564 // +    Higher numerical values have higher priority.
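       //
       // A concrete illustration of the three scales (TS class assumed, with its
       // usual -60..+60 user priority range): Java priority 5 (NormPriority) maps
       // to 127 on the libthread scale (see java_to_os_priority below), and,
       // ignoring any per-process uprilim clamping, scale_to_lwp_priority(-60, 60, 127)
       // then yields an lwp priority of +60.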
  3566 // sched class attributes
  3567 typedef struct {
  3568         int   schedPolicy;              // classID
  3569         int   maxPrio;
  3570         int   minPrio;
  3571 } SchedInfo;
  3574 static SchedInfo tsLimits, iaLimits, rtLimits;
  3576 #ifdef ASSERT
  3577 static int  ReadBackValidate = 1;
  3578 #endif
  3579 static int  myClass     = 0;
  3580 static int  myMin       = 0;
  3581 static int  myMax       = 0;
  3582 static int  myCur       = 0;
  3583 static bool priocntl_enable = false;
  3586 // Call the version of priocntl suitable for all supported versions
  3587 // of Solaris. We need to call through this wrapper so that we can
  3588 // build on Solaris 9 and run on Solaris 8, 9 and 10.
  3589 //
  3590 // This code should be removed if we ever stop supporting Solaris 8
  3591 // and earlier releases.
  3593 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
  3594 typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
  3595 static priocntl_type priocntl_ptr = priocntl_stub;
  3597 // Stub to set the value of the real pointer, and then call the real
  3598 // function.
  3600 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
  3601   // Try Solaris 8- name only.
  3602   priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
  3603   guarantee(tmp != NULL, "priocntl function not found.");
  3604   priocntl_ptr = tmp;
  3605   return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
  3609 // lwp_priocntl_init
  3610 //
  3611 // Try to determine the priority scale for our process.
  3612 //
  3613 // Return errno or 0 if OK.
  3614 //
  3615 static
  3616 int     lwp_priocntl_init ()
  3618   int rslt;
  3619   pcinfo_t ClassInfo;
  3620   pcparms_t ParmInfo;
  3621   int i;
  3623   if (!UseThreadPriorities) return 0;
  3625   // If we are using bound threads, we need to determine our priority ranges.
  3626   if (os::Solaris::T2_libthread() || UseBoundThreads) {
  3627     // If ThreadPriorityPolicy is 1, switch tables
  3628     if (ThreadPriorityPolicy == 1) {
  3629       for (i = 0 ; i < MaxPriority+1; i++)
  3630         os::java_to_os_priority[i] = prio_policy1[i];
  3633   // Not using bound threads; use the ThreadPriorityPolicy==1 table
  3634   else {
  3635     for ( i = 0 ; i < MaxPriority+1; i++ ) {
  3636       os::java_to_os_priority[i] = prio_policy1[i];
  3638     return 0;
  3642   // Get IDs for a set of well-known scheduling classes.
  3643   // TODO-FIXME: GETCLINFO returns the current # of classes in the
  3644 // system.  We should have a loop that iterates over the
  3645   // classID values, which are known to be "small" integers.
  3647   strcpy(ClassInfo.pc_clname, "TS");
  3648   ClassInfo.pc_cid = -1;
  3649   rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3650   if (rslt < 0) return errno;
  3651   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  3652   tsLimits.schedPolicy = ClassInfo.pc_cid;
  3653   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  3654   tsLimits.minPrio = -tsLimits.maxPrio;
  3656   strcpy(ClassInfo.pc_clname, "IA");
  3657   ClassInfo.pc_cid = -1;
  3658   rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3659   if (rslt < 0) return errno;
  3660   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  3661   iaLimits.schedPolicy = ClassInfo.pc_cid;
  3662   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  3663   iaLimits.minPrio = -iaLimits.maxPrio;
  3665   strcpy(ClassInfo.pc_clname, "RT");
  3666   ClassInfo.pc_cid = -1;
  3667   rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3668   if (rslt < 0) return errno;
  3669   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  3670   rtLimits.schedPolicy = ClassInfo.pc_cid;
  3671   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  3672   rtLimits.minPrio = 0;
  3675   // Query our "current" scheduling class.
  3676   // This will normally be IA, TS or, rarely, RT.
  3677   memset (&ParmInfo, 0, sizeof(ParmInfo));
  3678   ParmInfo.pc_cid = PC_CLNULL;
  3679   rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
  3680   if ( rslt < 0 ) return errno;
  3681   myClass = ParmInfo.pc_cid;
  3683   // We now know our scheduling classId; get specific information about
  3684   // the class.
  3685   ClassInfo.pc_cid = myClass;
  3686   ClassInfo.pc_clname[0] = 0;
  3687   rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
  3688   if ( rslt < 0 ) return errno;
  3690   if (ThreadPriorityVerbose)
  3691     tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
  3693   memset(&ParmInfo, 0, sizeof(pcparms_t));
  3694   ParmInfo.pc_cid = PC_CLNULL;
  3695   rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  3696   if (rslt < 0) return errno;
  3698   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
  3699     myMin = rtLimits.minPrio;
  3700     myMax = rtLimits.maxPrio;
  3701   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
  3702     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
  3703     myMin = iaLimits.minPrio;
  3704     myMax = iaLimits.maxPrio;
  3705     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  3706   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
  3707     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
  3708     myMin = tsLimits.minPrio;
  3709     myMax = tsLimits.maxPrio;
  3710     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  3711   } else {
  3712     // No clue - punt
  3713     if (ThreadPriorityVerbose)
  3714       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
  3715     return EINVAL;      // no clue, punt
  3718   if (ThreadPriorityVerbose)
  3719         tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
  3721   priocntl_enable = true;  // Enable changing priorities
  3722   return 0;
  3725 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
  3726 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
  3727 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
  3730 // scale_to_lwp_priority
  3731 //
  3732 // Convert from the libthread "thr_setprio" scale to our current
  3733 // lwp scheduling class scale.
  3734 //
  3735 static
  3736 int     scale_to_lwp_priority (int rMin, int rMax, int x)
  3738   int v;
  3740   if (x == 127) return rMax;            // avoid round-down
  3741   v = (((x*(rMax-rMin)))/128)+rMin;
  3742   return v;
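       // For instance, with typical TS limits (rMin = -60, rMax = 60) a libthread
       // priority of 64 maps to (64 * 120) / 128 - 60 = 0, the middle of the lwp
       // range, while 127 short-circuits to rMax above.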
  3746 // set_lwp_priority
  3747 //
  3748 // Set the priority of the lwp.  This call should only be made
  3749 // when using bound threads (T2 threads are bound by default).
  3750 //
  3751 int     set_lwp_priority (int ThreadID, int lwpid, int newPrio )
  3753   int rslt;
  3754   int Actual, Expected, prv;
  3755   pcparms_t ParmInfo;                   // for GET-SET
  3756 #ifdef ASSERT
  3757   pcparms_t ReadBack;                   // for readback
  3758 #endif
  3760   // Set priority via PC_GETPARMS, update, PC_SETPARMS
  3761   // Query current values.
  3762   // TODO: accelerate this by eliminating the PC_GETPARMS call.
  3763   // Cache "pcparms_t" in global ParmCache.
  3764   // TODO: elide set-to-same-value
  3766   // If something went wrong on init, don't change priorities.
  3767   if ( !priocntl_enable ) {
  3768     if (ThreadPriorityVerbose)
  3769       tty->print_cr("Trying to set priority but init failed, ignoring");
  3770     return EINVAL;
  3774   // If the lwp hasn't started yet, just return;
  3775   // the _start routine will call us again.
  3776   if ( lwpid <= 0 ) {
  3777     if (ThreadPriorityVerbose) {
  3778       tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
  3779                      ThreadID, newPrio);
  3781     return 0;
  3784   if (ThreadPriorityVerbose) {
  3785     tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
  3786                    ThreadID, lwpid, newPrio);
  3789   memset(&ParmInfo, 0, sizeof(pcparms_t));
  3790   ParmInfo.pc_cid = PC_CLNULL;
  3791   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  3792   if (rslt < 0) return errno;
  3794   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
  3795     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
  3796     rtInfo->rt_pri     = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
  3797     rtInfo->rt_tqsecs  = RT_NOCHANGE;
  3798     rtInfo->rt_tqnsecs = RT_NOCHANGE;
  3799     if (ThreadPriorityVerbose) {
  3800       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
  3802   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
  3803     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
  3804     int maxClamped     = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
  3805     iaInfo->ia_upri    = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
  3806     iaInfo->ia_uprilim = IA_NOCHANGE;
  3807     iaInfo->ia_mode    = IA_NOCHANGE;
  3808     if (ThreadPriorityVerbose) {
  3809       tty->print_cr ("IA: [%d...%d] %d->%d\n",
  3810                iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
  3812   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
  3813     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
  3814     int maxClamped     = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
  3815     prv                = tsInfo->ts_upri;
  3816     tsInfo->ts_upri    = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
  3817     tsInfo->ts_uprilim = IA_NOCHANGE;
  3818     if (ThreadPriorityVerbose) {
  3819       tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
  3820                prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
  3822     if (prv == tsInfo->ts_upri) return 0;
  3823   } else {
  3824     if ( ThreadPriorityVerbose ) {
  3825       tty->print_cr ("Unknown scheduling class\n");
  3827       return EINVAL;    // no clue, punt
  3830   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  3831   if (ThreadPriorityVerbose && rslt) {
  3832     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  3834   if (rslt < 0) return errno;
  3836 #ifdef ASSERT
  3837   // Sanity check: read back what we just attempted to set.
  3838   // In theory it could have changed in the interim ...
  3839   //
  3840   // The priocntl system call is tricky.
  3841   // Sometimes it'll validate the priority value argument and
  3842   // return EINVAL if unhappy.  At other times it fails silently.
  3843   // Readbacks are prudent.
  3845   if (!ReadBackValidate) return 0;
  3847   memset(&ReadBack, 0, sizeof(pcparms_t));
  3848   ReadBack.pc_cid = PC_CLNULL;
  3849   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  3850   assert(rslt >= 0, "priocntl failed");
  3851   Actual = Expected = 0xBAD;
  3852   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  3853   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
  3854     Actual   = RTPRI(ReadBack)->rt_pri;
  3855     Expected = RTPRI(ParmInfo)->rt_pri;
  3856   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
  3857     Actual   = IAPRI(ReadBack)->ia_upri;
  3858     Expected = IAPRI(ParmInfo)->ia_upri;
  3859   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
  3860     Actual   = TSPRI(ReadBack)->ts_upri;
  3861     Expected = TSPRI(ParmInfo)->ts_upri;
  3862   } else {
  3863     if ( ThreadPriorityVerbose ) {
  3864       tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
  3868   if (Actual != Expected) {
  3869     if ( ThreadPriorityVerbose ) {
  3870       tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
  3871              lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
  3874 #endif
  3876   return 0;
  3881 // Solaris only gives access to 128 real priorities at a time,
  3882 // so we expand Java's ten to fill this range.  This would be better
  3883 // if we dynamically adjusted relative priorities.
  3884 //
  3885 // The ThreadPriorityPolicy option allows us to select 2 different
  3886 // priority scales.
  3887 //
  3888 // ThreadPriorityPolicy=0
  3889 // Since the Solaris default priority is MaximumPriority, we do not
  3890 // set a priority lower than Max unless a priority lower than
  3891 // NormPriority is requested.
  3892 //
  3893 // ThreadPriorityPolicy=1
  3894 // This mode causes the priority table to get filled with
  3895 // linear values.  NormPriority gets mapped to 50% of the
  3896 // Maximum priority and so on.  This will cause VM threads
  3897 // to get unfair treatment against other Solaris processes
  3898 // which do not explicitly alter their thread priorities.
  3899 //
  3902 int os::java_to_os_priority[MaxPriority + 1] = {
  3903   -99999,         // 0 Entry should never be used
  3905   0,              // 1 MinPriority
  3906   32,             // 2
  3907   64,             // 3
  3909   96,             // 4
  3910   127,            // 5 NormPriority
  3911   127,            // 6
  3913   127,            // 7
  3914   127,            // 8
  3915   127,            // 9 NearMaxPriority
  3917   127             // 10 MaxPriority
  3918 };
  3921 OSReturn os::set_native_priority(Thread* thread, int newpri) {
  3922   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  3923   if ( !UseThreadPriorities ) return OS_OK;
  3924   int status = thr_setprio(thread->osthread()->thread_id(), newpri);
  3925   if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
  3926     status |= (set_lwp_priority (thread->osthread()->thread_id(),
  3927                     thread->osthread()->lwp_id(), newpri ));
  3928   return (status == 0) ? OS_OK : OS_ERR;
  3932 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  3933   int p;
  3934   if ( !UseThreadPriorities ) {
  3935     *priority_ptr = NormalPriority;
  3936     return OS_OK;
  3938   int status = thr_getprio(thread->osthread()->thread_id(), &p);
  3939   if (status != 0) {
  3940     return OS_ERR;
  3942   *priority_ptr = p;
  3943   return OS_OK;
  3947 // Hint to the underlying OS that a task switch would not be good.
  3948 // Void return because it's a hint and can fail.
  3949 void os::hint_no_preempt() {
  3950   schedctl_start(schedctl_init());
  3953 void os::interrupt(Thread* thread) {
  3954   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
  3956   OSThread* osthread = thread->osthread();
  3958   int isInterrupted = osthread->interrupted();
  3959   if (!isInterrupted) {
  3960       osthread->set_interrupted(true);
  3961       OrderAccess::fence();
  3962       // os::sleep() is implemented with either poll (NULL,0,timeout) or
  3963       // by parking on _SleepEvent.  If the former, thr_kill will unwedge
  3964       // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
  3965       ParkEvent * const slp = thread->_SleepEvent ;
  3966       if (slp != NULL) slp->unpark() ;
  3969   // For JSR166:  unpark after setting status but before thr_kill -dl
  3970   if (thread->is_Java_thread()) {
  3971     ((JavaThread*)thread)->parker()->unpark();
  3974   // Handle interruptible wait() ...
  3975   ParkEvent * const ev = thread->_ParkEvent ;
  3976   if (ev != NULL) ev->unpark() ;
  3978   // When events are used everywhere for os::sleep, then this thr_kill
  3979   // will only be needed if UseVMInterruptibleIO is true.
  3981   if (!isInterrupted) {
  3982     int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
  3983     assert_status(status == 0, status, "thr_kill");
  3985     // Bump thread interruption counter
  3986     RuntimeService::record_thread_interrupt_signaled_count();
  3991 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  3992   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
  3994   OSThread* osthread = thread->osthread();
  3996   bool res = osthread->interrupted();
  3998   // NOTE that since there is no "lock" around these two operations,
  3999   // there is the possibility that the interrupted flag will be
  4000   // "false" but that the interrupt event will be set. This is
  4001   // intentional. The effect of this is that Object.wait() will appear
  4002   // to have a spurious wakeup, which is not harmful, and the
  4003   // possibility is so rare that it is not worth the added complexity
  4004   // to add yet another lock. It has also been recommended not to put
  4005   // the interrupted flag into the os::Solaris::Event structure,
  4006   // because it hides the issue.
  4007   if (res && clear_interrupted) {
  4008     osthread->set_interrupted(false);
  4010   return res;
  4014 void os::print_statistics() {
  4017 int os::message_box(const char* title, const char* message) {
  4018   int i;
  4019   fdStream err(defaultStream::error_fd());
  4020   for (i = 0; i < 78; i++) err.print_raw("=");
  4021   err.cr();
  4022   err.print_raw_cr(title);
  4023   for (i = 0; i < 78; i++) err.print_raw("-");
  4024   err.cr();
  4025   err.print_raw_cr(message);
  4026   for (i = 0; i < 78; i++) err.print_raw("=");
  4027   err.cr();
  4029   char buf[16];
  4030   // Prevent process from exiting upon "read error" without consuming all CPU
  4031   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
  4033   return buf[0] == 'y' || buf[0] == 'Y';
  4036 // A lightweight implementation that does not suspend the target thread and
  4037 // thus returns only a hint. Used for profiling only!
  4038 ExtendedPC os::get_thread_pc(Thread* thread) {
  4039   // Make sure that it is called by the watcher and the Threads lock is owned.
  4040   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  4041   // For now, it is only used to profile the VM Thread.
  4042   assert(thread->is_VM_thread(), "Can only be called for VMThread");
  4043   ExtendedPC epc;
  4045   GetThreadPC_Callback  cb(ProfileVM_lock);
  4046   OSThread *osthread = thread->osthread();
  4047   const int time_to_wait = 400; // 400ms wait for initial response
  4048   int status = cb.interrupt(thread, time_to_wait);
  4050   if (cb.is_done() ) {
  4051     epc = cb.addr();
  4052   } else {
  4053     DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
  4054                               osthread->thread_id(), status););
  4055     // epc is already NULL
  4057   return epc;
  4061 // This does not do anything on Solaris. This is basically a hook for being
  4062 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
  4063 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  4064   f(value, method, args, thread);
  4067 // This routine may be used by user applications as a "hook" to catch signals.
  4068 // The user-defined signal handler must pass unrecognized signals to this
  4069 // routine, and if it returns true (non-zero), then the signal handler must
  4070 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
  4071 // routine will never return false (zero), but instead will execute a VM panic
  4072 // routine that kills the process.
  4073 //
  4074 // If this routine returns false, it is OK to call it again.  This allows
  4075 // the user-defined signal handler to perform checks either before or after
  4076 // the VM performs its own checks.  Naturally, the user code would be making
  4077 // a serious error if it tried to handle an exception (such as a null check
  4078 // or breakpoint) that the VM was generating for its own correct operation.
  4079 //
  4080 // This routine may recognize any of the following kinds of signals:
  4081 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
  4082 // os::Solaris::SIGasync
  4083 // It should be consulted by handlers for any of those signals.
  4084 // It explicitly does not recognize os::Solaris::SIGinterrupt
  4085 //
  4086 // The caller of this routine must pass in the three arguments supplied
  4087 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
  4088 // field of the structure passed to sigaction().  This routine assumes that
  4089 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
  4090 //
  4091 // Note that the VM will print warnings if it detects conflicting signal
  4092 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
  4093 //
  4094 extern "C" int JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
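       //
       // A minimal sketch of such a user-defined handler (the application-side
       // names my_handler and app_specific_handling are hypothetical):
       //
       //   extern "C" void my_handler(int sig, siginfo_t* info, void* uc) {
       //     if (JVM_handle_solaris_signal(sig, info, uc, 0 /* abort_if_unrecognized */)) {
       //       return;                     // the VM recognized and handled the signal
       //     }
       //     app_specific_handling(sig);   // otherwise it belongs to the application
       //   }
       //
       // As noted above, the handler must be installed via sigaction() using the
       // sa_sigaction field with SA_SIGINFO and SA_RESTART in sa_flags.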
  4097 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  4098   JVM_handle_solaris_signal(sig, info, ucVoid, true);
  4101 /* Do not delete - if the guarantee is ever removed, a signal handler (even empty)
  4102    is needed to provoke threads blocked on IO to return an EINTR.
  4103    Note: this explicitly does NOT call JVM_handle_solaris_signal and
  4104    does NOT participate in signal chaining, due to the requirement for
  4105    NOT setting SA_RESTART to make EINTR work. */
  4106 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
  4107    if (UseSignalChaining) {
  4108       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
  4109       if (actp && actp->sa_handler) {
  4110         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
  4115 // This boolean allows users to forward their own non-matching signals
  4116 // to JVM_handle_solaris_signal, harmlessly.
  4117 bool os::Solaris::signal_handlers_are_installed = false;
  4119 // For signal-chaining
  4120 bool os::Solaris::libjsig_is_loaded = false;
  4121 typedef struct sigaction *(*get_signal_t)(int);
  4122 get_signal_t os::Solaris::get_signal_action = NULL;
  4124 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  4125   struct sigaction *actp = NULL;
  4127   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
  4128     // Retrieve the old signal handler from libjsig
  4129     actp = (*get_signal_action)(sig);
  4131   if (actp == NULL) {
  4132     // Retrieve the preinstalled signal handler from jvm
  4133     actp = get_preinstalled_handler(sig);
  4136   return actp;
  4139 static bool call_chained_handler(struct sigaction *actp, int sig,
  4140                                  siginfo_t *siginfo, void *context) {
  4141   // Call the old signal handler
  4142   if (actp->sa_handler == SIG_DFL) {
  4143     // It's more reasonable to let jvm treat it as an unexpected exception
  4144     // instead of taking the default action.
  4145     return false;
  4146   } else if (actp->sa_handler != SIG_IGN) {
  4147     if ((actp->sa_flags & SA_NODEFER) == 0) {
  4148       // automatically block the signal
  4149       sigaddset(&(actp->sa_mask), sig);
  4152     sa_handler_t hand;
  4153     sa_sigaction_t sa;
  4154     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
  4155     // retrieve the chained handler
  4156     if (siginfo_flag_set) {
  4157       sa = actp->sa_sigaction;
  4158     } else {
  4159       hand = actp->sa_handler;
  4162     if ((actp->sa_flags & SA_RESETHAND) != 0) {
  4163       actp->sa_handler = SIG_DFL;
  4166     // try to honor the signal mask
  4167     sigset_t oset;
  4168     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
  4170     // call into the chained handler
  4171     if (siginfo_flag_set) {
  4172       (*sa)(sig, siginfo, context);
  4173     } else {
  4174       (*hand)(sig);
  4177     // restore the signal mask
  4178     thr_sigsetmask(SIG_SETMASK, &oset, 0);
  4180   // Tell jvm's signal handler the signal is taken care of.
  4181   return true;
  4184 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  4185   bool chained = false;
  4186   // signal-chaining
  4187   if (UseSignalChaining) {
  4188     struct sigaction *actp = get_chained_signal_action(sig);
  4189     if (actp != NULL) {
  4190       chained = call_chained_handler(actp, sig, siginfo, context);
  4193   return chained;
  4196 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  4197   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  4198   if (preinstalled_sigs[sig] != 0) {
  4199     return &chainedsigactions[sig];
  4201   return NULL;
  4204 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  4206   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  4207   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  4208   chainedsigactions[sig] = oldAct;
  4209   preinstalled_sigs[sig] = 1;
  4212 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  4213   // Check for overwrite.
  4214   struct sigaction oldAct;
  4215   sigaction(sig, (struct sigaction*)NULL, &oldAct);
  4216   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
  4217                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
  4218   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
  4219       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
  4220       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
  4221     if (AllowUserSignalHandlers || !set_installed) {
  4222       // Do not overwrite; user takes responsibility to forward to us.
  4223       return;
  4224     } else if (UseSignalChaining) {
  4225       if (oktochain) {
  4226         // save the old handler in jvm
  4227         save_preinstalled_handler(sig, oldAct);
  4228       } else {
  4229         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
  4231       // libjsig also interposes the sigaction() call below and saves the
  4232       // old sigaction on its own.
  4233     } else {
  4234       fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
  4238   struct sigaction sigAct;
  4239   sigfillset(&(sigAct.sa_mask));
  4240   sigAct.sa_handler = SIG_DFL;
  4242   sigAct.sa_sigaction = signalHandler;
  4243   // Handle SIGSEGV on alternate signal stack if
  4244   // not using stack banging
  4245   if (!UseStackBanging && sig == SIGSEGV) {
  4246     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  4247   // Interruptible i/o requires SA_RESTART cleared so EINTR
  4248   // is returned instead of restarting system calls
  4249   } else if (sig == os::Solaris::SIGinterrupt()) {
  4250     sigemptyset(&sigAct.sa_mask);
  4251     sigAct.sa_handler = NULL;
  4252     sigAct.sa_flags = SA_SIGINFO;
  4253     sigAct.sa_sigaction = sigINTRHandler;
  4254   } else {
  4255     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  4257   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
  4259   sigaction(sig, &sigAct, &oldAct);
  4261   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
  4262                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  4263   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
  4267 #define DO_SIGNAL_CHECK(sig) \
  4268   if (!sigismember(&check_signal_done, sig)) \
  4269     os::Solaris::check_signal_handler(sig)
  4271 // This method is a periodic task to check for misbehaving JNI applications
  4272 // under CheckJNI; other periodic checks can be added here as well.
  4274 void os::run_periodic_checks() {
  4275   // A big source of grief is hijacking virtual address 0x0 on Solaris,
  4276   // thereby preventing NULL checks.
  4277   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
  4279   if (check_signals == false) return;
  4281   // If the SEGV or BUS handlers are overridden they could potentially prevent
  4282   // generation of the hs*.log in the event of a crash; debugging
  4283   // such a case can be very challenging, so for good measure we
  4284   // check the following:
  4285   DO_SIGNAL_CHECK(SIGSEGV);
  4286   DO_SIGNAL_CHECK(SIGILL);
  4287   DO_SIGNAL_CHECK(SIGFPE);
  4288   DO_SIGNAL_CHECK(SIGBUS);
  4289   DO_SIGNAL_CHECK(SIGPIPE);
  4290   DO_SIGNAL_CHECK(SIGXFSZ);
  4292   // ReduceSignalUsage allows the user to override these handlers
  4293   // see comments at the very top and jvm_solaris.h
  4294   if (!ReduceSignalUsage) {
  4295     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
  4296     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
  4297     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
  4298     DO_SIGNAL_CHECK(BREAK_SIGNAL);
  4301   // See comments above for using JVM1/JVM2 and UseAltSigs
  4302   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  4303   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
  4307 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
  4309 static os_sigaction_t os_sigaction = NULL;
  4311 void os::Solaris::check_signal_handler(int sig) {
  4312   char buf[O_BUFLEN];
  4313   address jvmHandler = NULL;
  4315   struct sigaction act;
  4316   if (os_sigaction == NULL) {
  4317     // only trust the default sigaction, in case it has been interposed
  4318     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
  4319     if (os_sigaction == NULL) return;
  4322   os_sigaction(sig, (struct sigaction*)NULL, &act);
  4324   address thisHandler = (act.sa_flags & SA_SIGINFO)
  4325     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
  4326     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
  4329   switch(sig) {
  4330     case SIGSEGV:
  4331     case SIGBUS:
  4332     case SIGFPE:
  4333     case SIGPIPE:
  4334     case SIGXFSZ:
  4335     case SIGILL:
  4336       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
  4337       break;
  4339     case SHUTDOWN1_SIGNAL:
  4340     case SHUTDOWN2_SIGNAL:
  4341     case SHUTDOWN3_SIGNAL:
  4342     case BREAK_SIGNAL:
  4343       jvmHandler = (address)user_handler();
  4344       break;
  4346     default:
  4347       int intrsig = os::Solaris::SIGinterrupt();
  4348       int asynsig = os::Solaris::SIGasync();
  4350       if (sig == intrsig) {
  4351         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
  4352       } else if (sig == asynsig) {
  4353         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
  4354       } else {
  4355         return;
  4357       break;
  4361   if (thisHandler != jvmHandler) {
  4362     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
  4363     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
  4364     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
  4365     // No need to check this sig any longer
  4366     sigaddset(&check_signal_done, sig);
  4367   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
  4368     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
  4369     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
  4370     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
  4371     // No need to check this sig any longer
  4372     sigaddset(&check_signal_done, sig);
  4375   // Print all the signal handler state
  4376   if (sigismember(&check_signal_done, sig)) {
  4377     print_signal_handlers(tty, buf, O_BUFLEN);
  4382 void os::Solaris::install_signal_handlers() {
  4383   bool libjsigdone = false;
  4384   signal_handlers_are_installed = true;
  4386   // signal-chaining
  4387   typedef void (*signal_setting_t)();
  4388   signal_setting_t begin_signal_setting = NULL;
  4389   signal_setting_t end_signal_setting = NULL;
  4390   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
  4391                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  4392   if (begin_signal_setting != NULL) {
  4393     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
  4394                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
  4395     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
  4396                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
  4397     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
  4398                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
  4399     libjsig_is_loaded = true;
  4400     if (os::Solaris::get_libjsig_version != NULL) {
  4401       libjsigversion =  (*os::Solaris::get_libjsig_version)();
  4403     assert(UseSignalChaining, "should enable signal-chaining");
  4405   if (libjsig_is_loaded) {
  4406     // Tell libjsig the jvm is setting signal handlers
  4407     (*begin_signal_setting)();
  4410   set_signal_handler(SIGSEGV, true, true);
  4411   set_signal_handler(SIGPIPE, true, true);
  4412   set_signal_handler(SIGXFSZ, true, true);
  4413   set_signal_handler(SIGBUS, true, true);
  4414   set_signal_handler(SIGILL, true, true);
  4415   set_signal_handler(SIGFPE, true, true);
  4418   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
  4420     // Pre-1.4.1 libjsig limited signal chaining to signals <= 32, so it
  4421     // cannot register overridable signals which might be > 32
  4422     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
  4423       // Tell libjsig the jvm has finished setting signal handlers
  4424       (*end_signal_setting)();
  4425       libjsigdone = true;
  4429   // Never ok to chain our SIGinterrupt
  4430   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  4431   set_signal_handler(os::Solaris::SIGasync(), true, true);
  4433   if (libjsig_is_loaded && !libjsigdone) {
  4434     // Tell libjsig the jvm has finished setting signal handlers
  4435     (*end_signal_setting)();
  4438   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
  4439   // and if a user signal handler is installed all bets are off
  4440   if (CheckJNICalls) {
  4441     if (libjsig_is_loaded) {
  4442       tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
  4443       check_signals = false;
  4445     if (AllowUserSignalHandlers) {
  4446       tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
  4447       check_signals = false;
  4453 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
  4455 const char * signames[] = {
  4456   "SIG0",
  4457   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  4458   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  4459   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  4460   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  4461   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  4462   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  4463   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  4464   "SIGCANCEL", "SIGLOST"
  4465 };
  4467 const char* os::exception_name(int exception_code, char* buf, size_t size) {
  4468   if (0 < exception_code && exception_code <= SIGRTMAX) {
  4469     // signal
  4470     if (exception_code < sizeof(signames)/sizeof(const char*)) {
  4471        jio_snprintf(buf, size, "%s", signames[exception_code]);
  4472     } else {
  4473        jio_snprintf(buf, size, "SIG%d", exception_code);
  4475     return buf;
  4476   } else {
  4477     return NULL;
  4481 // (Static) wrappers for the new libthread API
  4482 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
  4483 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
  4484 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
  4485 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
  4486 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
  4488 // (Static) wrapper for getisax(2) call.
  4489 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
  4491 // (Static) wrappers for the liblgrp API
  4492 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
  4493 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
  4494 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
  4495 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
  4496 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
  4497 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
  4498 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
  4499 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
  4500 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
  4502 // (Static) wrapper for meminfo() call.
  4503 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
  4505 static address resolve_symbol_lazy(const char* name) {
  4506   address addr = (address) dlsym(RTLD_DEFAULT, name);
  4507   if(addr == NULL) {
  4508     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
  4509     addr = (address) dlsym(RTLD_NEXT, name);
  4511   return addr;
  4514 static address resolve_symbol(const char* name) {
  4515   address addr = resolve_symbol_lazy(name);
  4516   if(addr == NULL) {
  4517     fatal(dlerror());
  4519   return addr;
  4524 // isT2_libthread()
  4525 //
  4526 // Routine to determine if we are currently using the new T2 libthread.
  4527 //
  4528 // We determine if we are using T2 by reading /proc/self/lstatus and
  4529 // looking for a thread with the ASLWP bit set.  If we find this status
  4530 // bit set, we must assume that we are NOT using T2.  The T2 team
  4531 // has approved this algorithm.
  4532 //
  4533 // We need to determine if we are running with the new T2 libthread
  4534 // since setting native thread priorities is handled differently
  4535 // when using this library.  All threads created using T2 are bound
  4536 // threads. Calling thr_setprio is meaningless in this case.
  4537 //
  4538 bool isT2_libthread() {
  4539   static prheader_t * lwpArray = NULL;
  4540   static int lwpSize = 0;
  4541   static int lwpFile = -1;
  4542   lwpstatus_t * that;
  4543   char lwpName [128];
  4544   bool isT2 = false;
  4546 #define ADR(x)  ((uintptr_t)(x))
  4547 #define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
  4549   lwpFile = open("/proc/self/lstatus", O_RDONLY, 0);
  4550   if (lwpFile < 0) {
  4551       if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
  4552       return false;
  4554   lwpSize = 16*1024;
  4555   for (;;) {
  4556     lseek (lwpFile, 0, SEEK_SET);
  4557     lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
  4558     if (read(lwpFile, lwpArray, lwpSize) < 0) {
  4559       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
  4560       break;
  4562     if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
  4563        // We got a good snapshot - now iterate over the list.
  4564       int aslwpcount = 0;
  4565       for (int i = 0; i < lwpArray->pr_nent; i++ ) {
  4566         that = LWPINDEX(lwpArray,i);
  4567         if (that->pr_flags & PR_ASLWP) {
  4568           aslwpcount++;
  4571       if (aslwpcount == 0) isT2 = true;
  4572       break;
  4574     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
  4575     FREE_C_HEAP_ARRAY(char, lwpArray);  // retry.
  4578   FREE_C_HEAP_ARRAY(char, lwpArray);
  4579   close (lwpFile);
  4580   if (ThreadPriorityVerbose) {
  4581     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
  4582     else tty->print_cr("We are not running with a T2 libthread\n");
  4584   return isT2;
  4588 void os::Solaris::libthread_init() {
  4589   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
  4591   // Determine if we are running with the new T2 libthread
  4592   os::Solaris::set_T2_libthread(isT2_libthread());
  4594   lwp_priocntl_init();
  4596   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  4597   if(func == NULL) {
  4598     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
  4599     // Guarantee that this VM is running on a new enough OS (5.6 or
  4600     // later) that it will have a new enough libthread.so.
  4601     guarantee(func != NULL, "libthread.so is too old.");
  4604   // Initialize the new libthread getstate API wrappers
  4605   func = resolve_symbol("thr_getstate");
  4606   os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
  4608   func = resolve_symbol("thr_setstate");
  4609   os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
  4611   func = resolve_symbol("thr_setmutator");
  4612   os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
  4614   func = resolve_symbol("thr_suspend_mutator");
  4615   os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
  4617   func = resolve_symbol("thr_continue_mutator");
  4618   os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
  4620   int size;
  4621   void (*handler_info_func)(address *, int *);
  4622   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  4623   handler_info_func(&handler_start, &size);
  4624   handler_end = handler_start + size;
  4628 int_fnP_mutex_tP os::Solaris::_mutex_lock;
  4629 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
  4630 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
  4631 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
  4632 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
  4633 int os::Solaris::_mutex_scope = USYNC_THREAD;
  4635 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
  4636 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
  4637 int_fnP_cond_tP os::Solaris::_cond_signal;
  4638 int_fnP_cond_tP os::Solaris::_cond_broadcast;
  4639 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
  4640 int_fnP_cond_tP os::Solaris::_cond_destroy;
  4641 int os::Solaris::_cond_scope = USYNC_THREAD;
  4643 void os::Solaris::synchronization_init() {
  4644   if(UseLWPSynchronization) {
  4645     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
  4646     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
  4647     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
  4648     os::Solaris::set_mutex_init(lwp_mutex_init);
  4649     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
  4650     os::Solaris::set_mutex_scope(USYNC_THREAD);
  4652     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
  4653     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
  4654     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
  4655     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
  4656     os::Solaris::set_cond_init(lwp_cond_init);
  4657     os::Solaris::set_cond_destroy(lwp_cond_destroy);
  4658     os::Solaris::set_cond_scope(USYNC_THREAD);
  4660   else {
  4661     os::Solaris::set_mutex_scope(USYNC_THREAD);
  4662     os::Solaris::set_cond_scope(USYNC_THREAD);
  4664     if(UsePthreads) {
  4665       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
  4666       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
  4667       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
  4668       os::Solaris::set_mutex_init(pthread_mutex_default_init);
  4669       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
  4671       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
  4672       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
  4673       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
  4674       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
  4675       os::Solaris::set_cond_init(pthread_cond_default_init);
  4676       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
  4678     else {
  4679       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
  4680       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
  4681       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
  4682       os::Solaris::set_mutex_init(::mutex_init);
  4683       os::Solaris::set_mutex_destroy(::mutex_destroy);
  4685       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
  4686       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
  4687       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
  4688       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
  4689       os::Solaris::set_cond_init(::cond_init);
  4690       os::Solaris::set_cond_destroy(::cond_destroy);
  4695 bool os::Solaris::liblgrp_init() {
  4696   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  4697   if (handle != NULL) {
  4698     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
  4699     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
  4700     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
  4701     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
  4702     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
  4703     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
  4704     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
  4705     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
  4706                                        dlsym(handle, "lgrp_cookie_stale")));
  4708     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
  4709     set_lgrp_cookie(c);
  4710     return true;
  4712   return false;
  4715 void os::Solaris::misc_sym_init() {
  4716   address func;
  4718   // getisax
  4719   func = resolve_symbol_lazy("getisax");
  4720   if (func != NULL) {
  4721     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  4724   // meminfo
  4725   func = resolve_symbol_lazy("meminfo");
  4726   if (func != NULL) {
  4727     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  4731 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  4732   assert(_getisax != NULL, "_getisax not set");
  4733   return _getisax(array, n);
  4736 // Symbol doesn't exist in Solaris 8 pset.h
  4737 #ifndef PS_MYID
  4738 #define PS_MYID -3
  4739 #endif
  4741 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
  4742 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
  4743 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
  4745 void init_pset_getloadavg_ptr(void) {
  4746   pset_getloadavg_ptr =
  4747     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  4748   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
  4749     warning("pset_getloadavg function not found");
  4753 int os::Solaris::_dev_zero_fd = -1;
  4755 // this is called _before_ the global arguments have been parsed
  4756 void os::init(void) {
  4757   _initial_pid = getpid();
  4759   max_hrtime = first_hrtime = gethrtime();
  4761   init_random(1234567);
  4763   page_size = sysconf(_SC_PAGESIZE);
  4764   if (page_size == -1)
  4765     fatal1("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
  4766   init_page_sizes((size_t) page_size);
  4768   Solaris::initialize_system_info();
  4770   // Initialize misc. symbols as soon as possible, so we can use them
  4771   // if we need them.
  4772   Solaris::misc_sym_init();
  4774   int fd = open("/dev/zero", O_RDWR);
  4775   if (fd < 0) {
  4776     fatal1("os::init: cannot open /dev/zero (%s)", strerror(errno));
  4777   } else {
  4778     Solaris::set_dev_zero_fd(fd);
  4780     // Close on exec, child won't inherit.
  4781     fcntl(fd, F_SETFD, FD_CLOEXEC);
  4784   clock_tics_per_sec = CLK_TCK;
  4786   // check if dladdr1() exists; dladdr1 can provide more information than
  4787   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  4788   // and is available on linker patches for 5.7 and 5.8.
  4789   // libdl.so must already have been loaded; this call is just an entry lookup
  4790   void * hdl = dlopen("libdl.so", RTLD_NOW);
  4791   if (hdl)
  4792     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
  4794   // (Solaris only) this switches to calls that actually do locking.
  4795   ThreadCritical::initialize();
  4797   main_thread = thr_self();
  4799   // Constant minimum stack size allowed. It must be at least
  4800   // the minimum of what the OS supports (thr_min_stack()), and
  4801   // enough to allow the thread to get to user bytecode execution.
  4802   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  4803   // If the pagesize of the VM is greater than 8K determine the appropriate
  4804   // number of initial guard pages.  The user can change this with the
  4805   // command line arguments, if needed.
  4806   if (vm_page_size() > 8*K) {
  4807     StackYellowPages = 1;
  4808     StackRedPages = 1;
  4809     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  4813 // To install functions for atexit system call
  4814 extern "C" {
  4815   static void perfMemory_exit_helper() {
  4816     perfMemory_exit();
  4820 // this is called _after_ the global arguments have been parsed
  4821 jint os::init_2(void) {
  4822   // try to enable extended file IO ASAP, see 6431278
  4823   os::Solaris::try_enable_extended_io();
  4825   // Allocate a single page and mark it as readable for safepoint polling.  Also
  4826   // use this first mmap call to check support for MAP_ALIGN.
  4827   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
  4828                                                       page_size,
  4829                                                       MAP_PRIVATE | MAP_ALIGN,
  4830                                                       PROT_READ);
  4831   if (polling_page == NULL) {
  4832     has_map_align = false;
  4833     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
  4834                                                 PROT_READ);
  4837   os::set_polling_page(polling_page);
  4839 #ifndef PRODUCT
  4840   if( Verbose && PrintMiscellaneous )
  4841     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
  4842 #endif
  4844   if (!UseMembar) {
  4845     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
  4846     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
  4847     os::set_memory_serialize_page( mem_serialize_page );
  4849 #ifndef PRODUCT
  4850     if(Verbose && PrintMiscellaneous)
  4851       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
  4852 #endif
  4855   FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
  4857   // Check the minimum allowable stack size for thread creation and for
  4858   // initializing the java system classes, including StackOverflowError - this
  4859   // depends on the page size.  Add a page for compiler2 recursion in the main
  4860   // thread.  Add in BytesPerWord times the page size to account for VM stack
  4861   // usage during class initialization, depending on 32 or 64 bit VM.
  4862   guarantee((Solaris::min_stack_allowed >=
  4863     (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
  4864      COMPILER2_PRESENT(+1)) * page_size),
  4865     "need to increase Solaris::min_stack_allowed on this platform");
  4867   size_t threadStackSizeInBytes = ThreadStackSize * K;
  4868   if (threadStackSizeInBytes != 0 &&
  4869     threadStackSizeInBytes < Solaris::min_stack_allowed) {
  4870     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
  4871                   Solaris::min_stack_allowed/K);
  4872     return JNI_ERR;
  4875   // With a 64kb page size the usable default stack size is
  4876   // quite a bit less.  Increase the stack for 64kb (or any
  4877   // larger than 8kb) pages; this increases virtual memory
  4878   // fragmentation (since we're not creating the stack on a
  4879   // power of 2 boundary).  The real fix for this
  4880   // should be to fix the guard page mechanism.
  4882   if (vm_page_size() > 8*K) {
  4883       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
  4884          ? threadStackSizeInBytes +
  4885            ((StackYellowPages + StackRedPages) * vm_page_size())
  4886          : 0;
  4887       ThreadStackSize = threadStackSizeInBytes/K;
  4890   // Make the stack size a multiple of the page size so that
  4891   // the yellow/red zones can be guarded.
  4892   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
  4893         vm_page_size()));
  4895   Solaris::libthread_init();
  4897   if (UseNUMA) {
  4898     if (!Solaris::liblgrp_init()) {
  4899       UseNUMA = false;
  4900     } else {
  4901       size_t lgrp_limit = os::numa_get_groups_num();
  4902       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
  4903       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
  4904       FREE_C_HEAP_ARRAY(int, lgrp_ids);
  4905       if (lgrp_num < 2) {
  4906         // There's only one locality group, disable NUMA.
  4907         UseNUMA = false;
  4910     if (!UseNUMA && ForceNUMA) {
  4911       UseNUMA = true;
  4915   Solaris::signal_sets_init();
  4916   Solaris::init_signal_mem();
  4917   Solaris::install_signal_handlers();
  4919   if (libjsigversion < JSIG_VERSION_1_4_1) {
  4920     Maxlibjsigsigs = OLDMAXSIGNUM;
  4923   // initialize synchronization primitives to use either thread or
  4924   // lwp synchronization (controlled by UseLWPSynchronization)
  4925   Solaris::synchronization_init();
  4927   if (MaxFDLimit) {
  4928     // Set the number of file descriptors to the maximum.  Print an error
  4929     // if getrlimit/setrlimit fails, but continue regardless.
  4930     struct rlimit nbr_files;
  4931     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
  4932     if (status != 0) {
  4933       if (PrintMiscellaneous && (Verbose || WizardMode))
  4934         perror("os::init_2 getrlimit failed");
  4935     } else {
  4936       nbr_files.rlim_cur = nbr_files.rlim_max;
  4937       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
  4938       if (status != 0) {
  4939         if (PrintMiscellaneous && (Verbose || WizardMode))
  4940           perror("os::init_2 setrlimit failed");
  4945   // Initialize HPI.
  4946   jint hpi_result = hpi::initialize();
  4947   if (hpi_result != JNI_OK) {
  4948     tty->print_cr("There was an error trying to initialize the HPI library.");
  4949     return hpi_result;
  4952   // Calculate the theoretical maximum number of threads to guard against
  4953   // artificial out-of-memory situations, where all available address
  4954   // space has been reserved by thread stacks. Default stack size is 1Mb.
  4955   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
  4956     JavaThread::stack_size_at_create() : (1*K*K);
  4957   assert(pre_thread_stack_size != 0, "Must have a stack");
  4958   // Solaris allows a maximum of 4Gb of address space for user programs. Calculate
  4959   // the thread limit at which we should start doing virtual memory banging;
  4960   // currently that is when the threads have used all but 200Mb of the space.
  4961   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  4962   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
  4964   // at-exit methods are called in the reverse order of their registration.
  4965   // In Solaris 7 and earlier, atexit functions are called on return from
  4966   // main or as a result of a call to exit(3C). There can be only 32 of
  4967   // these functions registered and atexit() does not set errno. In Solaris
  4968   // 8 and later, there is no limit to the number of functions registered
  4969   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  4970   // functions are called upon dlclose(3DL) in addition to return from main
  4971   // and exit(3C).
  4973   if (PerfAllowAtExitRegistration) {
  4974     // only register atexit functions if PerfAllowAtExitRegistration is set.
  4975     // atexit functions can be delayed until process exit time, which
  4976     // can be problematic for embedded VM situations. Embedded VMs should
  4977     // call DestroyJavaVM() to assure that VM resources are released.
  4979     // note: perfMemory_exit_helper atexit function may be removed in
  4980     // the future if the appropriate cleanup code can be added to the
  4981     // VM_Exit VMOperation's doit method.
  4982     if (atexit(perfMemory_exit_helper) != 0) {
  4983       warning("os::init2 atexit(perfMemory_exit_helper) failed");
  4987   // Init pset_loadavg function pointer
  4988   init_pset_getloadavg_ptr();
  4990   return JNI_OK;
  4994 // Mark the polling page as unreadable
  4995 void os::make_polling_page_unreadable(void) {
  4996   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
  4997     fatal("Could not disable polling page");
  4998 };
  5000 // Mark the polling page as readable
  5001 void os::make_polling_page_readable(void) {
  5002   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
  5003     fatal("Could not enable polling page");
  5004 };
  5006 // OS interface.
  5008 int os::stat(const char *path, struct stat *sbuf) {
  5009   char pathbuf[MAX_PATH];
  5010   if (strlen(path) > MAX_PATH - 1) {
  5011     errno = ENAMETOOLONG;
  5012     return -1;
  5014   hpi::native_path(strcpy(pathbuf, path));
  5015   return ::stat(pathbuf, sbuf);
  5019 bool os::check_heap(bool force) { return true; }
  5021 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
  5022 static vsnprintf_t sol_vsnprintf = NULL;
  5024 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  5025   if (!sol_vsnprintf) {
  5026     // search for the named symbol in the objects that were loaded after libjvm
  5027     void* where = RTLD_NEXT;
  5028     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
  5029         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
  5030     if (!sol_vsnprintf){
  5031       // search for the named symbol in the objects that were loaded before libjvm
  5032       where = RTLD_DEFAULT;
  5033       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
  5034         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
  5035       assert(sol_vsnprintf != NULL, "vsnprintf not found");
  5038   return (*sol_vsnprintf)(buf, count, fmt, argptr);
  5042 // Is a (classpath) directory empty?
  5043 bool os::dir_is_empty(const char* path) {
  5044   DIR *dir = NULL;
  5045   struct dirent *ptr;
  5047   dir = opendir(path);
  5048   if (dir == NULL) return true;
  5050   /* Scan the directory */
  5051   bool result = true;
  5052   char buf[sizeof(struct dirent) + MAX_PATH];
  5053   struct dirent *dbuf = (struct dirent *) buf;
  5054   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
  5055     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
  5056       result = false;
  5059   closedir(dir);
  5060   return result;
  5063 // create binary file, rewriting existing file if required
  5064 int os::create_binary_file(const char* path, bool rewrite_existing) {
  5065   int oflags = O_WRONLY | O_CREAT;
  5066   if (!rewrite_existing) {
  5067     oflags |= O_EXCL;
  5069   return ::open64(path, oflags, S_IREAD | S_IWRITE);
  5072 // return current position of file pointer
  5073 jlong os::current_file_offset(int fd) {
  5074   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
  5077 // move file pointer to the specified offset
  5078 jlong os::seek_to_file_offset(int fd, jlong offset) {
  5079   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
  5082 // Map a block of memory.
  5083 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
  5084                      char *addr, size_t bytes, bool read_only,
  5085                      bool allow_exec) {
  5086   int prot;
  5087   int flags;
  5089   if (read_only) {
  5090     prot = PROT_READ;
  5091     flags = MAP_SHARED;
  5092   } else {
  5093     prot = PROT_READ | PROT_WRITE;
  5094     flags = MAP_PRIVATE;
  5097   if (allow_exec) {
  5098     prot |= PROT_EXEC;
  5101   if (addr != NULL) {
  5102     flags |= MAP_FIXED;
  5105   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
  5106                                      fd, file_offset);
  5107   if (mapped_address == MAP_FAILED) {
  5108     return NULL;
  5110   return mapped_address;
  5114 // Remap a block of memory.
  5115 char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
  5116                        char *addr, size_t bytes, bool read_only,
  5117                        bool allow_exec) {
  5118   // same as map_memory() on this OS
  5119   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
  5120                         allow_exec);
  5124 // Unmap a block of memory.
  5125 bool os::unmap_memory(char* addr, size_t bytes) {
  5126   return munmap(addr, bytes) == 0;
  5129 void os::pause() {
  5130   char filename[MAX_PATH];
  5131   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
  5132     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
  5133   } else {
  5134     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  5137   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  5138   if (fd != -1) {
  5139     struct stat buf;
  5140     close(fd);
  5141     while (::stat(filename, &buf) == 0) {
  5142       (void)::poll(NULL, 0, 100);
  5144   } else {
  5145     jio_fprintf(stderr,
  5146       "Could not open pause file '%s', continuing immediately.\n", filename);
  5150 #ifndef PRODUCT
  5151 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
  5152 // Turn this on if you need to trace synch operations.
  5153 // Set RECORD_SYNCH_LIMIT to a large-enough value,
  5154 // and call record_synch_enable and record_synch_disable
  5155 // around the computation of interest.
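// Illustrative usage sketch (not part of the original file): bracket the region of
// interest with record_synch_enable()/record_synch_disable(), then inspect the
// record_synch_* arrays in the debugger.  'do_interesting_work' is a hypothetical
// placeholder for the computation being traced.
//
//   record_synch_enable();
//   do_interesting_work();
//   record_synch_disable();
//   // now examine record_synch_name[0 .. record_synch_count-1] (see the dbx hint below)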
  5157 void record_synch(char* name, bool returning);  // defined below
  5159 class RecordSynch {
  5160   char* _name;
  5161  public:
  5162   RecordSynch(char* name) :_name(name)
  5163                  { record_synch(_name, false); }
  5164   ~RecordSynch() { record_synch(_name,   true);  }
  5165 };
  5167 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
  5168 extern "C" ret name params {                                    \
  5169   typedef ret name##_t params;                                  \
  5170   static name##_t* implem = NULL;                               \
  5171   static int callcount = 0;                                     \
  5172   if (implem == NULL) {                                         \
  5173     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
  5174     if (implem == NULL)  fatal(dlerror());                      \
  5175   }                                                             \
  5176   ++callcount;                                                  \
  5177   RecordSynch _rs(#name);                                       \
  5178   inner;                                                        \
  5179   return implem args;                                           \
  5181 // in dbx, examine callcounts this way:
  5182 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
  5184 #define CHECK_POINTER_OK(p) \
  5185   (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p)))
  5186 #define CHECK_MU \
  5187   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
  5188 #define CHECK_CV \
  5189   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
  5190 #define CHECK_P(p) \
  5191   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
  5193 #define CHECK_MUTEX(mutex_op) \
  5194 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
  5196 CHECK_MUTEX(   mutex_lock)
  5197 CHECK_MUTEX(  _mutex_lock)
  5198 CHECK_MUTEX( mutex_unlock)
  5199 CHECK_MUTEX(_mutex_unlock)
  5200 CHECK_MUTEX( mutex_trylock)
  5201 CHECK_MUTEX(_mutex_trylock)
  5203 #define CHECK_COND(cond_op) \
  5204 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
  5206 CHECK_COND( cond_wait);
  5207 CHECK_COND(_cond_wait);
  5208 CHECK_COND(_cond_wait_cancel);
  5210 #define CHECK_COND2(cond_op) \
  5211 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
  5213 CHECK_COND2( cond_timedwait);
  5214 CHECK_COND2(_cond_timedwait);
  5215 CHECK_COND2(_cond_timedwait_cancel);
  5217 // do the _lwp_* versions too
  5218 #define mutex_t lwp_mutex_t
  5219 #define cond_t  lwp_cond_t
  5220 CHECK_MUTEX(  _lwp_mutex_lock)
  5221 CHECK_MUTEX(  _lwp_mutex_unlock)
  5222 CHECK_MUTEX(  _lwp_mutex_trylock)
  5223 CHECK_MUTEX( __lwp_mutex_lock)
  5224 CHECK_MUTEX( __lwp_mutex_unlock)
  5225 CHECK_MUTEX( __lwp_mutex_trylock)
  5226 CHECK_MUTEX(___lwp_mutex_lock)
  5227 CHECK_MUTEX(___lwp_mutex_unlock)
  5229 CHECK_COND(  _lwp_cond_wait);
  5230 CHECK_COND( __lwp_cond_wait);
  5231 CHECK_COND(___lwp_cond_wait);
  5233 CHECK_COND2(  _lwp_cond_timedwait);
  5234 CHECK_COND2( __lwp_cond_timedwait);
  5235 #undef mutex_t
  5236 #undef cond_t
  5238 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
  5239 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
  5240 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
  5241 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
  5242 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
  5243 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
  5244 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
  5245 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
  5248 // recording machinery:
  5250 enum { RECORD_SYNCH_LIMIT = 200 };
  5251 char* record_synch_name[RECORD_SYNCH_LIMIT];
  5252 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
  5253 bool record_synch_returning[RECORD_SYNCH_LIMIT];
  5254 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
  5255 int record_synch_count = 0;
  5256 bool record_synch_enabled = false;
  5258 // in dbx, examine recorded data this way:
  5259 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
  5261 void record_synch(char* name, bool returning) {
  5262   if (record_synch_enabled) {
  5263     if (record_synch_count < RECORD_SYNCH_LIMIT) {
  5264       record_synch_name[record_synch_count] = name;
  5265       record_synch_returning[record_synch_count] = returning;
  5266       record_synch_thread[record_synch_count] = thr_self();
  5267       record_synch_arg0ptr[record_synch_count] = &name;
  5268       record_synch_count++;
  5270     // put more checking code here:
  5271     // ...
  5275 void record_synch_enable() {
  5276   // start collecting trace data, if not already doing so
  5277   if (!record_synch_enabled)  record_synch_count = 0;
  5278   record_synch_enabled = true;
  5281 void record_synch_disable() {
  5282   // stop collecting trace data
  5283   record_synch_enabled = false;
  5286 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
  5287 #endif // PRODUCT
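// The two constants below use pointer arithmetic on a NULL prusage_t to compute,
// offsetof-style, the byte offset of the pr_utime field and the length of the span
// from pr_utime up to pr_ttime.  os::thread_cpu_time() below pread()s exactly that
// slice of /proc/<pid>/lwp/<lwpid>/lwpusage instead of reading the whole structure.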
  5289 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
  5290 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
  5291                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
  5294 // JVMTI & JVM monitoring and management support
  5295 // The thread_cpu_time() and current_thread_cpu_time() are only
  5296 // supported if is_thread_cpu_time_supported() returns true.
  5297 // They are not supported on Solaris T1.
  5299 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
  5300 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
  5301 // of a thread.
  5302 //
  5303 // current_thread_cpu_time() and thread_cpu_time(Thread *)
  5304 // returns the fast estimate available on the platform.
  5306 // hrtime_t gethrvtime() return value includes
  5307 // user time but does not include system time
  5308 jlong os::current_thread_cpu_time() {
  5309   return (jlong) gethrvtime();
  5312 jlong os::thread_cpu_time(Thread *thread) {
  5313   // return user level CPU time only to be consistent with
  5314   // what current_thread_cpu_time returns.
  5315   // thread_cpu_time_info() must be changed if this changes
  5316   return os::thread_cpu_time(thread, false /* user time only */);
  5319 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  5320   if (user_sys_cpu_time) {
  5321     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  5322   } else {
  5323     return os::current_thread_cpu_time();
  5327 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  5328   char proc_name[64];
  5329   int count;
  5330   prusage_t prusage;
  5331   jlong lwp_time;
  5332   int fd;
  5334   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
  5335                      getpid(),
  5336                      thread->osthread()->lwp_id());
  5337   fd = open(proc_name, O_RDONLY);
  5338   if ( fd == -1 ) return -1;
  5340   do {
  5341     count = pread(fd,
  5342                   (void *)&prusage.pr_utime,
  5343                   thr_time_size,
  5344                   thr_time_off);
  5345   } while (count < 0 && errno == EINTR);
  5346   close(fd);
  5347   if ( count < 0 ) return -1;
  5349   if (user_sys_cpu_time) {
  5350     // user + system CPU time
  5351     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
  5352                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
  5353                  (jlong)prusage.pr_stime.tv_nsec +
  5354                  (jlong)prusage.pr_utime.tv_nsec;
  5355   } else {
  5356     // user level CPU time only
  5357     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
  5358                 (jlong)prusage.pr_utime.tv_nsec;
  5361   return(lwp_time);
  5364 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  5365   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  5366   info_ptr->may_skip_backward = false;    // elapsed time not wall time
  5367   info_ptr->may_skip_forward = false;     // elapsed time not wall time
  5368   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
  5371 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  5372   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  5373   info_ptr->may_skip_backward = false;    // elapsed time not wall time
  5374   info_ptr->may_skip_forward = false;     // elapsed time not wall time
  5375   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
  5378 bool os::is_thread_cpu_time_supported() {
  5379   if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
  5380     return true;
  5381   } else {
  5382     return false;
  5386 // System loadavg support.  Returns -1 if load average cannot be obtained.
  5387 // Return the load average for our processor set if the primitive exists
  5388 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
  5389 int os::loadavg(double loadavg[], int nelem) {
  5390   if (pset_getloadavg_ptr != NULL) {
  5391     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  5392   } else {
  5393     return ::getloadavg(loadavg, nelem);
  5397 //---------------------------------------------------------------------------------
  5398 #ifndef PRODUCT
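// same_page(x, y) below returns x unchanged when x and y fall on the same VM page;
// otherwise it clamps x to the nearest boundary of y's page (the start of the next
// page when x > y, the start of y's page when x < y).  os::find() uses it to keep
// the disassembly window around an address within a single page.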
  5400 static address same_page(address x, address y) {
  5401   intptr_t page_bits = -os::vm_page_size();
  5402   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
  5403     return x;
  5404   else if (x > y)
  5405     return (address)(intptr_t(y) | ~page_bits) + 1;
  5406   else
  5407     return (address)(intptr_t(y) & page_bits);
  5410 bool os::find(address addr) {
  5411   Dl_info dlinfo;
  5412   memset(&dlinfo, 0, sizeof(dlinfo));
  5413   if (dladdr(addr, &dlinfo)) {
  5414 #ifdef _LP64
  5415     tty->print("0x%016lx: ", addr);
  5416 #else
  5417     tty->print("0x%08x: ", addr);
  5418 #endif
  5419     if (dlinfo.dli_sname != NULL)
  5420       tty->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
  5421     else if (dlinfo.dli_fname)
  5422       tty->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
  5423     else
  5424       tty->print("<absolute address>");
  5425     if (dlinfo.dli_fname)  tty->print(" in %s", dlinfo.dli_fname);
  5426 #ifdef _LP64
  5427     if (dlinfo.dli_fbase)  tty->print(" at 0x%016lx", dlinfo.dli_fbase);
  5428 #else
  5429     if (dlinfo.dli_fbase)  tty->print(" at 0x%08x", dlinfo.dli_fbase);
  5430 #endif
  5431     tty->cr();
  5433     if (Verbose) {
  5434       // decode some bytes around the PC
  5435       address begin = same_page(addr-40, addr);
  5436       address end   = same_page(addr+40, addr);
  5437       address       lowest = (address) dlinfo.dli_sname;
  5438       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
  5439       if (begin < lowest)  begin = lowest;
  5440       Dl_info dlinfo2;
  5441       if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
  5442           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
  5443         end = (address) dlinfo2.dli_saddr;
  5444       Disassembler::decode(begin, end);
  5446     return true;
  5448   return false;
  5451 #endif
  5454 // Following function has been added to support HotSparc's libjvm.so running
  5455 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
  5456 // src/solaris/hpi/native_threads in the EVM codebase.
  5457 //
  5458 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
  5459 // libraries and should thus be removed. We will leave it behind for a while
  5460 // until we no longer want to be able to run on top of 1.3.0 Solaris production
  5461 // JDK. See 4341971.
  5463 #define STACK_SLACK 0x800
  5465 extern "C" {
  5466   intptr_t sysThreadAvailableStackWithSlack() {
  5467     stack_t st;
  5468     intptr_t retval, stack_top;
  5469     retval = thr_stksegment(&st);
  5470     assert(retval == 0, "incorrect return value from thr_stksegment");
  5471     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  5472     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  5473     stack_top=(intptr_t)st.ss_sp-st.ss_size;
  5474     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  5478 // Just to get the Kernel build to link on solaris for testing.
  5480 extern "C" {
  5481 class ASGCT_CallTrace;
  5482 void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
  5483   KERNEL_RETURN;
  5487 // ObjectMonitor park-unpark infrastructure ...
  5488 //
  5489 // We implement Solaris and Linux PlatformEvents with the
  5490 // obvious condvar-mutex-flag triple.
  5491 // Another alternative that works quite well is pipes:
  5492 // Each PlatformEvent consists of a pipe-pair.
  5493 // The thread associated with the PlatformEvent
  5494 // calls park(), which reads from the input end of the pipe.
  5495 // Unpark() writes into the other end of the pipe.
  5496 // The write-side of the pipe must be set NDELAY.
  5497 // Unfortunately pipes consume a large # of handles.
  5498 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
  5499 // Using pipes for the 1st few threads might be workable, however.
  5500 //
  5501 // park() is permitted to return spuriously.
  5502 // Callers of park() should wrap the call to park() in
  5503 // an appropriate loop.  A litmus test for the correct
  5504 // usage of park is the following: if park() were modified
  5505 // to immediately return 0 your code should still work,
  5506 // albeit degenerating to a spin loop.
  5507 //
  5508 // An interesting optimization for park() is to use a trylock()
  5509 // to attempt to acquire the mutex.  If the trylock() fails
  5510 // then we know that a concurrent unpark() operation is in-progress.
  5511 // In that case the park() code could simply set _count to 0
  5512 // and return immediately.  The subsequent park() operation *might*
  5513 // return immediately.  That's harmless as the caller of park() is
  5514 // expected to loop.  By using trylock() we will have avoided a
  5515 // context switch caused by contention on the per-thread mutex.
  5516 //
  5517 // TODO-FIXME:
  5518 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
  5519 //     objectmonitor implementation.
  5520 // 2.  Collapse the JSR166 parker event, and the
  5521 //     objectmonitor ParkEvent into a single "Event" construct.
  5522 // 3.  In park() and unpark() add:
  5523 //     assert (Thread::current() == AssociatedWith).
  5524 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
  5525 //     1-out-of-N park() operations will return immediately.
  5526 //
  5527 // _Event transitions in park()
  5528 //   -1 => -1 : illegal
  5529 //    1 =>  0 : pass - return immediately
  5530 //    0 => -1 : block
  5531 //
  5532 // _Event serves as a restricted-range semaphore.
  5533 //
  5534 // Another possible encoding of _Event would be with
  5535 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
  5536 //
  5537 // TODO-FIXME: add DTRACE probes for:
  5538 // 1.   Tx parks
  5539 // 2.   Ty unparks Tx
  5540 // 3.   Tx resumes from park
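// Illustrative caller-side sketch (not part of the original file): since park() may
// return spuriously, a waiter re-tests its condition in a loop; 'condition_is_set'
// is a hypothetical flag maintained by the signalling thread.
//
//   while (!condition_is_set) {
//     ev->park();        // may wake spuriously; the loop re-tests the condition
//   }
//   // ... on the other side, after setting the condition:
//   // ev->unpark();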
  5543 // value determined through experimentation
  5544 #define ROUNDINGFIX 11
  5546 // utility to compute the abstime argument to timedwait.
  5547 // TODO-FIXME: switch from compute_abstime() to unpackTime().
  5549 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  5550   // millis is the relative timeout time
  5551   // abstime will be the absolute timeout time
  5552   if (millis < 0)  millis = 0;
  5553   struct timeval now;
  5554   int status = gettimeofday(&now, NULL);
  5555   assert(status == 0, "gettimeofday");
  5556   jlong seconds = millis / 1000;
  5557   jlong max_wait_period;
  5559   if (UseLWPSynchronization) {
  5560     // forward port of fix for 4275818 (not sleeping long enough)
  5561     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
  5562     // _lwp_cond_timedwait() used a round_down algorithm rather
  5563     // than a round_up. For millis less than our roundfactor
  5564     // it rounded down to 0 which doesn't meet the spec.
  5565     // For millis > roundfactor we may return a bit sooner, but
  5566     // since we can not accurately identify the patch level and
  5567     // this has already been fixed in Solaris 9 and 8 we will
  5568     // leave it alone rather than always rounding down.
  5570     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
  5571     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
  5572     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
  5573     max_wait_period = 21000000;
  5574   } else {
  5575     max_wait_period = 50000000;
  5577   millis %= 1000;
  5578   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
  5579      seconds = max_wait_period;
  5581   abstime->tv_sec = now.tv_sec  + seconds;
  5582   long       usec = now.tv_usec + millis * 1000;
  5583   if (usec >= 1000000) {
  5584     abstime->tv_sec += 1;
  5585     usec -= 1000000;
  5587   abstime->tv_nsec = usec * 1000;
  5588   return abstime;
  5591 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
  5592 // Conceptually TryPark() should be equivalent to park(0).
  5594 int os::PlatformEvent::TryPark() {
  5595   for (;;) {
  5596     const int v = _Event ;
  5597     guarantee ((v == 0) || (v == 1), "invariant") ;
  5598     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  5602 void os::PlatformEvent::park() {           // AKA: down()
  5603   // Invariant: Only the thread associated with the Event/PlatformEvent
  5604   // may call park().
  5605   int v ;
  5606   for (;;) {
  5607       v = _Event ;
  5608       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  5610   guarantee (v >= 0, "invariant") ;
  5611   if (v == 0) {
  5612      // Do this the hard way by blocking ...
  5613      // See http://monaco.sfbay/detail.jsf?cr=5094058.
  5614      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  5615      // Only for SPARC >= V8PlusA
  5616 #if defined(__sparc) && defined(COMPILER2)
  5617      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  5618 #endif
  5619      int status = os::Solaris::mutex_lock(_mutex);
  5620      assert_status(status == 0, status,  "mutex_lock");
  5621      guarantee (_nParked == 0, "invariant") ;
  5622      ++ _nParked ;
  5623      while (_Event < 0) {
  5624         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
  5625         // Treat this the same as if the wait was interrupted
  5626         // With usr/lib/lwp going to kernel, always handle ETIME
  5627         status = os::Solaris::cond_wait(_cond, _mutex);
  5628         if (status == ETIME) status = EINTR ;
  5629         assert_status(status == 0 || status == EINTR, status, "cond_wait");
  5631      -- _nParked ;
  5632      _Event = 0 ;
  5633      status = os::Solaris::mutex_unlock(_mutex);
  5634      assert_status(status == 0, status, "mutex_unlock");
  5638 int os::PlatformEvent::park(jlong millis) {
  5639   guarantee (_nParked == 0, "invariant") ;
  5640   int v ;
  5641   for (;;) {
  5642       v = _Event ;
  5643       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  5645   guarantee (v >= 0, "invariant") ;
  5646   if (v != 0) return OS_OK ;
  5648   int ret = OS_TIMEOUT;
  5649   timestruc_t abst;
  5650   compute_abstime (&abst, millis);
  5652   // See http://monaco.sfbay/detail.jsf?cr=5094058.
  5653   // For Solaris SPARC set fprs.FEF=0 prior to parking.
  5654   // Only for SPARC >= V8PlusA
  5655 #if defined(__sparc) && defined(COMPILER2)
  5656  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  5657 #endif
  5658   int status = os::Solaris::mutex_lock(_mutex);
  5659   assert_status(status == 0, status, "mutex_lock");
  5660   guarantee (_nParked == 0, "invariant") ;
  5661   ++ _nParked ;
  5662   while (_Event < 0) {
  5663      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
  5664      assert_status(status == 0 || status == EINTR ||
  5665                    status == ETIME || status == ETIMEDOUT,
  5666                    status, "cond_timedwait");
  5667      if (!FilterSpuriousWakeups) break ;                // previous semantics
  5668      if (status == ETIME || status == ETIMEDOUT) break ;
  5669      // We consume and ignore EINTR and spurious wakeups.
  5670   }
  5671   -- _nParked ;
  5672   if (_Event >= 0) ret = OS_OK ;
  5673   _Event = 0 ;
  5674   status = os::Solaris::mutex_unlock(_mutex);
  5675   assert_status(status == 0, status, "mutex_unlock");
  5676   return ret;
  5677 }
  5679 void os::PlatformEvent::unpark() {
  5680   int v, AnyWaiters;
  5682   // Increment _Event.
  5683   // Another acceptable implementation would be to simply swap 1
  5684   // into _Event:
  5685   //   if (Swap (&_Event, 1) < 0) {
  5686   //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
  5687   //      if (AnyWaiters) cond_signal (_cond) ;
  5688   //   }
  5690   for (;;) {
  5691     v = _Event ;
  5692     if (v > 0) {
  5693        // The LD of _Event could have been reordered or be satisfied
  5694        // by a read-aside from this processor's write buffer.
  5695        // To avoid problems execute a barrier and then
  5696        // ratify the value.  A degenerate CAS() would also work.
  5697        // Viz., CAS (v+0, &_Event, v) == v).
  5698        OrderAccess::fence() ;
  5699        if (_Event == v) return ;
  5700        continue ;
  5701     }
  5702     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  5703   }
  5705   // If the thread associated with the event was parked, wake it.
  5706   if (v < 0) {
  5707      int status ;
  5708      // Wait for the thread assoc with the PlatformEvent to vacate.
  5709      status = os::Solaris::mutex_lock(_mutex);
  5710      assert_status(status == 0, status, "mutex_lock");
  5711      AnyWaiters = _nParked ;
  5712      status = os::Solaris::mutex_unlock(_mutex);
  5713      assert_status(status == 0, status, "mutex_unlock");
  5714      guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
  5715      if (AnyWaiters != 0) {
  5716        // We intentionally signal *after* dropping the lock
  5717        // to avoid a common class of futile wakeups.
  5718        status = os::Solaris::cond_signal(_cond);
  5719        assert_status(status == 0, status, "cond_signal");
  5720      }
  5721   }
  5722 }
  5724 // JSR166
  5725 // -------------------------------------------------------
  5727 /*
  5728  * The solaris and linux implementations of park/unpark are fairly
  5729  * conservative for now, but can be improved. They currently use a
  5730  * mutex/condvar pair, plus _counter.
  5731  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
  5732  * sets count to 1 and signals condvar.  Only one thread ever waits
  5733  * on the condvar. Contention seen when trying to park implies that someone
  5734  * is unparking you, so don't wait. And spurious returns are fine, so there
  5735  * is no need to track notifications.
  5736  */
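// A minimal, self-contained sketch (not from the original source) of the
// scheme described above, using a plain POSIX mutex/condvar pair.  The names
// (SketchParker and friends) are hypothetical and exist only for illustration;
// the real Parker below uses the os::Solaris:: wrappers and adds safepoint
// and interrupt handling.
struct SketchParker {
  pthread_mutex_t _m;
  pthread_cond_t  _c;
  volatile int    _counter;            // 0 = no permit, 1 = permit available

  SketchParker() : _counter(0) {
    pthread_mutex_init(&_m, NULL);
    pthread_cond_init(&_c, NULL);
  }
  void park() {                        // consume the permit, blocking if absent
    pthread_mutex_lock(&_m);
    if (_counter == 0) {
      pthread_cond_wait(&_c, &_m);     // spurious returns are tolerated by callers
    }
    _counter = 0;
    pthread_mutex_unlock(&_m);
  }
  void unpark() {                      // publish the permit and wake any waiter
    pthread_mutex_lock(&_m);
    _counter = 1;
    pthread_cond_signal(&_c);
    pthread_mutex_unlock(&_m);
  }
};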
  5738 #define NANOSECS_PER_SEC 1000000000
  5739 #define NANOSECS_PER_MILLISEC 1000000
  5740 #define MAX_SECS 100000000
  5742 /*
  5743  * This code is common to linux and solaris and will be moved to a
  5744  * common place in dolphin.
  5746  * The passed in time value is either a relative time in nanoseconds
  5747  * or an absolute time in milliseconds. Either way it has to be unpacked
  5748  * into suitable seconds and nanoseconds components and stored in the
  5749  * given timespec structure.
  5750  * Since the given time is a 64-bit value and the time_t used in the timespec is
  5751  * only a signed 32-bit value (except on 64-bit Linux), we have to watch for
  5752  * overflow if times far in the future are given. Further, on Solaris versions
  5753  * prior to 10 there is a restriction (see cond_timedwait) that the specified
  5754  * number of seconds, in abstime, be less than current_time + 100,000,000.
  5755  * As it will be 28 years before "now + 100000000" overflows, we can
  5756  * ignore overflow and just impose a hard limit on seconds using the value
  5757  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
  5758  * years from "now".
  5759  */
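// For reference (illustrative, not part of the original file):
//   MAX_SECS = 100,000,000 s  ~=  100e6 / (365.25 * 86400)  ~=  3.17 years,
//   so the hard cap of "now + MAX_SECS" respects the pre-Solaris-10
//   cond_timedwait() restriction while still allowing multi-year timeouts.
//   A 32-bit signed time_t does not overflow until January 2038, so the
//   capped value "now + 100,000,000" stays representable for roughly the
//   next 28 years, as noted above.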
  5760 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  5761   assert (time > 0, "convertTime");
  5763   struct timeval now;
  5764   int status = gettimeofday(&now, NULL);
  5765   assert(status == 0, "gettimeofday");
  5767   time_t max_secs = now.tv_sec + MAX_SECS;
  5769   if (isAbsolute) {
  5770     jlong secs = time / 1000;
  5771     if (secs > max_secs) {
  5772       absTime->tv_sec = max_secs;
  5773     }
  5774     else {
  5775       absTime->tv_sec = secs;
  5776     }
  5777     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  5778   }
  5779   else {
  5780     jlong secs = time / NANOSECS_PER_SEC;
  5781     if (secs >= MAX_SECS) {
  5782       absTime->tv_sec = max_secs;
  5783       absTime->tv_nsec = 0;
  5784     }
  5785     else {
  5786       absTime->tv_sec = now.tv_sec + secs;
  5787       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
  5788       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
  5789         absTime->tv_nsec -= NANOSECS_PER_SEC;
  5790         ++absTime->tv_sec; // note: this must be <= max_secs
  5791       }
  5792     }
  5793   }
  5794   assert(absTime->tv_sec >= 0, "tv_sec < 0");
  5795   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  5796   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  5797   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
  5798 }
  5800 void Parker::park(bool isAbsolute, jlong time) {
  5802   // Optional fast-path check:
  5803   // Return immediately if a permit is available.
  5804   if (_counter > 0) {
  5805       _counter = 0 ;
  5806       return ;
  5807   }
  5809   // Optional fast-exit: Check interrupt before trying to wait
  5810   Thread* thread = Thread::current();
  5811   assert(thread->is_Java_thread(), "Must be JavaThread");
  5812   JavaThread *jt = (JavaThread *)thread;
  5813   if (Thread::is_interrupted(thread, false)) {
  5814     return;
  5815   }
  5817   // First, demultiplex/decode time arguments
  5818   timespec absTime;
  5819   if (time < 0) { // don't wait at all
  5820     return;
  5821   }
  5822   if (time > 0) {
  5823     // Warning: this code might be exposed to the old Solaris time
  5824     // round-down bugs.  Grep "roundingFix" for details.
  5825     unpackTime(&absTime, isAbsolute, time);
  5826   }
  5828   // Enter safepoint region
  5829   // Beware of deadlocks such as 6317397.
  5830   // The per-thread Parker:: _mutex is a classic leaf-lock.
  5831   // In particular a thread must never block on the Threads_lock while
  5832   // holding the Parker:: mutex.  If safepoints are pending, both the
  5833   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  5834   ThreadBlockInVM tbivm(jt);
  5836   // Don't wait if we cannot get the lock, since interference arises from
  5837   // unblocking.  Also, check the interrupt before trying to wait.
  5838   if (Thread::is_interrupted(thread, false) ||
  5839       os::Solaris::mutex_trylock(_mutex) != 0) {
  5840     return;
  5841   }
  5843   int status ;
  5845   if (_counter > 0)  { // no wait needed
  5846     _counter = 0;
  5847     status = os::Solaris::mutex_unlock(_mutex);
  5848     assert (status == 0, "invariant") ;
  5849     return;
  5850   }
  5852 #ifdef ASSERT
  5853   // Don't catch signals while blocked; let the running threads have the signals.
  5854   // (This allows a debugger to break into the running thread.)
  5855   sigset_t oldsigs;
  5856   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  5857   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
  5858 #endif
  5860   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  5861   jt->set_suspend_equivalent();
  5862   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  5864   // Do this the hard way by blocking ...
  5865   // See http://monaco.sfbay/detail.jsf?cr=5094058.
  5866   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  5867   // Only for SPARC >= V8PlusA
  5868 #if defined(__sparc) && defined(COMPILER2)
  5869   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  5870 #endif
  5872   if (time == 0) {
  5873     status = os::Solaris::cond_wait (_cond, _mutex) ;
  5874   } else {
  5875     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  5876   }
  5877   // Note that an untimed cond_wait() can sometimes return ETIME on older
  5878   // versions of Solaris.
  5879   assert_status(status == 0 || status == EINTR ||
  5880                 status == ETIME || status == ETIMEDOUT,
  5881                 status, "cond_timedwait");
  5883 #ifdef ASSERT
  5884   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
  5885 #endif
  5886   _counter = 0 ;
  5887   status = os::Solaris::mutex_unlock(_mutex);
  5888   assert_status(status == 0, status, "mutex_unlock") ;
  5890   // If externally suspended while waiting, re-suspend
  5891   if (jt->handle_special_suspend_equivalent_condition()) {
  5892     jt->java_suspend_self();
  5893   }
  5894 }
  5897 void Parker::unpark() {
  5898   int s, status ;
  5899   status = os::Solaris::mutex_lock (_mutex) ;
  5900   assert (status == 0, "invariant") ;
  5901   s = _counter;
  5902   _counter = 1;
  5903   status = os::Solaris::mutex_unlock (_mutex) ;
  5904   assert (status == 0, "invariant") ;
  5906   if (s < 1) {
  5907     status = os::Solaris::cond_signal (_cond) ;
  5908     assert (status == 0, "invariant") ;
  5909   }
  5910 }
  5912 extern char** environ;
  5914 // Run the specified command in a separate process. Return its exit value,
  5915 // or -1 on failure (e.g. can't fork a new process).
  5916 // Unlike system(), this function can be called from signal handler. It
  5917 // doesn't block SIGINT et al.
  5918 int os::fork_and_exec(char* cmd) {
  5919   char * argv[4];
  5920   argv[0] = (char *)"sh";
  5921   argv[1] = (char *)"-c";
  5922   argv[2] = cmd;
  5923   argv[3] = NULL;
  5925   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
  5926   pid_t pid;
  5927   Thread* t = ThreadLocalStorage::get_thread_slow();
  5928   if (t != NULL && t->is_inside_signal_handler()) {
  5929     pid = fork();
  5930   } else {
  5931     pid = fork1();
  5932   }
  5934   if (pid < 0) {
  5935     // fork failed
  5936     warning("fork failed: %s", strerror(errno));
  5937     return -1;
  5939   } else if (pid == 0) {
  5940     // child process
  5942     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
  5943     execve("/usr/bin/sh", argv, environ);
  5945     // execve failed
  5946     _exit(-1);
  5948   } else  {
  5949     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
  5950     // care about the actual exit code, for now.
  5952     int status;
  5954     // Wait for the child process to exit.  This returns immediately if
  5955     // the child has already exited.
  5956     while (waitpid(pid, &status, 0) < 0) {
  5957         switch (errno) {
  5958         case ECHILD: return 0;
  5959         case EINTR: break;
  5960         default: return -1;
  5961         }
  5962     }
  5964     if (WIFEXITED(status)) {
  5965        // The child exited normally; get its exit code.
  5966        return WEXITSTATUS(status);
  5967     } else if (WIFSIGNALED(status)) {
  5968        // The child exited because of a signal
  5969        // The best value to return is 0x80 + signal number,
  5970        // because that is what all Unix shells do, and because
  5971        // it allows callers to distinguish between process exit and
  5972        // process death by signal.
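       // (For example, a child killed by SIGKILL (signal 9) yields
       // 0x80 + 9 = 137.)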
  5973        return 0x80 + WTERMSIG(status);
  5974     } else {
  5975        // Unknown exit code; pass it through
  5976        return status;
  5977     }
  5978   }
  5979 }
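// Illustrative usage (not part of the original file); the command string and
// the error handling below are hypothetical:
//
//   char cmd[] = "ls /tmp";
//   int rc = os::fork_and_exec(cmd);
//   if (rc == -1)        { /* fork or exec failed                     */ }
//   else if (rc >= 0x80) { /* child died on signal number (rc - 0x80) */ }
//   else                 { /* rc is the child's normal exit status    */ }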
