src/os/solaris/vm/os_solaris.cpp

author:      phh
date:        Wed, 01 Apr 2009 16:38:01 -0400
changeset:   1126:956304450e80
parent:      1091:6bdd6923ba16
child:       1329:665be97e8704
permissions: -rw-r--r--

6819213: revive sun.boot.library.path
Summary: Support multiplex and mutable sun.boot.library.path
Reviewed-by: acorn, dcubed, xlu

     1 /*
     2  * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 // do not include  precompiled  header file
    26 # include "incls/_os_solaris.cpp.incl"
    28 // put OS-includes here
    29 # include <dlfcn.h>
    30 # include <errno.h>
    31 # include <link.h>
    32 # include <poll.h>
    33 # include <pthread.h>
    34 # include <pwd.h>
    35 # include <schedctl.h>
    36 # include <setjmp.h>
    37 # include <signal.h>
    38 # include <stdio.h>
    39 # include <alloca.h>
    40 # include <sys/filio.h>
    41 # include <sys/ipc.h>
    42 # include <sys/lwp.h>
    43 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
    44 # include <sys/mman.h>
    45 # include <sys/processor.h>
    46 # include <sys/procset.h>
    47 # include <sys/pset.h>
    48 # include <sys/resource.h>
    49 # include <sys/shm.h>
    50 # include <sys/socket.h>
    51 # include <sys/stat.h>
    52 # include <sys/systeminfo.h>
    53 # include <sys/time.h>
    54 # include <sys/times.h>
    55 # include <sys/types.h>
    56 # include <sys/wait.h>
    57 # include <sys/utsname.h>
    58 # include <thread.h>
    59 # include <unistd.h>
    60 # include <sys/priocntl.h>
    61 # include <sys/rtpriocntl.h>
    62 # include <sys/tspriocntl.h>
    63 # include <sys/iapriocntl.h>
    64 # include <sys/loadavg.h>
    65 # include <string.h>
    67 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
    68 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
    70 #define MAX_PATH (2 * K)
    72 // for timer info max values which include all bits
    73 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
    75 #ifdef _GNU_SOURCE
    76 // See bug #6514594
    77 extern "C" int madvise(caddr_t, size_t, int);
    78 extern "C"  int memcntl(caddr_t addr, size_t len, int cmd, caddr_t  arg,
    79      int attr, int mask);
    80 #endif //_GNU_SOURCE
    82 /*
    83   MPSS Changes Start.
    84   The JVM binary needs to be built and run on pre-Solaris 9
    85   systems, but the constants needed by MPSS are only in Solaris 9
    86   header files.  They are textually replicated here to allow
    87   building on earlier systems.  Once building on Solaris 8 is
    88   no longer a requirement, these #defines can be replaced by ordinary
    89   system .h inclusion.
     91   In earlier versions of the JDK and Solaris, we used ISM for large pages.
     92   But ISM requires shared memory to achieve this and thus has many caveats.
     93   MPSS is fully transparent and is a cleaner way to get large pages.
     94   Although we still keep ISM for backward compatibility, as well as to
     95   allow large pages on older systems, it is recommended that MPSS be
     96   used on Solaris 9 and above.
    98 */
   100 #ifndef MC_HAT_ADVISE
   102 struct memcntl_mha {
   103   uint_t          mha_cmd;        /* command(s) */
   104   uint_t          mha_flags;
   105   size_t          mha_pagesize;
   106 };
   107 #define MC_HAT_ADVISE   7       /* advise hat map size */
   108 #define MHA_MAPSIZE_VA  0x1     /* set preferred page size */
   109 #define MAP_ALIGN       0x200   /* addr specifies alignment */
   111 #endif
   112 // MPSS Changes End.
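        // Illustrative sketch: with MPSS, a preferred page size for an already-mapped
        // range would be advised via memcntl() using the memcntl_mha structure above;
        // addr, bytes and page_sz below are hypothetical placeholders:
        //
        //   struct memcntl_mha mha;
        //   mha.mha_cmd      = MHA_MAPSIZE_VA;
        //   mha.mha_flags    = 0;
        //   mha.mha_pagesize = page_sz;
        //   (void) memcntl(addr, bytes, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0);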
   115 // Here are some liblgrp types from sys/lgrp_user.h to be able to
   116 // compile on older systems without this header file.
   118 #ifndef MADV_ACCESS_LWP
   119 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
   120 #endif
   121 #ifndef MADV_ACCESS_MANY
   122 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
   123 #endif
   125 #ifndef LGRP_RSRC_CPU
   126 # define LGRP_RSRC_CPU           0       /* CPU resources */
   127 #endif
   128 #ifndef LGRP_RSRC_MEM
   129 # define LGRP_RSRC_MEM           1       /* memory resources */
   130 #endif
   132 // Some more macros from sys/mman.h that are not present in Solaris 8.
   134 #ifndef MAX_MEMINFO_CNT
   135 /*
   136  * info_req request type definitions for meminfo
   137  * request types starting with MEMINFO_V are used for Virtual addresses
   138  * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
   139  * addresses
   140  */
   141 # define MEMINFO_SHIFT           16
   142 # define MEMINFO_MASK            (0xFF << MEMINFO_SHIFT)
   143 # define MEMINFO_VPHYSICAL       (0x01 << MEMINFO_SHIFT) /* get physical addr */
   144 # define MEMINFO_VLGRP           (0x02 << MEMINFO_SHIFT) /* get lgroup */
   145 # define MEMINFO_VPAGESIZE       (0x03 << MEMINFO_SHIFT) /* size of phys page */
   146 # define MEMINFO_VREPLCNT        (0x04 << MEMINFO_SHIFT) /* no. of replica */
   147 # define MEMINFO_VREPL           (0x05 << MEMINFO_SHIFT) /* physical replica */
   148 # define MEMINFO_VREPL_LGRP      (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
   149 # define MEMINFO_PLGRP           (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */
   151 /* maximum number of addresses meminfo() can process at a time */
   152 # define MAX_MEMINFO_CNT 256
   154 /* maximum number of request types */
   155 # define MAX_MEMINFO_REQ 31
   156 #endif
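        // Illustrative sketch of how the request types above feed meminfo(2);
        // vaddr is a hypothetical virtual address, and the exact validity-bit
        // handling should be checked against the meminfo(2) man page:
        //
        //   uint64_t addr  = (uint64_t)(uintptr_t)vaddr;
        //   uint_t   req[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
        //   uint64_t out[2];
        //   uint_t   valid;
        //   if (meminfo(&addr, 1, req, 2, out, &valid) == 0) {
        //     // out[0] holds the lgroup and out[1] the physical page size
        //     // backing vaddr, subject to the bits set in valid.
        //   }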
   158 // see thr_setprio(3T) for the basis of these numbers
   159 #define MinimumPriority 0
   160 #define NormalPriority  64
   161 #define MaximumPriority 127
   163 // Values for ThreadPriorityPolicy == 1
   164 int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
   165                                         80, 96, 112, 124, 127 };
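        // Index 0 is an unused sentinel (Java thread priorities run 1..10 == MaxPriority);
        // entries 1..10 map Java priorities onto the 0..127 thr_setprio() range above.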
   167 // System parameters used internally
   168 static clock_t clock_tics_per_sec = 100;
   170 // For diagnostics to print a message once. see run_periodic_checks
   171 static bool check_addr0_done = false;
   172 static sigset_t check_signal_done;
   173 static bool check_signals = true;
   175 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
   176 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
   178 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
   181 // "default" initializers for missing libc APIs
   182 extern "C" {
   183   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
   184   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
   186   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
   187   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
   188 }
   190 // "default" initializers for pthread-based synchronization
   191 extern "C" {
   192   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
   193   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
   194 }
   196 // Thread Local Storage
   197 // This is common to all Solaris platforms so it is defined here,
   198 // in this common file.
   199 // The declarations are in the os_cpu threadLS*.hpp files.
   200 //
   201 // Static member initialization for TLS
   202 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
   204 #ifndef PRODUCT
   205 #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
   207 int ThreadLocalStorage::_tcacheHit = 0;
   208 int ThreadLocalStorage::_tcacheMiss = 0;
   210 void ThreadLocalStorage::print_statistics() {
   211   int total = _tcacheMiss+_tcacheHit;
   212   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
   213                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
   214 }
   215 #undef _PCT
   216 #endif // PRODUCT
   218 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
   219                                                         int index) {
   220   Thread *thread = get_thread_slow();
   221   if (thread != NULL) {
   222     address sp = os::current_stack_pointer();
   223     guarantee(thread->_stack_base == NULL ||
   224               (sp <= thread->_stack_base &&
   225                  sp >= thread->_stack_base - thread->_stack_size) ||
   226                is_error_reported(),
   227               "sp must be inside of selected thread stack");
   229     thread->_self_raw_id = raw_id;  // mark for quick retrieval
   230     _get_thread_cache[ index ] = thread;
   231   }
   232   return thread;
   233 }
   236 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
   237 #define NO_CACHED_THREAD ((Thread*)all_zero)
   239 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
   241   // Store the new value before updating the cache to prevent a race
   242   // between get_thread_via_cache_slowly() and this store operation.
   243   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
   245   // Update thread cache with new thread if setting on thread create,
   246   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
   247   uintptr_t raw = pd_raw_thread_id();
   248   int ix = pd_cache_index(raw);
   249   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
   250 }
   252 void ThreadLocalStorage::pd_init() {
   253   for (int i = 0; i < _pd_cache_size; i++) {
   254     _get_thread_cache[i] = NO_CACHED_THREAD;
   255   }
   256 }
   258 // Invalidate all the caches (happens to be the same as pd_init).
   259 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
   261 #undef NO_CACHED_THREAD
   263 // END Thread Local Storage
   265 static inline size_t adjust_stack_size(address base, size_t size) {
   266   if ((ssize_t)size < 0) {
   267     // 4759953: Compensate for ridiculous stack size.
   268     size = max_intx;
   269   }
   270   if (size > (size_t)base) {
   271     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
   272     size = (size_t)base;
   273   }
   274   return size;
   275 }
   277 static inline stack_t get_stack_info() {
   278   stack_t st;
   279   int retval = thr_stksegment(&st);
   280   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
   281   assert(retval == 0, "incorrect return value from thr_stksegment");
   282   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
   283   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
   284   return st;
   285 }
   287 address os::current_stack_base() {
   288   int r = thr_main() ;
   289   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
   290   bool is_primordial_thread = r;
   292   // Workaround 4352906, avoid calls to thr_stksegment by
   293   // thr_main after the first one (it looks like we trash
   294   // some data, causing the value for ss_sp to be incorrect).
   295   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
   296     stack_t st = get_stack_info();
   297     if (is_primordial_thread) {
   298       // cache initial value of stack base
   299       os::Solaris::_main_stack_base = (address)st.ss_sp;
   300     }
   301     return (address)st.ss_sp;
   302   } else {
   303     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
   304     return os::Solaris::_main_stack_base;
   305   }
   306 }
   308 size_t os::current_stack_size() {
   309   size_t size;
   311   int r = thr_main() ;
   312   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
   313   if(!r) {
   314     size = get_stack_info().ss_size;
   315   } else {
   316     struct rlimit limits;
   317     getrlimit(RLIMIT_STACK, &limits);
   318     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
   319   }
   320   // base may not be page aligned
   321   address base = current_stack_base();
    322   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
   323   return (size_t)(base - bottom);
   324 }
   326 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
   327   return localtime_r(clock, res);
   328 }
   330 // interruptible infrastructure
   332 // setup_interruptible saves the thread state before going into an
   333 // interruptible system call.
   334 // The saved state is used to restore the thread to
   335 // its former state whether or not an interrupt is received.
   336 // Used by classloader os::read
   337 // hpi calls skip this layer and stay in _thread_in_native
   339 void os::Solaris::setup_interruptible(JavaThread* thread) {
   341   JavaThreadState thread_state = thread->thread_state();
   343   assert(thread_state != _thread_blocked, "Coming from the wrong thread");
   344   assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
   345   OSThread* osthread = thread->osthread();
   346   osthread->set_saved_interrupt_thread_state(thread_state);
   347   thread->frame_anchor()->make_walkable(thread);
   348   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
   349 }
   351 // Version of setup_interruptible() for threads that are already in
   352 // _thread_blocked. Used by os_sleep().
   353 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
   354   thread->frame_anchor()->make_walkable(thread);
   355 }
   357 JavaThread* os::Solaris::setup_interruptible() {
   358   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
   359   setup_interruptible(thread);
   360   return thread;
   361 }
   363 void os::Solaris::try_enable_extended_io() {
   364   typedef int (*enable_extended_FILE_stdio_t)(int, int);
   366   if (!UseExtendedFileIO) {
   367     return;
   368   }
   370   enable_extended_FILE_stdio_t enabler =
   371     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
   372                                          "enable_extended_FILE_stdio");
   373   if (enabler) {
   374     enabler(-1, -1);
   375   }
   376 }
   379 #ifdef ASSERT
   381 JavaThread* os::Solaris::setup_interruptible_native() {
   382   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
   383   JavaThreadState thread_state = thread->thread_state();
   384   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
   385   return thread;
   386 }
   388 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
   389   JavaThreadState thread_state = thread->thread_state();
   390   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
   391 }
   392 #endif
   394 // cleanup_interruptible reverses the effects of setup_interruptible
   395 // setup_interruptible_already_blocked() does not need any cleanup.
   397 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
   398   OSThread* osthread = thread->osthread();
   400   ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
   401 }
   403 // I/O interruption related counters called in _INTERRUPTIBLE
   405 void os::Solaris::bump_interrupted_before_count() {
   406   RuntimeService::record_interrupted_before_count();
   407 }
   409 void os::Solaris::bump_interrupted_during_count() {
   410   RuntimeService::record_interrupted_during_count();
   411 }
   413 static int _processors_online = 0;
   415          jint os::Solaris::_os_thread_limit = 0;
   416 volatile jint os::Solaris::_os_thread_count = 0;
   418 julong os::available_memory() {
   419   return Solaris::available_memory();
   420 }
   422 julong os::Solaris::available_memory() {
   423   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
   424 }
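        // _SC_AVPHYS_PAGES is the number of physical pages not currently in use,
        // so the product with the page size is the available physical memory in bytes.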
   426 julong os::Solaris::_physical_memory = 0;
   428 julong os::physical_memory() {
   429    return Solaris::physical_memory();
   430 }
   432 julong os::allocatable_physical_memory(julong size) {
   433 #ifdef _LP64
   434    return size;
   435 #else
   436    julong result = MIN2(size, (julong)3835*M);
   437    if (!is_allocatable(result)) {
    438      // Memory allocations will be aligned, but the alignment
    439      // is not known at this point.  Alignments will be at most
    440      // LargePageSizeInBytes.  Protect allocations from
    441      // alignments up to illegal values; at this point,
    442      // 2G is illegal.
   443      julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes;
   444      result =  MIN2(size, reasonable_size);
   445    }
   446    return result;
   447 #endif
   448 }
   450 static hrtime_t first_hrtime = 0;
   451 static const hrtime_t hrtime_hz = 1000*1000*1000;
   452 const int LOCK_BUSY = 1;
   453 const int LOCK_FREE = 0;
   454 const int LOCK_INVALID = -1;
   455 static volatile hrtime_t max_hrtime = 0;
   456 static volatile int max_hrtime_lock = LOCK_FREE;     // Update counter with LSB as lock-in-progress
   459 void os::Solaris::initialize_system_info() {
   460   _processor_count = sysconf(_SC_NPROCESSORS_CONF);
   461   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
   462   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
   463 }
   465 int os::active_processor_count() {
   466   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
   467   pid_t pid = getpid();
   468   psetid_t pset = PS_NONE;
   469   // Are we running in a processor set or is there any processor set around?
   470   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
   471     uint_t pset_cpus;
   472     // Query the number of cpus available to us.
   473     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
   474       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
   475       _processors_online = pset_cpus;
   476       return pset_cpus;
   477     }
   478   }
   479   // Otherwise return number of online cpus
   480   return online_cpus;
   481 }
   483 static bool find_processors_in_pset(psetid_t        pset,
   484                                     processorid_t** id_array,
   485                                     uint_t*         id_length) {
   486   bool result = false;
   487   // Find the number of processors in the processor set.
   488   if (pset_info(pset, NULL, id_length, NULL) == 0) {
   489     // Make up an array to hold their ids.
   490     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
   491     // Fill in the array with their processor ids.
   492     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
   493       result = true;
   494     }
   495   }
   496   return result;
   497 }
   499 // Callers of find_processors_online() must tolerate imprecise results --
   500 // the system configuration can change asynchronously because of DR
   501 // or explicit psradm operations.
   502 //
   503 // We also need to take care that the loop (below) terminates as the
   504 // number of processors online can change between the _SC_NPROCESSORS_ONLN
   505 // request and the loop that builds the list of processor ids.   Unfortunately
   506 // there's no reliable way to determine the maximum valid processor id,
   507 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
   508 // man pages, which claim the processor id set is "sparse, but
   509 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
   510 // exit the loop.
   511 //
   512 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
   513 // not available on S8.0.
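        // For illustration, the dynamic bound mentioned above would be obtained with
        //   processorid_t max_id = (processorid_t) sysconf(_SC_CPUID_MAX);
        // but that value is not available on S8.0, hence the MAX_PROCESSOR_ID constant.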
   515 static bool find_processors_online(processorid_t** id_array,
   516                                    uint*           id_length) {
   517   const processorid_t MAX_PROCESSOR_ID = 100000 ;
   518   // Find the number of processors online.
   519   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
   520   // Make up an array to hold their ids.
   521   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
   522   // Processors need not be numbered consecutively.
   523   long found = 0;
   524   processorid_t next = 0;
   525   while (found < *id_length && next < MAX_PROCESSOR_ID) {
   526     processor_info_t info;
   527     if (processor_info(next, &info) == 0) {
   528       // NB, PI_NOINTR processors are effectively online ...
   529       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
   530         (*id_array)[found] = next;
   531         found += 1;
   532       }
   533     }
   534     next += 1;
   535   }
   536   if (found < *id_length) {
   537       // The loop above didn't identify the expected number of processors.
   538       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
   539       // and re-running the loop, above, but there's no guarantee of progress
   540       // if the system configuration is in flux.  Instead, we just return what
   541       // we've got.  Note that in the worst case find_processors_online() could
   542       // return an empty set.  (As a fall-back in the case of the empty set we
   543       // could just return the ID of the current processor).
   544       *id_length = found ;
   545   }
   547   return true;
   548 }
   550 static bool assign_distribution(processorid_t* id_array,
   551                                 uint           id_length,
   552                                 uint*          distribution,
   553                                 uint           distribution_length) {
   554   // We assume we can assign processorid_t's to uint's.
   555   assert(sizeof(processorid_t) == sizeof(uint),
   556          "can't convert processorid_t to uint");
   557   // Quick check to see if we won't succeed.
   558   if (id_length < distribution_length) {
   559     return false;
   560   }
   561   // Assign processor ids to the distribution.
   562   // Try to shuffle processors to distribute work across boards,
   563   // assuming 4 processors per board.
   564   const uint processors_per_board = ProcessDistributionStride;
   565   // Find the maximum processor id.
   566   processorid_t max_id = 0;
   567   for (uint m = 0; m < id_length; m += 1) {
   568     max_id = MAX2(max_id, id_array[m]);
   569   }
   570   // The next id, to limit loops.
   571   const processorid_t limit_id = max_id + 1;
   572   // Make up markers for available processors.
   573   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id);
   574   for (uint c = 0; c < limit_id; c += 1) {
   575     available_id[c] = false;
   576   }
   577   for (uint a = 0; a < id_length; a += 1) {
   578     available_id[id_array[a]] = true;
   579   }
   580   // Step by "boards", then by "slot", copying to "assigned".
   581   // NEEDS_CLEANUP: The assignment of processors should be stateful,
   582   //                remembering which processors have been assigned by
   583   //                previous calls, etc., so as to distribute several
    584   //                independent calls of this method.  It would be nice
    585   //                to have an API that lets us ask
   586   //                how many processes are bound to a processor,
   587   //                but we don't have that, either.
   588   //                In the short term, "board" is static so that
   589   //                subsequent distributions don't all start at board 0.
   590   static uint board = 0;
   591   uint assigned = 0;
   592   // Until we've found enough processors ....
   593   while (assigned < distribution_length) {
   594     // ... find the next available processor in the board.
   595     for (uint slot = 0; slot < processors_per_board; slot += 1) {
   596       uint try_id = board * processors_per_board + slot;
   597       if ((try_id < limit_id) && (available_id[try_id] == true)) {
   598         distribution[assigned] = try_id;
   599         available_id[try_id] = false;
   600         assigned += 1;
   601         break;
   602       }
   603     }
   604     board += 1;
   605     if (board * processors_per_board + 0 >= limit_id) {
   606       board = 0;
   607     }
   608   }
   609   if (available_id != NULL) {
   610     FREE_C_HEAP_ARRAY(bool, available_id);
   611   }
   612   return true;
   613 }
   615 bool os::distribute_processes(uint length, uint* distribution) {
   616   bool result = false;
   617   // Find the processor id's of all the available CPUs.
   618   processorid_t* id_array  = NULL;
   619   uint           id_length = 0;
   620   // There are some races between querying information and using it,
   621   // since processor sets can change dynamically.
   622   psetid_t pset = PS_NONE;
   623   // Are we running in a processor set?
   624   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
   625     result = find_processors_in_pset(pset, &id_array, &id_length);
   626   } else {
   627     result = find_processors_online(&id_array, &id_length);
   628   }
   629   if (result == true) {
   630     if (id_length >= length) {
   631       result = assign_distribution(id_array, id_length, distribution, length);
   632     } else {
   633       result = false;
   634     }
   635   }
   636   if (id_array != NULL) {
   637     FREE_C_HEAP_ARRAY(processorid_t, id_array);
   638   }
   639   return result;
   640 }
   642 bool os::bind_to_processor(uint processor_id) {
   643   // We assume that a processorid_t can be stored in a uint.
   644   assert(sizeof(uint) == sizeof(processorid_t),
   645          "can't convert uint to processorid_t");
   646   int bind_result =
   647     processor_bind(P_LWPID,                       // bind LWP.
   648                    P_MYID,                        // bind current LWP.
   649                    (processorid_t) processor_id,  // id.
   650                    NULL);                         // don't return old binding.
   651   return (bind_result == 0);
   652 }
   654 bool os::getenv(const char* name, char* buffer, int len) {
   655   char* val = ::getenv( name );
   656   if ( val == NULL
   657   ||   strlen(val) + 1  >  len ) {
   658     if (len > 0)  buffer[0] = 0; // return a null string
   659     return false;
   660   }
   661   strcpy( buffer, val );
   662   return true;
   663 }
    666 // Return true if the process has special (setuid/setgid) privileges, i.e. its real and effective ids differ.
   668 bool os::have_special_privileges() {
   669   static bool init = false;
   670   static bool privileges = false;
   671   if (!init) {
   672     privileges = (getuid() != geteuid()) || (getgid() != getegid());
   673     init = true;
   674   }
   675   return privileges;
   676 }
   679 static char* get_property(char* name, char* buffer, int buffer_size) {
   680   if (os::getenv(name, buffer, buffer_size)) {
   681     return buffer;
   682   }
   683   static char empty[] = "";
   684   return empty;
   685 }
   688 void os::init_system_properties_values() {
   689   char arch[12];
   690   sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
   692   // The next steps are taken in the product version:
   693   //
   694   // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
   695   // This library should be located at:
   696   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
   697   //
   698   // If "/jre/lib/" appears at the right place in the path, then we
   699   // assume libjvm[_g].so is installed in a JDK and we use this path.
   700   //
   701   // Otherwise exit with message: "Could not create the Java virtual machine."
   702   //
   703   // The following extra steps are taken in the debugging version:
   704   //
   705   // If "/jre/lib/" does NOT appear at the right place in the path
   706   // instead of exit check for $JAVA_HOME environment variable.
   707   //
   708   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
   709   // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
   710   // it looks like libjvm[_g].so is installed there
   711   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
   712   //
   713   // Otherwise exit.
   714   //
   715   // Important note: if the location of libjvm.so changes this
   716   // code needs to be changed accordingly.
   718   // The next few definitions allow the code to be verbatim:
   719 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
   720 #define free(p) FREE_C_HEAP_ARRAY(char, p)
   721 #define getenv(n) ::getenv(n)
   723 #define EXTENSIONS_DIR  "/lib/ext"
   724 #define ENDORSED_DIR    "/lib/endorsed"
   725 #define COMMON_DIR      "/usr/jdk/packages"
   727   {
   728     /* sysclasspath, java_home, dll_dir */
   729     {
   730         char *home_path;
   731         char *dll_path;
   732         char *pslash;
   733         char buf[MAXPATHLEN];
   734         os::jvm_path(buf, sizeof(buf));
   736         // Found the full path to libjvm.so.
   737         // Now cut the path to <java_home>/jre if we can.
   738         *(strrchr(buf, '/')) = '\0';  /* get rid of /libjvm.so */
   739         pslash = strrchr(buf, '/');
   740         if (pslash != NULL)
   741             *pslash = '\0';           /* get rid of /{client|server|hotspot} */
   742         dll_path = malloc(strlen(buf) + 1);
   743         if (dll_path == NULL)
   744             return;
   745         strcpy(dll_path, buf);
   746         Arguments::set_dll_dir(dll_path);
   748         if (pslash != NULL) {
   749             pslash = strrchr(buf, '/');
   750             if (pslash != NULL) {
   751                 *pslash = '\0';       /* get rid of /<arch> */
   752                 pslash = strrchr(buf, '/');
   753                 if (pslash != NULL)
   754                     *pslash = '\0';   /* get rid of /lib */
   755             }
   756         }
   758         home_path = malloc(strlen(buf) + 1);
   759         if (home_path == NULL)
   760             return;
   761         strcpy(home_path, buf);
   762         Arguments::set_java_home(home_path);
   764         if (!set_boot_path('/', ':'))
   765             return;
   766     }
   768     /*
   769      * Where to look for native libraries
   770      */
   771     {
   772       // Use dlinfo() to determine the correct java.library.path.
   773       //
   774       // If we're launched by the Java launcher, and the user
   775       // does not set java.library.path explicitly on the commandline,
   776       // the Java launcher sets LD_LIBRARY_PATH for us and unsets
   777       // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
   778       // dlinfo returns LD_LIBRARY_PATH + crle settings (including
   779       // /usr/lib), which is exactly what we want.
   780       //
   781       // If the user does set java.library.path, it completely
   782       // overwrites this setting, and always has.
   783       //
   784       // If we're not launched by the Java launcher, we may
   785       // get here with any/all of the LD_LIBRARY_PATH[_32|64]
   786       // settings.  Again, dlinfo does exactly what we want.
   788       Dl_serinfo     _info, *info = &_info;
   789       Dl_serpath     *path;
   790       char*          library_path;
   791       char           *common_path;
   792       int            i;
   794       // determine search path count and required buffer size
   795       if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
   796         vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
   797       }
   799       // allocate new buffer and initialize
   800       info = (Dl_serinfo*)malloc(_info.dls_size);
   801       if (info == NULL) {
   802         vm_exit_out_of_memory(_info.dls_size,
   803                               "init_system_properties_values info");
   804       }
   805       info->dls_size = _info.dls_size;
   806       info->dls_cnt = _info.dls_cnt;
   808       // obtain search path information
   809       if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
   810         free(info);
   811         vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
   812       }
   814       path = &info->dls_serpath[0];
   816       // Note: Due to a legacy implementation, most of the library path
    817   // is set in the launcher.  This was to accommodate linking restrictions
   818       // on legacy Solaris implementations (which are no longer supported).
   819       // Eventually, all the library path setting will be done here.
   820       //
   821       // However, to prevent the proliferation of improperly built native
   822       // libraries, the new path component /usr/jdk/packages is added here.
   824       // Determine the actual CPU architecture.
   825       char cpu_arch[12];
   826       sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
   827 #ifdef _LP64
   828       // If we are a 64-bit vm, perform the following translations:
   829       //   sparc   -> sparcv9
   830       //   i386    -> amd64
   831       if (strcmp(cpu_arch, "sparc") == 0)
   832         strcat(cpu_arch, "v9");
   833       else if (strcmp(cpu_arch, "i386") == 0)
   834         strcpy(cpu_arch, "amd64");
   835 #endif
   837       // Construct the invariant part of ld_library_path. Note that the
   838       // space for the colon and the trailing null are provided by the
   839       // nulls included by the sizeof operator.
   840       size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
   841       common_path = malloc(bufsize);
   842       if (common_path == NULL) {
   843         free(info);
   844         vm_exit_out_of_memory(bufsize,
   845                               "init_system_properties_values common_path");
   846       }
   847       sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);
   849       // struct size is more than sufficient for the path components obtained
   850       // through the dlinfo() call, so only add additional space for the path
   851       // components explicitly added here.
   852       bufsize = info->dls_size + strlen(common_path);
   853       library_path = malloc(bufsize);
   854       if (library_path == NULL) {
   855         free(info);
   856         free(common_path);
   857         vm_exit_out_of_memory(bufsize,
   858                               "init_system_properties_values library_path");
   859       }
   860       library_path[0] = '\0';
   862       // Construct the desired Java library path from the linker's library
   863       // search path.
   864       //
   865       // For compatibility, it is optimal that we insert the additional path
   866       // components specific to the Java VM after those components specified
   867       // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
   868       // infrastructure.
   869       if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
   870         strcpy(library_path, common_path);
   871       } else {
   872         int inserted = 0;
   873         for (i = 0; i < info->dls_cnt; i++, path++) {
   874           uint_t flags = path->dls_flags & LA_SER_MASK;
   875           if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
   876             strcat(library_path, common_path);
   877             strcat(library_path, os::path_separator());
   878             inserted = 1;
   879           }
   880           strcat(library_path, path->dls_name);
   881           strcat(library_path, os::path_separator());
   882         }
   883         // eliminate trailing path separator
   884         library_path[strlen(library_path)-1] = '\0';
   885       }
   887       // happens before argument parsing - can't use a trace flag
   888       // tty->print_raw("init_system_properties_values: native lib path: ");
   889       // tty->print_raw_cr(library_path);
   891       // callee copies into its own buffer
   892       Arguments::set_library_path(library_path);
   894       free(common_path);
   895       free(library_path);
   896       free(info);
   897     }
   899     /*
   900      * Extensions directories.
   901      *
   902      * Note that the space for the colon and the trailing null are provided
   903      * by the nulls included by the sizeof operator (so actually one byte more
   904      * than necessary is allocated).
   905      */
   906     {
   907         char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
   908             sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
   909             sizeof(EXTENSIONS_DIR));
   910         sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
   911             Arguments::get_java_home());
   912         Arguments::set_ext_dirs(buf);
   913     }
   915     /* Endorsed standards default directory. */
   916     {
   917         char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
   918         sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
   919         Arguments::set_endorsed_dirs(buf);
   920     }
   921   }
   923 #undef malloc
   924 #undef free
   925 #undef getenv
   926 #undef EXTENSIONS_DIR
   927 #undef ENDORSED_DIR
   928 #undef COMMON_DIR
   930 }
   932 void os::breakpoint() {
   933   BREAKPOINT;
   934 }
   936 bool os::obsolete_option(const JavaVMOption *option)
   937 {
   938   if (!strncmp(option->optionString, "-Xt", 3)) {
   939     return true;
   940   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
   941     return true;
   942   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
   943     return true;
   944   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
   945     return true;
   946   }
   947   return false;
   948 }
   950 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
   951   address  stackStart  = (address)thread->stack_base();
   952   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
   953   if (sp < stackStart && sp >= stackEnd ) return true;
   954   return false;
   955 }
   957 extern "C" void breakpoint() {
   958   // use debugger to set breakpoint here
   959 }
   961 // Returns an estimate of the current stack pointer. Result must be guaranteed to
    962 // point into the calling thread's stack, and be no lower than the current stack
   963 // pointer.
   964 address os::current_stack_pointer() {
   965   volatile int dummy;
   966   address sp = (address)&dummy + 8;     // %%%% need to confirm if this is right
   967   return sp;
   968 }
   970 static thread_t main_thread;
   972 // Thread start routine for all new Java threads
   973 extern "C" void* java_start(void* thread_addr) {
   974   // Try to randomize the cache line index of hot stack frames.
    975   // This helps when threads with the same stack traces evict each other's
   976   // cache lines. The threads can be either from the same JVM instance, or
   977   // from different JVM instances. The benefit is especially true for
   978   // processors with hyperthreading technology.
   979   static int counter = 0;
   980   int pid = os::current_process_id();
   981   alloca(((pid ^ counter++) & 7) * 128);
   983   int prio;
   984   Thread* thread = (Thread*)thread_addr;
   985   OSThread* osthr = thread->osthread();
   987   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
   988   thread->_schedctl = (void *) schedctl_init () ;
   990   if (UseNUMA) {
   991     int lgrp_id = os::numa_get_group_id();
   992     if (lgrp_id != -1) {
   993       thread->set_lgrp_id(lgrp_id);
   994     }
   995   }
   997   // If the creator called set priority before we started,
   998   // we need to call set priority now that we have an lwp.
   999   // Get the priority from libthread and set the priority
  1000   // for the new Solaris lwp.
  1001   if ( osthr->thread_id() != -1 ) {
  1002     if ( UseThreadPriorities ) {
  1003       thr_getprio(osthr->thread_id(), &prio);
  1004       if (ThreadPriorityVerbose) {
  1005         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
   1006                       osthr->thread_id(), osthr->lwp_id(), prio );
   1007       }
   1008       os::set_native_priority(thread, prio);
   1009     }
   1010   } else if (ThreadPriorityVerbose) {
   1011     warning("Can't set priority in _start routine, thread id hasn't been set\n");
   1012   }
   1014   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
  1016   // initialize signal mask for this thread
  1017   os::Solaris::hotspot_sigmask(thread);
  1019   thread->run();
  1021   // One less thread is executing
  1022   // When the VMThread gets here, the main thread may have already exited
  1023   // which frees the CodeHeap containing the Atomic::dec code
  1024   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
   1025     Atomic::dec(&os::Solaris::_os_thread_count);
   1026   }
  1028   if (UseDetachedThreads) {
  1029     thr_exit(NULL);
  1030     ShouldNotReachHere();
   1031   }
   1032   return NULL;
   1033 }
  1035 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  1036   // Allocate the OSThread object
  1037   OSThread* osthread = new OSThread(NULL, NULL);
  1038   if (osthread == NULL) return NULL;
  1040   // Store info on the Solaris thread into the OSThread
  1041   osthread->set_thread_id(thread_id);
  1042   osthread->set_lwp_id(_lwp_self());
  1043   thread->_schedctl = (void *) schedctl_init () ;
  1045   if (UseNUMA) {
  1046     int lgrp_id = os::numa_get_group_id();
  1047     if (lgrp_id != -1) {
  1048       thread->set_lgrp_id(lgrp_id);
  1052   if ( ThreadPriorityVerbose ) {
  1053     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
  1054                   osthread->thread_id(), osthread->lwp_id() );
  1057   // Initial thread state is INITIALIZED, not SUSPENDED
  1058   osthread->set_state(INITIALIZED);
  1060   return osthread;
  1063 void os::Solaris::hotspot_sigmask(Thread* thread) {
  1065   //Save caller's signal mask
  1066   sigset_t sigmask;
  1067   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  1068   OSThread *osthread = thread->osthread();
  1069   osthread->set_caller_sigmask(sigmask);
  1071   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  1072   if (!ReduceSignalUsage) {
  1073     if (thread->is_VM_thread()) {
  1074       // Only the VM thread handles BREAK_SIGNAL ...
  1075       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
  1076     } else {
  1077       // ... all other threads block BREAK_SIGNAL
  1078       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
   1079       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
   1080     }
   1081   }
   1082 }
  1084 bool os::create_attached_thread(JavaThread* thread) {
  1085 #ifdef ASSERT
  1086   thread->verify_not_published();
  1087 #endif
  1088   OSThread* osthread = create_os_thread(thread, thr_self());
  1089   if (osthread == NULL) {
  1090      return false;
  1093   // Initial thread state is RUNNABLE
  1094   osthread->set_state(RUNNABLE);
  1095   thread->set_osthread(osthread);
  1097   // initialize signal mask for this thread
  1098   // and save the caller's signal mask
  1099   os::Solaris::hotspot_sigmask(thread);
  1101   return true;
  1104 bool os::create_main_thread(JavaThread* thread) {
  1105 #ifdef ASSERT
  1106   thread->verify_not_published();
  1107 #endif
  1108   if (_starting_thread == NULL) {
  1109     _starting_thread = create_os_thread(thread, main_thread);
  1110      if (_starting_thread == NULL) {
  1111         return false;
   1115   // The primordial thread is runnable from the start
  1116   _starting_thread->set_state(RUNNABLE);
  1118   thread->set_osthread(_starting_thread);
  1120   // initialize signal mask for this thread
  1121   // and save the caller's signal mask
  1122   os::Solaris::hotspot_sigmask(thread);
  1124   return true;
  1127 // _T2_libthread is true if we believe we are running with the newer
  1128 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
  1129 bool os::Solaris::_T2_libthread = false;
  1131 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  1132   // Allocate the OSThread object
  1133   OSThread* osthread = new OSThread(NULL, NULL);
  1134   if (osthread == NULL) {
  1135     return false;
  1138   if ( ThreadPriorityVerbose ) {
  1139     char *thrtyp;
  1140     switch ( thr_type ) {
  1141       case vm_thread:
  1142         thrtyp = (char *)"vm";
  1143         break;
  1144       case cgc_thread:
  1145         thrtyp = (char *)"cgc";
  1146         break;
  1147       case pgc_thread:
  1148         thrtyp = (char *)"pgc";
  1149         break;
  1150       case java_thread:
  1151         thrtyp = (char *)"java";
  1152         break;
  1153       case compiler_thread:
  1154         thrtyp = (char *)"compiler";
  1155         break;
  1156       case watcher_thread:
  1157         thrtyp = (char *)"watcher";
  1158         break;
  1159       default:
  1160         thrtyp = (char *)"unknown";
  1161         break;
  1163     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  1166   // Calculate stack size if it's not specified by caller.
  1167   if (stack_size == 0) {
   1168     // The default stack size is 1M (2M for LP64).
  1169     stack_size = (BytesPerWord >> 2) * K * K;
  1171     switch (thr_type) {
  1172     case os::java_thread:
   1173       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
  1174       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
  1175       break;
  1176     case os::compiler_thread:
  1177       if (CompilerThreadStackSize > 0) {
  1178         stack_size = (size_t)(CompilerThreadStackSize * K);
  1179         break;
  1180       } // else fall through:
  1181         // use VMThreadStackSize if CompilerThreadStackSize is not defined
  1182     case os::vm_thread:
  1183     case os::pgc_thread:
  1184     case os::cgc_thread:
  1185     case os::watcher_thread:
  1186       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
  1187       break;
  1190   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
  1192   // Initial state is ALLOCATED but not INITIALIZED
  1193   osthread->set_state(ALLOCATED);
  1195   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
  1196     // We got lots of threads. Check if we still have some address space left.
   1197     // We need at least 5Mb of unreserved address space, which we check by
  1198     // trying to reserve some.
  1199     const size_t VirtualMemoryBangSize = 20*K*K;
  1200     char* mem = os::reserve_memory(VirtualMemoryBangSize);
  1201     if (mem == NULL) {
  1202       delete osthread;
  1203       return false;
  1204     } else {
  1205       // Release the memory again
  1206       os::release_memory(mem, VirtualMemoryBangSize);
  1210   // Setup osthread because the child thread may need it.
  1211   thread->set_osthread(osthread);
  1213   // Create the Solaris thread
   1214   // explicit THR_BOUND for the T2_libthread case, in case
   1215   // that assumption is not accurate; our alternate signal stack
   1216   // handling is based on it and requires bound threads
  1217   thread_t tid = 0;
  1218   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
  1219                    | ((UseBoundThreads || os::Solaris::T2_libthread() ||
  1220                        (thr_type == vm_thread) ||
  1221                        (thr_type == cgc_thread) ||
  1222                        (thr_type == pgc_thread) ||
  1223                        (thr_type == compiler_thread && BackgroundCompilation)) ?
  1224                       THR_BOUND : 0);
  1225   int      status;
  1227   // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  1228   //
   1229   // On multiprocessor systems, libthread sometimes under-provisions our
   1230   // process with LWPs.  On a 30-way system, for instance, we could have
   1231   // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
   1232   // to our process.  This can result in underutilization of PEs.
  1233   // I suspect the problem is related to libthread's LWP
  1234   // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  1235   // upcall policy.
  1236   //
  1237   // The following code is palliative -- it attempts to ensure that our
  1238   // process has sufficient LWPs to take advantage of multiple PEs.
  1239   // Proper long-term cures include using user-level threads bound to LWPs
  1240   // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
  1241   // slight timing window with respect to sampling _os_thread_count, but
  1242   // the race is benign.  Also, we should periodically recompute
  1243   // _processors_online as the min of SC_NPROCESSORS_ONLN and the
   1244   // number of PEs in our partition.  You might be tempted to use
  1245   // THR_NEW_LWP here, but I'd recommend against it as that could
  1246   // result in undesirable growth of the libthread's LWP pool.
   1247   // The fix below isn't sufficient; for instance, it doesn't take into account
  1248   // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
  1249   //
  1250   // Some pathologies this scheme doesn't handle:
  1251   // *  Threads can block, releasing the LWPs.  The LWPs can age out.
  1252   //    When a large number of threads become ready again there aren't
  1253   //    enough LWPs available to service them.  This can occur when the
  1254   //    number of ready threads oscillates.
  1255   // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
  1256   //
  1257   // Finally, we should call thr_setconcurrency() periodically to refresh
  1258   // the LWP pool and thwart the LWP age-out mechanism.
  1259   // The "+3" term provides a little slop -- we want to slightly overprovision.
  1261   if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
  1262     if (!(flags & THR_BOUND)) {
  1263       thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
  1266   // Although this doesn't hurt, we should warn of undefined behavior
  1267   // when using unbound T1 threads with schedctl().  This should never
  1268   // happen, as the compiler and VM threads are always created bound
  1269   DEBUG_ONLY(
  1270       if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
  1271           (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
  1272           ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
  1273            (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
  1274          warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
  1276   );
  1279   // Mark that we don't have an lwp or thread id yet.
  1280   // In case we attempt to set the priority before the thread starts.
  1281   osthread->set_lwp_id(-1);
  1282   osthread->set_thread_id(-1);
  1284   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  1285   if (status != 0) {
  1286     if (PrintMiscellaneous && (Verbose || WizardMode)) {
  1287       perror("os::create_thread");
  1289     thread->set_osthread(NULL);
  1290     // Need to clean up stuff we've allocated so far
  1291     delete osthread;
  1292     return false;
  1295   Atomic::inc(&os::Solaris::_os_thread_count);
  1297   // Store info on the Solaris thread into the OSThread
  1298   osthread->set_thread_id(tid);
  1300   // Remember that we created this thread so we can set priority on it
  1301   osthread->set_vm_created();
   1303   // Set the thread priority: use DefaultThreadPriority if specified, otherwise NormPriority
  1305   if ( UseThreadPriorities ) {
  1306      thr_setprio(tid, (DefaultThreadPriority == -1) ?
  1307                         java_to_os_priority[NormPriority] :
  1308                         DefaultThreadPriority);
  1311   // Initial thread state is INITIALIZED, not SUSPENDED
  1312   osthread->set_state(INITIALIZED);
  1314   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  1315   return true;
   1318 /* SIGJVM1 and SIGJVM2 are defined on Solaris 10 and later. Defining them here
   1319  *  allows builds on earlier Solaris versions to take advantage of the newly
   1320  *  reserved Solaris JVM signals: INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is
   1321  *  SIGJVM2, and -XX:+UseAltSigs does nothing since these should have no conflict.
   1322  */
  1323 #if !defined(SIGJVM1)
  1324 #define SIGJVM1 39
  1325 #define SIGJVM2 40
  1326 #endif
  1328 debug_only(static bool signal_sets_initialized = false);
  1329 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
  1330 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
  1331 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
  1333 bool os::Solaris::is_sig_ignored(int sig) {
  1334       struct sigaction oact;
  1335       sigaction(sig, (struct sigaction*)NULL, &oact);
  1336       void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
  1337                                      : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
  1338       if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
  1339            return true;
  1340       else
   1341            return false;
   1342 }
  1344 // Note: SIGRTMIN is a macro that calls sysconf() so it will
  1345 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
  1346 static bool isJVM1available() {
   1347   return SIGJVM1 < SIGRTMIN;
   1348 }
  1350 void os::Solaris::signal_sets_init() {
  1351   // Should also have an assertion stating we are still single-threaded.
  1352   assert(!signal_sets_initialized, "Already initialized");
  1353   // Fill in signals that are necessarily unblocked for all threads in
  1354   // the VM. Currently, we unblock the following signals:
  1355   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  1356   //                         by -Xrs (=ReduceSignalUsage));
  1357   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  1358   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  1359   // the dispositions or masks wrt these signals.
  1360   // Programs embedding the VM that want to use the above signals for their
  1361   // own purposes must, at this time, use the "-Xrs" option to prevent
  1362   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  1363   // (See bug 4345157, and other related bugs).
  1364   // In reality, though, unblocking these signals is really a nop, since
  1365   // these signals are not blocked by default.
  1366   sigemptyset(&unblocked_sigs);
  1367   sigemptyset(&allowdebug_blocked_sigs);
  1368   sigaddset(&unblocked_sigs, SIGILL);
  1369   sigaddset(&unblocked_sigs, SIGSEGV);
  1370   sigaddset(&unblocked_sigs, SIGBUS);
  1371   sigaddset(&unblocked_sigs, SIGFPE);
   1373   if (isJVM1available()) {
  1374     os::Solaris::set_SIGinterrupt(SIGJVM1);
  1375     os::Solaris::set_SIGasync(SIGJVM2);
  1376   } else if (UseAltSigs) {
  1377     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
  1378     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
  1379   } else {
  1380     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
   1381     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
   1382   }
  1384   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
  1385   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
  1387   if (!ReduceSignalUsage) {
  1388    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
  1389       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
  1390       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
  1392    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
  1393       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
  1394       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
  1396    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
  1397       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
  1398       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
  1401   // Fill in signals that are blocked by all but the VM thread.
  1402   sigemptyset(&vm_sigs);
  1403   if (!ReduceSignalUsage)
  1404     sigaddset(&vm_sigs, BREAK_SIGNAL);
  1405   debug_only(signal_sets_initialized = true);
  1407   // For diagnostics only used in run_periodic_checks
  1408   sigemptyset(&check_signal_done);
  1411 // These are signals that are unblocked while a thread is running Java.
  1412 // (For some reason, they get blocked by default.)
  1413 sigset_t* os::Solaris::unblocked_signals() {
  1414   assert(signal_sets_initialized, "Not initialized");
  1415   return &unblocked_sigs;
  1418 // These are the signals that are blocked while a (non-VM) thread is
  1419 // running Java. Only the VM thread handles these signals.
  1420 sigset_t* os::Solaris::vm_signals() {
  1421   assert(signal_sets_initialized, "Not initialized");
  1422   return &vm_sigs;
  1425 // These are signals that are blocked during cond_wait to allow the debugger in.
  1426 sigset_t* os::Solaris::allowdebug_blocked_signals() {
  1427   assert(signal_sets_initialized, "Not initialized");
  1428   return &allowdebug_blocked_sigs;
  1431 // First crack at OS-specific initialization, from inside the new thread.
  1432 void os::initialize_thread() {
  1433   int r = thr_main() ;
  1434   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  1435   if (r) {
  1436     JavaThread* jt = (JavaThread *)Thread::current();
  1437     assert(jt != NULL,"Sanity check");
  1438     size_t stack_size;
  1439     address base = jt->stack_base();
  1440     if (Arguments::created_by_java_launcher()) {
  1441       // Use 2MB to allow for Solaris 7 64 bit mode.
  1442       stack_size = JavaThread::stack_size_at_create() == 0
  1443         ? 2048*K : JavaThread::stack_size_at_create();
  1445       // There are rare cases when we may have already used more than
  1446       // the basic stack size allotment before this method is invoked.
  1447       // Attempt to allow for a normally sized java_stack.
  1448       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
  1449       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
  1450     } else {
  1451       // 6269555: If we were not created by a Java launcher, i.e. if we are
  1452       // running embedded in a native application, treat the primordial thread
  1453       // as much like a native attached thread as possible.  This means using
  1454       // the current stack size from thr_stksegment(), unless it is too large
  1455       // to reliably setup guard pages.  A reasonable max size is 8MB.
  1456       size_t current_size = current_stack_size();
  1457       // This should never happen, but just in case....
  1458       if (current_size == 0) current_size = 2 * K * K;
  1459       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
  1461     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
  1462     stack_size = (size_t)(base - bottom);
  1464     assert(stack_size > 0, "Stack size calculation problem");
  1466     if (stack_size > jt->stack_size()) {
  1467       NOT_PRODUCT(
  1468         struct rlimit limits;
  1469         getrlimit(RLIMIT_STACK, &limits);
  1470         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
  1471         assert(size >= jt->stack_size(), "Stack size problem in main thread");
  1473       tty->print_cr(
  1474         "Stack size of %d Kb exceeds current limit of %d Kb.\n"
  1475         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
  1476         "See limit(1) to increase the stack size limit.",
  1477         stack_size / K, jt->stack_size() / K);
  1478       vm_exit(1);
  1480     assert(jt->stack_size() >= stack_size,
  1481           "Attempt to map more stack than was allocated");
  1482     jt->set_stack_size(stack_size);
  1485    // 5/22/01: Right now alternate signal stacks do not handle
  1486    // throwing stack overflow exceptions, see bug 4463178
  1487    // Until a fix is found for this, T2 will NOT imply alternate signal
  1488    // stacks.
  1489    // If using T2 libthread threads, install an alternate signal stack.
  1490    // Because alternate signal stacks are associated with LWPs on Solaris
  1491    // (see sigaltstack(2)), if using UNBOUND threads, or if UseBoundThreads,
  1492    // we prefer to explicitly stack bang.
  1493    // If not using T2 libthread but using UseBoundThreads, any threads
  1494    // (primordial thread, JNI AttachCurrentThread) we do not create
  1495    // are probably not bound and therefore cannot have an alternate
  1496    // signal stack. Since our stack banging code is generated and
  1497    // is shared across threads, all threads must be bound to allow
  1498    // using alternate signal stacks.  The alternative is to interpose
  1499    // on _lwp_create to associate an alt sig stack with each LWP,
  1500    // and this could be a problem when the JVM is embedded.
  1501    // We would prefer to use alternate signal stacks with T2.
  1502    // Since there is currently no accurate way to detect T2,
  1503    // we do not. Assuming T2 when running T1 causes sig 11s or assertions
  1504    // when installing alternate signal stacks.
  1507    // 05/09/03: removed alternate signal stack support for Solaris
  1508    // The alternate signal stack mechanism is no longer needed to
  1509    // handle stack overflow. This is now handled by allocating
  1510    // guard pages (red zone) and stackbanging.
  1511    // Initially the alternate signal stack mechanism was removed because
  1512    // it did not work with T1 libthread. Alternate
  1513    // signal stacks MUST have all threads bound to lwps. Applications
  1514    // can create their own threads and attach them without their being
  1515    // bound under T1. This is frequently the case for the primordial thread.
  1516    // If we were ever to reenable this mechanism we would need to
  1517    // use the dynamic check for T2 libthread.
  1519   os::Solaris::init_thread_fpu_state();
  1524 // Free Solaris resources related to the OSThread
  1525 void os::free_thread(OSThread* osthread) {
  1526   assert(osthread != NULL, "os::free_thread but osthread not set");
  1529   // We are told to free resources of the argument thread,
  1530   // but we can only really operate on the current thread.
  1531   // The main thread must take the VMThread down synchronously
  1532   // before the main thread exits and frees up CodeHeap
  1533   guarantee((Thread::current()->osthread() == osthread
  1534      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  1535   if (Thread::current()->osthread() == osthread) {
  1536     // Restore caller's signal mask
  1537     sigset_t sigmask = osthread->caller_sigmask();
  1538     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  1540   delete osthread;
  1543 void os::pd_start_thread(Thread* thread) {
  1544   int status = thr_continue(thread->osthread()->thread_id());
  1545   assert_status(status == 0, status, "thr_continue failed");
  1549 intx os::current_thread_id() {
  1550   return (intx)thr_self();
  1553 static pid_t _initial_pid = 0;
  1555 int os::current_process_id() {
  1556   return (int)(_initial_pid ? _initial_pid : getpid());
  1559 int os::allocate_thread_local_storage() {
  1560   // %%%       in Win32 this allocates a memory segment pointed to by a
  1561   //           register.  Dan Stein can implement a similar feature in
  1562   //           Solaris.  Alternatively, the VM can do the same thing
  1563   //           explicitly: malloc some storage and keep the pointer in a
  1564   //           register (which is part of the thread's context) (or keep it
  1565   //           in TLS).
  1566   // %%%       In current versions of Solaris, thr_self and TSD can
  1567   //           be accessed via short sequences of displaced indirections.
  1568   //           The value of thr_self is available as %g7(36).
  1569   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  1570   //           assuming that the current thread already has a value bound to k.
  1571   //           It may be worth experimenting with such access patterns,
  1572   //           and later having the parameters formally exported from a Solaris
  1573   //           interface.  I think, however, that it will be faster to
  1574   //           maintain the invariant that %g2 always contains the
  1575   //           JavaThread in Java code, and have stubs simply
  1576   //           treat %g2 as a caller-save register, preserving it in a %lN.
  1577   thread_key_t tk;
  1578   if (thr_keycreate( &tk, NULL ) )
  1579     fatal1("os::allocate_thread_local_storage: thr_keycreate failed (%s)", strerror(errno));
  1580   return int(tk);
  1583 void os::free_thread_local_storage(int index) {
  1584   // %%% don't think we need anything here
  1585   // if ( pthread_key_delete((pthread_key_t) tk) )
  1586   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
  1589 #define SMALLINT 32   // libthread's allocation for tsd_common is a version-specific
  1590                       // small number - the point is that NO swap space is available
  1591 void os::thread_local_storage_at_put(int index, void* value) {
  1592   // %%% this is used only in threadLocalStorage.cpp
  1593   if (thr_setspecific((thread_key_t)index, value)) {
  1594     if (errno == ENOMEM) {
  1595        vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
  1596     } else {
  1597       fatal1("os::thread_local_storage_at_put: thr_setspecific failed (%s)", strerror(errno));
  1599   } else {
  1600       ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  1604 // This function could be called before TLS is initialized, for example, when
  1605 // VM receives an async signal or when VM causes a fatal error during
  1606 // initialization. Return NULL if thr_getspecific() fails.
  1607 void* os::thread_local_storage_at(int index) {
  1608   // %%% this is used only in threadLocalStorage.cpp
  1609   void* r = NULL;
  1610   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
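       // Illustrative sketch of how the three TLS functions above fit together
       // (the actual caller is threadLocalStorage.cpp; names below are for illustration):
       //   int index = os::allocate_thread_local_storage();          // once, at VM startup
       //   os::thread_local_storage_at_put(index, current_thread);   // per attached thread
       //   Thread* t = (Thread*)os::thread_local_storage_at(index);  // fast lookup, may be NULL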
  1614 const int NANOSECS_PER_MILLISECS = 1000000;
  1615 // gethrtime can move backwards if read from one cpu and then a different cpu
  1616 // getTimeNanos is guaranteed to not move backward on Solaris
  1617 // A local spinloop is used because a CAS on an int is faster than
  1618 // a CAS on a 64-bit jlong. Also, Atomic::cmpxchg for jlong is not
  1619 // supported on sparc v8 or on pre-supports_cx8 intel boxes.
  1620 // oldgetTimeNanos is for systems that do not support CAS on a 64-bit jlong,
  1621 // i.e. sparc v8 and pre-supports_cx8 (i486) intel boxes.
  1622 inline hrtime_t oldgetTimeNanos() {
  1623   int gotlock = LOCK_INVALID;
  1624   hrtime_t newtime = gethrtime();
  1626   for (;;) {
  1627 // grab lock for max_hrtime
  1628     int curlock = max_hrtime_lock;
  1629     if (curlock & LOCK_BUSY)  continue;
  1630     if ((gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE)) != LOCK_FREE) continue;
  1631     if (newtime > max_hrtime) {
  1632       max_hrtime = newtime;
  1633     } else {
  1634       newtime = max_hrtime;
  1636     // release lock
  1637     max_hrtime_lock = LOCK_FREE;
  1638     return newtime;
  1641 // gethrtime can move backwards if read from one cpu and then a different cpu
  1642 // getTimeNanos is guaranteed to not move backward on Solaris
  1643 inline hrtime_t getTimeNanos() {
  1644   if (VM_Version::supports_cx8()) {
  1645     const hrtime_t now = gethrtime();
  1646     const hrtime_t prev = max_hrtime;
  1647     if (now <= prev)  return prev;   // same or retrograde time;
  1648     const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  1649     assert(obsv >= prev, "invariant");   // Monotonicity
  1650     // If the CAS succeeded then we're done and return "now".
  1651     // If the CAS failed and the observed value "obsv" is >= now then
  1652     // we should return "obsv".  If the CAS failed and now > obsv > prev then
  1653     // some other thread raced this thread and installed a new value, in which case
  1654     // we could either (a) retry the entire operation, (b) retry trying to install now,
  1655     // or (c) just return obsv.  We use (c).  No loop is required, although in some cases
  1656     // we might discard a higher "now" value in deference to a slightly lower but freshly
  1657     // installed obsv value.  That's entirely benign -- it admits no new orderings compared
  1658     // to (a) or (b) -- and greatly reduces coherence traffic.
  1659     // We might also condition (c) on the magnitude of the delta between obs and now.
  1660     // Avoiding excessive CAS operations to hot RW locations is critical.
  1661     // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
  1662     return (prev == obsv) ? now : obsv ;
  1663   } else {
  1664     return oldgetTimeNanos();
  1668 // Time since start-up in seconds to a fine granularity.
  1669 // Used by VMSelfDestructTimer and the MemProfiler.
  1670 double os::elapsedTime() {
  1671   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
  1674 jlong os::elapsed_counter() {
  1675   return (jlong)(getTimeNanos() - first_hrtime);
  1678 jlong os::elapsed_frequency() {
  1679    return hrtime_hz;
  1682 // Return the real, user, and system times in seconds from an
  1683 // arbitrary fixed point in the past.
  1684 bool os::getTimesSecs(double* process_real_time,
  1685                   double* process_user_time,
  1686                   double* process_system_time) {
  1687   struct tms ticks;
  1688   clock_t real_ticks = times(&ticks);
  1690   if (real_ticks == (clock_t) (-1)) {
  1691     return false;
  1692   } else {
  1693     double ticks_per_second = (double) clock_tics_per_sec;
  1694     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
  1695     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
  1696     // For consistency return the real time from getTimeNanos()
  1697     // converted to seconds.
  1698     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
  1700     return true;
  1704 bool os::supports_vtime() { return true; }
  1706 bool os::enable_vtime() {
  1707   int fd = open("/proc/self/ctl", O_WRONLY);
  1708   if (fd == -1)
  1709     return false;
  1711   long cmd[] = { PCSET, PR_MSACCT };
  1712   int res = write(fd, cmd, sizeof(long) * 2);
  1713   close(fd);
  1714   if (res != sizeof(long) * 2)
  1715     return false;
  1717   return true;
  1720 bool os::vtime_enabled() {
  1721   int fd = open("/proc/self/status", O_RDONLY);
  1722   if (fd == -1)
  1723     return false;
  1725   pstatus_t status;
  1726   int res = read(fd, (void*) &status, sizeof(pstatus_t));
  1727   close(fd);
  1728   if (res != sizeof(pstatus_t))
  1729     return false;
  1731   return status.pr_flags & PR_MSACCT;
  1734 double os::elapsedVTime() {
  1735   return (double)gethrvtime() / (double)hrtime_hz;
  1738 // Used internally for comparisons only
  1739 // getTimeMillis guaranteed to not move backwards on Solaris
  1740 jlong getTimeMillis() {
  1741   jlong nanotime = getTimeNanos();
  1742   return (jlong)(nanotime / NANOSECS_PER_MILLISECS);
  1745 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
  1746 jlong os::javaTimeMillis() {
  1747   timeval t;
  1748   if (gettimeofday( &t, NULL) == -1)
  1749     fatal1("os::javaTimeMillis: gettimeofday (%s)", strerror(errno));
  1750   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
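       // Worked example (illustrative values): tv_sec = 12, tv_usec = 345678
       // yields 12 * 1000 + 345678 / 1000 = 12000 + 345 = 12345 milliseconds.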
  1753 jlong os::javaTimeNanos() {
  1754   return (jlong)getTimeNanos();
  1757 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  1758   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  1759   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  1760   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  1761   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
  1764 char * os::local_time_string(char *buf, size_t buflen) {
  1765   struct tm t;
  1766   time_t long_time;
  1767   time(&long_time);
  1768   localtime_r(&long_time, &t);
  1769   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
  1770                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
  1771                t.tm_hour, t.tm_min, t.tm_sec);
  1772   return buf;
  1775 // Note: os::shutdown() might be called very early during initialization, or
  1776 // called from signal handler. Before adding something to os::shutdown(), make
  1777 // sure it is async-safe and can handle partially initialized VM.
  1778 void os::shutdown() {
  1780   // allow PerfMemory to attempt cleanup of any persistent resources
  1781   perfMemory_exit();
  1783   // needs to remove object in file system
  1784   AttachListener::abort();
  1786   // flush buffered output, finish log files
  1787   ostream_abort();
  1789   // Check for abort hook
  1790   abort_hook_t abort_hook = Arguments::abort_hook();
  1791   if (abort_hook != NULL) {
  1792     abort_hook();
  1796 // Note: os::abort() might be called very early during initialization, or
  1797 // called from signal handler. Before adding something to os::abort(), make
  1798 // sure it is async-safe and can handle partially initialized VM.
  1799 void os::abort(bool dump_core) {
  1800   os::shutdown();
  1801   if (dump_core) {
  1802 #ifndef PRODUCT
  1803     fdStream out(defaultStream::output_fd());
  1804     out.print_raw("Current thread is ");
  1805     char buf[16];
  1806     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
  1807     out.print_raw_cr(buf);
  1808     out.print_raw_cr("Dumping core ...");
  1809 #endif
  1810     ::abort(); // dump core (for debugging)
  1813   ::exit(1);
  1816 // Die immediately, no exit hook, no abort hook, no cleanup.
  1817 void os::die() {
  1818   _exit(-1);
  1821 // unused
  1822 void os::set_error_file(const char *logfile) {}
  1824 // DLL functions
  1826 const char* os::dll_file_extension() { return ".so"; }
  1828 const char* os::get_temp_directory() { return "/tmp/"; }
  1830 static bool file_exists(const char* filename) {
  1831   struct stat statbuf;
  1832   if (filename == NULL || strlen(filename) == 0) {
  1833     return false;
  1835   return os::stat(filename, &statbuf) == 0;
  1838 void os::dll_build_name(char* buffer, size_t buflen,
  1839                         const char* pname, const char* fname) {
  1840   // Copied from libhpi
  1841   const size_t pnamelen = pname ? strlen(pname) : 0;
  1843   // Quietly truncate on buffer overflow.  Should be an error.
  1844   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
  1845       *buffer = '\0';
  1846       return;
  1849   if (pnamelen == 0) {
  1850     snprintf(buffer, buflen, "lib%s.so", fname);
  1851   } else if (strchr(pname, *os::path_separator()) != NULL) {
  1852     int n;
  1853     char** pelements = split_path(pname, &n);
  1854     for (int i = 0 ; i < n ; i++) {
  1855       // really shouldn't be NULL but what the heck, check can't hurt
  1856       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
  1857         continue; // skip the empty path values
  1859       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
  1860       if (file_exists(buffer)) {
  1861         break;
  1864     // release the storage
  1865     for (int i = 0 ; i < n ; i++) {
  1866       if (pelements[i] != NULL) {
  1867         FREE_C_HEAP_ARRAY(char, pelements[i]);
  1870     if (pelements != NULL) {
  1871       FREE_C_HEAP_ARRAY(char*, pelements);
  1873   } else {
  1874     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
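       // Illustrative results (hypothetical names): with pname == NULL the buffer
       // becomes "libfoo.so"; with pname == "/usr/lib:/opt/lib" each element is
       // tried as "<element>/libfoo.so" and the first one that exists is kept.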
  1878 const char* os::get_current_directory(char *buf, int buflen) {
  1879   return getcwd(buf, buflen);
  1882 // check if addr is inside libjvm[_g].so
  1883 bool os::address_is_in_vm(address addr) {
  1884   static address libjvm_base_addr;
  1885   Dl_info dlinfo;
  1887   if (libjvm_base_addr == NULL) {
  1888     dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
  1889     libjvm_base_addr = (address)dlinfo.dli_fbase;
  1890     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  1893   if (dladdr((void *)addr, &dlinfo)) {
  1894     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  1897   return false;
  1900 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
  1901 static dladdr1_func_type dladdr1_func = NULL;
  1903 bool os::dll_address_to_function_name(address addr, char *buf,
  1904                                       int buflen, int * offset) {
  1905   Dl_info dlinfo;
  1907   // dladdr1_func was initialized in os::init()
  1908   if (dladdr1_func){
  1909       // yes, we have dladdr1
  1911       // Support for dladdr1 is checked at runtime; it may be
  1912       // available even if the vm is built on a machine that does
  1913       // not have dladdr1 support.  Make sure there is a value for
  1914       // RTLD_DL_SYMENT.
  1915       #ifndef RTLD_DL_SYMENT
  1916       #define RTLD_DL_SYMENT 1
  1917       #endif
  1918       Sym * info;
  1919       if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
  1920                        RTLD_DL_SYMENT)) {
  1921           if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
  1922           if (offset) *offset = addr - (address)dlinfo.dli_saddr;
  1924           // check if the returned symbol really covers addr
  1925           return ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr);
  1926       } else {
  1927           if (buf) buf[0] = '\0';
  1928           if (offset) *offset  = -1;
  1929           return false;
  1931   } else {
  1932       // no, only dladdr is available
  1933       if(dladdr((void *)addr, &dlinfo)) {
  1934           if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
  1935           if (offset) *offset = addr - (address)dlinfo.dli_saddr;
  1936           return true;
  1937       } else {
  1938           if (buf) buf[0] = '\0';
  1939           if (offset) *offset  = -1;
  1940           return false;
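       // Illustrative use (a sketch of how a caller such as error reporting code
       // can resolve an address to "symbol+offset" form):
       //   char buf[256]; int offset;
       //   if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
       //     // buf now holds the symbol name, offset the distance from its start
       //   }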
  1945 bool os::dll_address_to_library_name(address addr, char* buf,
  1946                                      int buflen, int* offset) {
  1947   Dl_info dlinfo;
  1949   if (dladdr((void*)addr, &dlinfo)){
  1950      if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
  1951      if (offset) *offset = addr - (address)dlinfo.dli_fbase;
  1952      return true;
  1953   } else {
  1954      if (buf) buf[0] = '\0';
  1955      if (offset) *offset = -1;
  1956      return false;
  1960 // Prints the names and full paths of all opened dynamic libraries
  1961 // for current process
  1962 void os::print_dll_info(outputStream * st) {
  1963     Dl_info dli;
  1964     void *handle;
  1965     Link_map *map;
  1966     Link_map *p;
  1968     st->print_cr("Dynamic libraries:"); st->flush();
  1970     if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
  1971         st->print_cr("Error: Cannot print dynamic libraries.");
  1972         return;
  1974     handle = dlopen(dli.dli_fname, RTLD_LAZY);
  1975     if (handle == NULL) {
  1976         st->print_cr("Error: Cannot print dynamic libraries.");
  1977         return;
  1979     dlinfo(handle, RTLD_DI_LINKMAP, &map);
  1980     if (map == NULL) {
  1981         st->print_cr("Error: Cannot print dynamic libraries.");
  1982         return;
  1985     while (map->l_prev != NULL)
  1986         map = map->l_prev;
  1988     while (map != NULL) {
  1989         st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
  1990         map = map->l_next;
  1993     dlclose(handle);
  1996   // Loads a .dll/.so and,
  1997   // in case of error, checks whether the .dll/.so was built for the
  1998   // same architecture that Hotspot is running on.
  2000 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
  2002   void * result= ::dlopen(filename, RTLD_LAZY);
  2003   if (result != NULL) {
  2004     // Successful loading
  2005     return result;
  2008   Elf32_Ehdr elf_head;
  2010   // Read system error message into ebuf
  2011   // It may or may not be overwritten below
  2012   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  2013   ebuf[ebuflen-1]='\0';
  2014   int diag_msg_max_length=ebuflen-strlen(ebuf);
  2015   char* diag_msg_buf=ebuf+strlen(ebuf);
  2017   if (diag_msg_max_length==0) {
  2018     // No more space in ebuf for additional diagnostics message
  2019     return NULL;
  2023   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
  2025   if (file_descriptor < 0) {
  2026     // Can't open library, report dlerror() message
  2027     return NULL;
  2030   bool failed_to_read_elf_head=
  2031     (sizeof(elf_head)!=
  2032         (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
  2034   ::close(file_descriptor);
  2035   if (failed_to_read_elf_head) {
  2036     // file i/o error - report dlerror() msg
  2037     return NULL;
  2040   typedef struct {
  2041     Elf32_Half  code;         // Actual value as defined in elf.h
  2042     Elf32_Half  compat_class; // Compatibility class of archs from the VM's point of view
  2043     char        elf_class;    // 32 or 64 bit
  2044     char        endianess;    // MSB or LSB
  2045     char*       name;         // String representation
  2046   } arch_t;
  2048   static const arch_t arch_array[]={
  2049     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
  2050     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
  2051     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
  2052     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
  2053     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
  2054     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
  2055     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
  2056     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
  2057     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}
  2058   };
  2060   #if  (defined IA32)
  2061     static  Elf32_Half running_arch_code=EM_386;
  2062   #elif   (defined AMD64)
  2063     static  Elf32_Half running_arch_code=EM_X86_64;
  2064   #elif  (defined IA64)
  2065     static  Elf32_Half running_arch_code=EM_IA_64;
  2066   #elif  (defined __sparc) && (defined _LP64)
  2067     static  Elf32_Half running_arch_code=EM_SPARCV9;
  2068   #elif  (defined __sparc) && (!defined _LP64)
  2069     static  Elf32_Half running_arch_code=EM_SPARC;
  2070   #elif  (defined __powerpc64__)
  2071     static  Elf32_Half running_arch_code=EM_PPC64;
  2072   #elif  (defined __powerpc__)
  2073     static  Elf32_Half running_arch_code=EM_PPC;
  2074   #else
  2075     #error Method os::dll_load requires that one of following is defined:\
  2076          IA32, AMD64, IA64, __sparc, __powerpc__
  2077   #endif
  2079   // Identify compatibility class for VM's architecture and library's architecture
  2080   // Obtain string descriptions for architectures
  2082   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  2083   int running_arch_index=-1;
  2085   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
  2086     if (running_arch_code == arch_array[i].code) {
  2087       running_arch_index    = i;
  2089     if (lib_arch.code == arch_array[i].code) {
  2090       lib_arch.compat_class = arch_array[i].compat_class;
  2091       lib_arch.name         = arch_array[i].name;
  2095   assert(running_arch_index != -1,
  2096     "Didn't find running architecture code (running_arch_code) in arch_array");
  2097   if (running_arch_index == -1) {
  2098     // Even though running architecture detection failed
  2099     // we may still continue with reporting dlerror() message
  2100     return NULL;
  2103   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
  2104     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
  2105     return NULL;
  2108   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
  2109     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
  2110     return NULL;
  2113   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
  2114     if ( lib_arch.name!=NULL ) {
  2115       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
  2116         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
  2117         lib_arch.name, arch_array[running_arch_index].name);
  2118     } else {
  2119       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
  2120       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
  2121         lib_arch.code,
  2122         arch_array[running_arch_index].name);
  2126   return NULL;
  2129 void* os::dll_lookup(void* handle, const char* name) {
  2130   return dlsym(handle, name);
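       // Illustrative pairing of dll_load and dll_lookup (hypothetical library/symbol):
       //   char ebuf[1024];
       //   void* handle = os::dll_load("/tmp/libexample.so", ebuf, sizeof(ebuf));
       //   void* entry  = (handle != NULL) ? os::dll_lookup(handle, "JNI_OnLoad") : NULL;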
  2134 bool _print_ascii_file(const char* filename, outputStream* st) {
  2135   int fd = open(filename, O_RDONLY);
  2136   if (fd == -1) {
  2137      return false;
  2140   char buf[32];
  2141   int bytes;
  2142   while ((bytes = read(fd, buf, sizeof(buf))) > 0) {
  2143     st->print_raw(buf, bytes);
  2146   close(fd);
  2148   return true;
  2151 void os::print_os_info(outputStream* st) {
  2152   st->print("OS:");
  2154   if (!_print_ascii_file("/etc/release", st)) {
  2155     st->print("Solaris");
  2157   st->cr();
  2159   // kernel
  2160   st->print("uname:");
  2161   struct utsname name;
  2162   uname(&name);
  2163   st->print(name.sysname); st->print(" ");
  2164   st->print(name.release); st->print(" ");
  2165   st->print(name.version); st->print(" ");
  2166   st->print(name.machine);
  2168   // libthread
  2169   if (os::Solaris::T2_libthread()) st->print("  (T2 libthread)");
  2170   else st->print("  (T1 libthread)");
  2171   st->cr();
  2173   // rlimit
  2174   st->print("rlimit:");
  2175   struct rlimit rlim;
  2177   st->print(" STACK ");
  2178   getrlimit(RLIMIT_STACK, &rlim);
  2179   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  2180   else st->print("%uk", rlim.rlim_cur >> 10);
  2182   st->print(", CORE ");
  2183   getrlimit(RLIMIT_CORE, &rlim);
  2184   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  2185   else st->print("%uk", rlim.rlim_cur >> 10);
  2187   st->print(", NOFILE ");
  2188   getrlimit(RLIMIT_NOFILE, &rlim);
  2189   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  2190   else st->print("%d", rlim.rlim_cur);
  2192   st->print(", AS ");
  2193   getrlimit(RLIMIT_AS, &rlim);
  2194   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  2195   else st->print("%uk", rlim.rlim_cur >> 10);
  2196   st->cr();
  2198   // load average
  2199   st->print("load average:");
  2200   double loadavg[3];
  2201   os::loadavg(loadavg, 3);
  2202   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  2203   st->cr();
  2207 static bool check_addr0(outputStream* st) {
  2208   jboolean status = false;
  2209   int fd = open("/proc/self/map",O_RDONLY);
  2210   if (fd >= 0) {
  2211     prmap_t p;
  2212     while(read(fd, &p, sizeof(p)) > 0) {
  2213       if (p.pr_vaddr == 0x0) {
  2214         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
  2215         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
  2216         st->print("Access:");
  2217         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
  2218         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
  2219         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
  2220         st->cr();
  2221         status = true;
  2222       }
  2223     }
  2224     close(fd);
  2225   }
  2226   return status;
  2229 void os::print_memory_info(outputStream* st) {
  2230   st->print("Memory:");
  2231   st->print(" %dk page", os::vm_page_size()>>10);
  2232   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  2233   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  2234   st->cr();
  2235   (void) check_addr0(st);
  2238 // Taken from /usr/include/sys/machsig.h.  Supposed to be architecture-specific,
  2239 // but they're the same for all the Solaris architectures that we support.
  2240 const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
  2241                           "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
  2242                           "ILL_COPROC", "ILL_BADSTK" };
  2244 const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
  2245                           "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
  2246                           "FPE_FLTINV", "FPE_FLTSUB" };
  2248 const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };
  2250 const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
  2252 void os::print_siginfo(outputStream* st, void* siginfo) {
  2253   st->print("siginfo:");
  2255   const int buflen = 100;
  2256   char buf[buflen];
  2257   siginfo_t *si = (siginfo_t*)siginfo;
  2258   st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
  2259   char *err = strerror(si->si_errno);
  2260   if (si->si_errno != 0 && err != NULL) {
  2261     st->print("si_errno=%s", err);
  2262   } else {
  2263     st->print("si_errno=%d", si->si_errno);
  2265   const int c = si->si_code;
  2266   assert(c > 0, "unexpected si_code");
  2267   switch (si->si_signo) {
  2268   case SIGILL:
  2269     st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
  2270     st->print(", si_addr=" PTR_FORMAT, si->si_addr);
  2271     break;
  2272   case SIGFPE:
  2273     st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
  2274     st->print(", si_addr=" PTR_FORMAT, si->si_addr);
  2275     break;
  2276   case SIGSEGV:
  2277     st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
  2278     st->print(", si_addr=" PTR_FORMAT, si->si_addr);
  2279     break;
  2280   case SIGBUS:
  2281     st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
  2282     st->print(", si_addr=" PTR_FORMAT, si->si_addr);
  2283     break;
  2284   default:
  2285     st->print(", si_code=%d", si->si_code);
  2286     // no si_addr
  2289   if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
  2290       UseSharedSpaces) {
  2291     FileMapInfo* mapinfo = FileMapInfo::current_info();
  2292     if (mapinfo->is_in_shared_space(si->si_addr)) {
  2293       st->print("\n\nError accessing class data sharing archive."   \
  2294                 " Mapped file inaccessible during execution, "      \
  2295                 " possible disk/network problem.");
  2298   st->cr();
  2301 // Moved out of the signal-handling group, because we need them here for
  2302 // diagnostic prints.
  2303 #define OLDMAXSIGNUM 32
  2304 static int Maxsignum = 0;
  2305 static int *ourSigFlags = NULL;
  2307 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
  2309 int os::Solaris::get_our_sigflags(int sig) {
  2310   assert(ourSigFlags!=NULL, "signal data structure not initialized");
  2311   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  2312   return ourSigFlags[sig];
  2315 void os::Solaris::set_our_sigflags(int sig, int flags) {
  2316   assert(ourSigFlags!=NULL, "signal data structure not initialized");
  2317   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  2318   ourSigFlags[sig] = flags;
  2322 static const char* get_signal_handler_name(address handler,
  2323                                            char* buf, int buflen) {
  2324   int offset;
  2325   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  2326   if (found) {
  2327     // skip directory names
  2328     const char *p1, *p2;
  2329     p1 = buf;
  2330     size_t len = strlen(os::file_separator());
  2331     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
  2332     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  2333   } else {
  2334     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  2336   return buf;
  2339 static void print_signal_handler(outputStream* st, int sig,
  2340                                   char* buf, size_t buflen) {
  2341   struct sigaction sa;
  2343   sigaction(sig, NULL, &sa);
  2345   st->print("%s: ", os::exception_name(sig, buf, buflen));
  2347   address handler = (sa.sa_flags & SA_SIGINFO)
  2348                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
  2349                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
  2351   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
  2352     st->print("SIG_DFL");
  2353   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
  2354     st->print("SIG_IGN");
  2355   } else {
  2356     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  2359   st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);
  2361   address rh = VMError::get_resetted_sighandler(sig);
  2362   // Maybe the handler was reset by VMError?
  2363   if(rh != NULL) {
  2364     handler = rh;
  2365     sa.sa_flags = VMError::get_resetted_sigflags(sig);
  2368   st->print(", sa_flags="   PTR32_FORMAT, sa.sa_flags);
  2370   // Check: is it our handler?
  2371   if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
  2372      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
  2373     // It is our signal handler
  2374     // check for flags
  2375     if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
  2376       st->print(
  2377         ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
  2378         os::Solaris::get_our_sigflags(sig));
  2381   st->cr();
  2384 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  2385   st->print_cr("Signal Handlers:");
  2386   print_signal_handler(st, SIGSEGV, buf, buflen);
  2387   print_signal_handler(st, SIGBUS , buf, buflen);
  2388   print_signal_handler(st, SIGFPE , buf, buflen);
  2389   print_signal_handler(st, SIGPIPE, buf, buflen);
  2390   print_signal_handler(st, SIGXFSZ, buf, buflen);
  2391   print_signal_handler(st, SIGILL , buf, buflen);
  2392   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  2393   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  2394   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  2395   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  2396   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  2397   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  2398   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  2399   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
  2402 static char saved_jvm_path[MAXPATHLEN] = { 0 };
  2404 // Find the full path to the current module, libjvm.so or libjvm_g.so
  2405 void os::jvm_path(char *buf, jint buflen) {
  2406   // Error checking.
  2407   if (buflen < MAXPATHLEN) {
  2408     assert(false, "must use a large-enough buffer");
  2409     buf[0] = '\0';
  2410     return;
  2412   // Lazy resolve the path to current module.
  2413   if (saved_jvm_path[0] != 0) {
  2414     strcpy(buf, saved_jvm_path);
  2415     return;
  2418   Dl_info dlinfo;
  2419   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  2420   assert(ret != 0, "cannot locate libjvm");
  2421   realpath((char *)dlinfo.dli_fname, buf);
  2423   if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) {
  2424     // Support for the gamma launcher.  Typical value for buf is
  2425     // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
  2426     // the right place in the string, then assume we are installed in a JDK and
  2427     // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
  2428     // up the path so it looks like libjvm.so is installed there (append a
  2429     // fake suffix hotspot/libjvm.so).
  2430     const char *p = buf + strlen(buf) - 1;
  2431     for (int count = 0; p > buf && count < 5; ++count) {
  2432       for (--p; p > buf && *p != '/'; --p)
  2433         /* empty */ ;
  2436     if (strncmp(p, "/jre/lib/", 9) != 0) {
  2437       // Look for JAVA_HOME in the environment.
  2438       char* java_home_var = ::getenv("JAVA_HOME");
  2439       if (java_home_var != NULL && java_home_var[0] != 0) {
  2440         char cpu_arch[12];
  2441         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
  2442 #ifdef _LP64
  2443         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
  2444         if (strcmp(cpu_arch, "sparc") == 0) {
  2445           strcat(cpu_arch, "v9");
  2446         } else if (strcmp(cpu_arch, "i386") == 0) {
  2447           strcpy(cpu_arch, "amd64");
  2449 #endif
  2450         // Check the current module name "libjvm.so" or "libjvm_g.so".
  2451         p = strrchr(buf, '/');
  2452         assert(strstr(p, "/libjvm") == p, "invalid library name");
  2453         p = strstr(p, "_g") ? "_g" : "";
  2455         realpath(java_home_var, buf);
  2456         sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
  2457         if (0 == access(buf, F_OK)) {
  2458           // Use current module name "libjvm[_g].so" instead of
  2459           // "libjvm"debug_only("_g")".so" since for fastdebug version
  2460           // we should have "libjvm.so" but debug_only("_g") adds "_g"!
  2461           // It is used when we are choosing the HPI library's name
  2462           // "libhpi[_g].so" in hpi::initialize_get_interface().
  2463           sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
  2464         } else {
  2465           // Go back to path of .so
  2466           realpath((char *)dlinfo.dli_fname, buf);
  2472   strcpy(saved_jvm_path, buf);
  2476 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  2477   // no prefix required, not even "_"
  2481 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  2482   // no suffix required
  2486 // sun.misc.Signal
  2488 extern "C" {
  2489   static void UserHandler(int sig, void *siginfo, void *context) {
  2490     // If Ctrl-C is pressed during error reporting, it is likely because the
  2491     // error handler failed to abort. Let the VM die immediately.
  2492     if (sig == SIGINT && is_error_reported()) {
  2493        os::die();
  2496     os::signal_notify(sig);
  2497     // We do not need to reinstate the signal handler each time...
  2501 void* os::user_handler() {
  2502   return CAST_FROM_FN_PTR(void*, UserHandler);
  2505 extern "C" {
  2506   typedef void (*sa_handler_t)(int);
  2507   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
  2510 void* os::signal(int signal_number, void* handler) {
  2511   struct sigaction sigAct, oldSigAct;
  2512   sigfillset(&(sigAct.sa_mask));
  2513   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  2514   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
  2516   if (sigaction(signal_number, &sigAct, &oldSigAct))
  2517     // -1 means registration failed
  2518     return (void *)-1;
  2520   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
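       // Illustrative use (sketch): sun.misc.Signal support installs UserHandler via
       //   void* old = os::signal(sig, os::user_handler());
       // and treats a return value of (void*)-1 as a failed registration.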
  2523 void os::signal_raise(int signal_number) {
  2524   raise(signal_number);
  2527 /*
  2528  * The following code was moved from os.cpp to make it
  2529  * platform specific, which it is by its very nature.
  2530  */
  2532 // a counter for each possible signal value
  2533 static int Sigexit = 0;
  2534 static int Maxlibjsigsigs;
  2535 static jint *pending_signals = NULL;
  2536 static int *preinstalled_sigs = NULL;
  2537 static struct sigaction *chainedsigactions = NULL;
  2538 static sema_t sig_sem;
  2539 typedef int (*version_getting_t)();
  2540 version_getting_t os::Solaris::get_libjsig_version = NULL;
  2541 static int libjsigversion = 0;
  2543 int os::sigexitnum_pd() {
  2544   assert(Sigexit > 0, "signal memory not yet initialized");
  2545   return Sigexit;
  2548 void os::Solaris::init_signal_mem() {
  2549   // Initialize signal structures
  2550   Maxsignum = SIGRTMAX;
  2551   Sigexit = Maxsignum+1;
  2552   assert(Maxsignum >0, "Unable to obtain max signal number");
  2554   Maxlibjsigsigs = Maxsignum;
  2556   // pending_signals has one int per signal
  2557   // The additional signal is for SIGEXIT - exit signal to signal_thread
  2558   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1));
  2559   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
  2561   if (UseSignalChaining) {
  2562      chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
  2563        * (Maxsignum + 1));
  2564      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
  2565      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1));
  2566      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
  2568   ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ));
  2569   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
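       // Sizing note: pending_signals gets Sigexit + 1 == Maxsignum + 2 entries, so
       // valid indices run from 0 to Sigexit, with the last slot reserved for the
       // synthetic exit signal delivered to the signal thread.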
  2572 void os::signal_init_pd() {
  2573   int ret;
  2575   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
  2576   assert(ret == 0, "sema_init() failed");
  2579 void os::signal_notify(int signal_number) {
  2580   int ret;
  2582   Atomic::inc(&pending_signals[signal_number]);
  2583   ret = ::sema_post(&sig_sem);
  2584   assert(ret == 0, "sema_post() failed");
  2587 static int check_pending_signals(bool wait_for_signal) {
  2588   int ret;
  2589   while (true) {
  2590     for (int i = 0; i < Sigexit + 1; i++) {
  2591       jint n = pending_signals[i];
  2592       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
  2593         return i;
  2596     if (!wait_for_signal) {
  2597       return -1;
  2599     JavaThread *thread = JavaThread::current();
  2600     ThreadBlockInVM tbivm(thread);
  2602     bool threadIsSuspended;
  2603     do {
  2604       thread->set_suspend_equivalent();
  2605       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  2606       while((ret = ::sema_wait(&sig_sem)) == EINTR)
  2608       assert(ret == 0, "sema_wait() failed");
  2610       // were we externally suspended while we were waiting?
  2611       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
  2612       if (threadIsSuspended) {
  2613         //
  2614         // The semaphore has been incremented, but while we were waiting
  2615         // another thread suspended us. We don't want to continue running
  2616         // while suspended because that would surprise the thread that
  2617         // suspended us.
  2618         //
  2619         ret = ::sema_post(&sig_sem);
  2620         assert(ret == 0, "sema_post() failed");
  2622         thread->java_suspend_self();
  2624     } while (threadIsSuspended);
  2628 int os::signal_lookup() {
  2629   return check_pending_signals(false);
  2632 int os::signal_wait() {
  2633   return check_pending_signals(true);
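       // Together, signal_notify() and check_pending_signals() form a simple
       // producer/consumer hand-off: notify atomically increments the per-signal
       // pending count and posts sig_sem; the waiter claims a pending signal with a
       // CAS decrement, blocking on sema_wait() only when asked to wait_for_signal.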
  2636 ////////////////////////////////////////////////////////////////////////////////
  2637 // Virtual Memory
  2639 static int page_size = -1;
  2641 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
  2642 // clear this var if support is not available.
  2643 static bool has_map_align = true;
  2645 int os::vm_page_size() {
  2646   assert(page_size != -1, "must call os::init");
  2647   return page_size;
  2650 // Solaris allocates memory by pages.
  2651 int os::vm_allocation_granularity() {
  2652   assert(page_size != -1, "must call os::init");
  2653   return page_size;
  2656 bool os::commit_memory(char* addr, size_t bytes, bool exec) {
  2657   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  2658   size_t size = bytes;
  2659   return
  2660      NULL != Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  2663 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
  2664                        bool exec) {
  2665   if (commit_memory(addr, bytes, exec)) {
  2666     if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
  2667       // If the large page size has been set and the VM
  2668       // is using large pages, use the large page size
  2669       // if it is smaller than the alignment hint. This is
  2670       // a case where the VM wants to use a larger alignment size
  2671       // for its own reasons but still wants to use large pages
  2672       // (which is what matters for setting the mpss range).
  2673       size_t page_size = 0;
  2674       if (large_page_size() < alignment_hint) {
  2675         assert(UseLargePages, "Expected to be here for large page use only");
  2676         page_size = large_page_size();
  2677       } else {
  2678         // If the alignment hint is less than the large page
  2679         // size, the VM wants a particular alignment (thus the hint)
  2680         // for internal reasons.  Try to set the mpss range using
  2681         // the alignment_hint.
  2682         page_size = alignment_hint;
  2684       // Since this is a hint, ignore any failures.
  2685       (void)Solaris::set_mpss_range(addr, bytes, page_size);
  2687     return true;
  2689   return false;
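       // Worked example (illustrative sizes): with a 4M large page size and an 8M
       // alignment hint, the MPSS range is set using the 4M page size; with a 64K
       // alignment hint the 64K hint itself is used, since it is smaller than the
       // large page size.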
  2692 // Uncommit the pages in a specified region.
  2693 void os::free_memory(char* addr, size_t bytes) {
  2694   if (madvise(addr, bytes, MADV_FREE) < 0) {
  2695     debug_only(warning("MADV_FREE failed."));
  2696     return;
  2700 // Change the page size in a given range.
  2701 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  2702   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  2703   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  2704   Solaris::set_mpss_range(addr, bytes, alignment_hint);
  2707 // Tell the OS to make the range local to the first-touching LWP
  2708 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  2709   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  2710   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
  2711     debug_only(warning("MADV_ACCESS_LWP failed."));
  2715 // Tell the OS that this range would be accessed from different LWPs.
  2716 void os::numa_make_global(char *addr, size_t bytes) {
  2717   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  2718   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
  2719     debug_only(warning("MADV_ACCESS_MANY failed."));
  2723 // Get the number of the locality groups.
  2724 size_t os::numa_get_groups_num() {
  2725   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
  2726   return n != -1 ? n : 1;
  2729 // Get a list of leaf locality groups. A leaf lgroup is a group that
  2730 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
  2731 // board. An LWP is assigned to one of these groups upon creation.
  2732 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  2733    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
  2734      ids[0] = 0;
  2735      return 1;
  2737    int result_size = 0, top = 1, bottom = 0, cur = 0;
  2738    for (int k = 0; k < size; k++) {
  2739      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
  2740                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
  2741      if (r == -1) {
  2742        ids[0] = 0;
  2743        return 1;
  2745      if (!r) {
  2746        // That's a leaf node.
  2747        assert (bottom <= cur, "Sanity check");
  2748        // Check if the node has memory
  2749        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
  2750                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
  2751          ids[bottom++] = ids[cur];
  2754      top += r;
  2755      cur++;
  2757    if (bottom == 0) {
  2758      // Handle the situation when the OS reports no memory available.
  2759      // Assume UMA architecture.
  2760      ids[0] = 0;
  2761      return 1;
  2763    return bottom;
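       // Illustrative use (sketch): callers typically size the ids array from
       // numa_get_groups_num() first, e.g.
       //   size_t num = os::numa_get_groups_num();
       //   int* ids = (int*)alloca(num * sizeof(int));
       //   size_t found = os::numa_get_leaf_groups(ids, num);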
  2766 // Detect the topology change. Typically happens during CPU plugging-unplugging.
  2767 bool os::numa_topology_changed() {
  2768   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
  2769   if (is_stale != -1 && is_stale) {
  2770     Solaris::lgrp_fini(Solaris::lgrp_cookie());
  2771     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
  2772     assert(c != 0, "Failure to initialize LGRP API");
  2773     Solaris::set_lgrp_cookie(c);
  2774     return true;
  2776   return false;
  2779 // Get the group id of the current LWP.
  2780 int os::numa_get_group_id() {
  2781   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
  2782   if (lgrp_id == -1) {
  2783     return 0;
  2785   const int size = os::numa_get_groups_num();
  2786   int *ids = (int*)alloca(size * sizeof(int));
  2788   // Get the ids of all lgroups with memory; r is the count.
  2789   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
  2790                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  2791   if (r <= 0) {
  2792     return 0;
  2794   return ids[os::random() % r];
  2797 // Request information about the page.
  2798 bool os::get_page_info(char *start, page_info* info) {
  2799   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  2800   uint64_t addr = (uintptr_t)start;
  2801   uint64_t outdata[2];
  2802   uint_t validity = 0;
  2804   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
  2805     return false;
  2808   info->size = 0;
  2809   info->lgrp_id = -1;
  2811   if ((validity & 1) != 0) {
  2812     if ((validity & 2) != 0) {
  2813       info->lgrp_id = outdata[0];
  2815     if ((validity & 4) != 0) {
  2816       info->size = outdata[1];
  2818     return true;
  2820   return false;
  2823 // Scan the pages from start to end until a page different than
  2824 // the one described in the info parameter is encountered.
  2825 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  2826   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  2827   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  2828   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
  2829   uint_t validity[MAX_MEMINFO_CNT];
  2831   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  2832   uint64_t p = (uint64_t)start;
  2833   while (p < (uint64_t)end) {
  2834     addrs[0] = p;
  2835     size_t addrs_count = 1;
  2836     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
  2837       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
  2838       addrs_count++;
  2841     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
  2842       return NULL;
  2845     size_t i = 0;
  2846     for (; i < addrs_count; i++) {
  2847       if ((validity[i] & 1) != 0) {
  2848         if ((validity[i] & 4) != 0) {
  2849           if (outdata[types * i + 1] != page_expected->size) {
  2850             break;
  2852         } else
  2853           if (page_expected->size != 0) {
  2854             break;
  2857         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
  2858           if (outdata[types * i] != page_expected->lgrp_id) {
  2859             break;
  2862       } else {
  2863         return NULL;
  2867     if (i != addrs_count) {
  2868       if ((validity[i] & 2) != 0) {
  2869         page_found->lgrp_id = outdata[types * i];
  2870       } else {
  2871         page_found->lgrp_id = -1;
  2873       if ((validity[i] & 4) != 0) {
  2874         page_found->size = outdata[types * i + 1];
  2875       } else {
  2876         page_found->size = 0;
  2878       return (char*)addrs[i];
  2881     p = addrs[addrs_count - 1] + page_size;
  2883   return end;
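// Illustrative (hypothetical) use of scan_pages: check that a committed region
// received the expected page size and lgroup, and locate the first page that
// did not. 'region_start' and 'region_end' are assumed to bound the region.
//
//   page_info expected, found;
//   expected.size    = os::large_page_size();
//   expected.lgrp_id = os::numa_get_group_id();
//   char* mismatch = os::scan_pages(region_start, region_end, &expected, &found);
//   if (mismatch != region_end) {
//     // the page at 'mismatch' has size 'found.size' on lgroup 'found.lgrp_id'
//   }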
  2886 bool os::uncommit_memory(char* addr, size_t bytes) {
  2887   size_t size = bytes;
  2888   // Map uncommitted pages PROT_NONE so we fail early if we touch an
  2889   // uncommitted page. Otherwise, the read/write might succeed if we
  2890   // have enough swap space to back the physical page.
  2891   return
  2892     NULL != Solaris::mmap_chunk(addr, size,
  2893                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
  2894                                 PROT_NONE);
  2897 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  2898   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
  2900   if (b == MAP_FAILED) {
  2901     return NULL;
  2903   return b;
  2906 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  2907   char* addr = requested_addr;
  2908   int flags = MAP_PRIVATE | MAP_NORESERVE;
  2910   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
  2912   if (fixed) {
  2913     flags |= MAP_FIXED;
  2914   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
  2915     flags |= MAP_ALIGN;
  2916     addr = (char*) alignment_hint;
  2919   // Map uncommitted pages PROT_NONE so we fail early if we touch an
  2920   // uncommitted page. Otherwise, the read/write might succeed if we
  2921   // have enough swap space to back the physical page.
  2922   return mmap_chunk(addr, bytes, flags, PROT_NONE);
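// Note on MAP_ALIGN (see the Solaris mmap(2) man page): when MAP_ALIGN is set
// and MAP_FIXED is not, the addr argument is interpreted as the required
// alignment rather than as a mapping address, which is why anon_mmap() passes
// the alignment hint through addr above. A minimal sketch (illustrative only,
// using this file's /dev/zero fd convention):
//
//   // ask the kernel for any 1G-aligned placement of 'bytes' bytes
//   char* p = (char*)mmap((char*)G, bytes, PROT_NONE,
//                         MAP_PRIVATE | MAP_NORESERVE | MAP_ALIGN,
//                         os::Solaris::_dev_zero_fd, 0);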
  2925 char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  2926   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
  2928   guarantee(requested_addr == NULL || requested_addr == addr,
  2929             "OS failed to return requested mmap address.");
  2930   return addr;
  2933 // Reserve memory at an arbitrary address, only if that area is
  2934 // available (and not reserved for something else).
  2936 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  2937   const int max_tries = 10;
  2938   char* base[max_tries];
  2939   size_t size[max_tries];
  2941   // Solaris adds a gap between mmap'ed regions.  The size of the gap
  2942   // is dependent on the requested size and the MMU.  Our initial gap
  2943   // value here is just a guess and will be corrected later.
  2944   bool had_top_overlap = false;
  2945   bool have_adjusted_gap = false;
  2946   size_t gap = 0x400000;
  2948   // Assert only that the size is a multiple of the page size, since
  2949   // that's all that mmap requires, and since that's all we really know
  2950   // about at this low abstraction level.  If we need higher alignment,
  2951   // we can either pass an alignment to this method or verify alignment
  2952   // in one of the methods further up the call chain.  See bug 5044738.
  2953   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
  2955   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  2956   // Give it a try, if the kernel honors the hint we can return immediately.
  2957   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
  2958   volatile int err = errno;
  2959   if (addr == requested_addr) {
  2960     return addr;
  2961   } else if (addr != NULL) {
  2962     unmap_memory(addr, bytes);
  2965   if (PrintMiscellaneous && Verbose) {
  2966     char buf[256];
  2967     buf[0] = '\0';
  2968     if (addr == NULL) {
  2969       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
  2971     warning("attempt_reserve_memory_at: couldn't reserve %d bytes at "
  2972             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
  2973             "%s", bytes, requested_addr, addr, buf);
  2976   // Address hint method didn't work.  Fall back to the old method.
  2977   // In theory, once SNV becomes our oldest supported platform, this
  2978   // code will no longer be needed.
  2979   //
  2980   // Repeatedly allocate blocks until the block is allocated at the
  2981   // right spot. Give up after max_tries.
  2982   int i;
  2983   for (i = 0; i < max_tries; ++i) {
  2984     base[i] = reserve_memory(bytes);
  2986     if (base[i] != NULL) {
  2987       // Is this the block we wanted?
  2988       if (base[i] == requested_addr) {
  2989         size[i] = bytes;
  2990         break;
  2993       // check that the gap value is right
  2994       if (had_top_overlap && !have_adjusted_gap) {
  2995         size_t actual_gap = base[i-1] - base[i] - bytes;
  2996         if (gap != actual_gap) {
  2997           // adjust the gap value and retry the last 2 allocations
  2998           assert(i > 0, "gap adjustment code problem");
  2999           have_adjusted_gap = true;  // adjust the gap only once, just in case
  3000           gap = actual_gap;
  3001           if (PrintMiscellaneous && Verbose) {
  3002             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
  3004           unmap_memory(base[i], bytes);
  3005           unmap_memory(base[i-1], size[i-1]);
  3006           i-=2;
  3007           continue;
  3011       // Does this overlap the block we wanted? Give back the overlapped
  3012       // parts and try again.
  3013       //
  3014       // There is still a bug in this code: if top_overlap == bytes,
  3015 //       the overlap is offset from the requested region by the value of gap.
  3016       // In this case giving back the overlapped part will not work,
  3017       // because we'll give back the entire block at base[i] and
  3018       // therefore the subsequent allocation will not generate a new gap.
  3019       // This could be fixed with a new algorithm that used larger
  3020       // or variable size chunks to find the requested region -
  3021       // but such a change would introduce additional complications.
  3022       // It's rare enough that the planets align for this bug,
  3023       // so we'll just wait for a fix for 6204603/5003415 which
  3024 //       will provide an mmap flag to allow us to avoid this business.
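// Worked example (numbers are illustrative, and assume the kernel keeps
// placing new segments 'gap' bytes below the previous one, as this loop
// relies on): with gap == 0x400000, bytes == 0x200000 and
// requested_addr == 0x50000000, a block returned at 0x50500000 gives
// top_overlap == 0x50000000 + 0x600000 - 0x50500000 == 0x100000.
// Unmapping that low 0x100000 leaves [0x50600000, 0x50700000) reserved, so
// the next allocation of 'bytes' tends to land at
// 0x50600000 - gap - bytes == 0x50000000, i.e. at requested_addr.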
  3026       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
  3027       if (top_overlap >= 0 && top_overlap < bytes) {
  3028         had_top_overlap = true;
  3029         unmap_memory(base[i], top_overlap);
  3030         base[i] += top_overlap;
  3031         size[i] = bytes - top_overlap;
  3032       } else {
  3033         size_t bottom_overlap = base[i] + bytes - requested_addr;
  3034         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
  3035           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
  3036             warning("attempt_reserve_memory_at: possible alignment bug");
  3038           unmap_memory(requested_addr, bottom_overlap);
  3039           size[i] = bytes - bottom_overlap;
  3040         } else {
  3041           size[i] = bytes;
  3047   // Give back the unused reserved pieces.
  3049   for (int j = 0; j < i; ++j) {
  3050     if (base[j] != NULL) {
  3051       unmap_memory(base[j], size[j]);
  3055   return (i < max_tries) ? requested_addr : NULL;
  3058 bool os::release_memory(char* addr, size_t bytes) {
  3059   size_t size = bytes;
  3060   return munmap(addr, size) == 0;
  3063 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  3064   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
  3065          "addr must be page aligned");
  3066   int retVal = mprotect(addr, bytes, prot);
  3067   return retVal == 0;
  3070 // Protect memory (Used to pass readonly pages through
  3071 // JNI GetArray<type>Elements with empty arrays.)
  3072 // Also, used for serialization page and for compressed oops null pointer
  3073 // checking.
  3074 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
  3075                         bool is_committed) {
  3076   unsigned int p = 0;
  3077   switch (prot) {
  3078   case MEM_PROT_NONE: p = PROT_NONE; break;
  3079   case MEM_PROT_READ: p = PROT_READ; break;
  3080   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  3081   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  3082   default:
  3083     ShouldNotReachHere();
  3085   // is_committed is unused.
  3086   return solaris_mprotect(addr, bytes, p);
  3089 // guard_memory and unguard_memory only happen within stack guard pages.
  3090 // Since ISM pertains only to the heap, guard and unguard memory should not
  3091 // happen within an ISM region.
  3092 bool os::guard_memory(char* addr, size_t bytes) {
  3093   return solaris_mprotect(addr, bytes, PROT_NONE);
  3096 bool os::unguard_memory(char* addr, size_t bytes) {
  3097   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
  3100 // Large page support
  3102 // UseLargePages is the master flag to enable/disable large page memory.
  3103 // UseMPSS and UseISM are supported for compatibility reasons. Their combined
  3104 // effects can be described in the following table:
  3105 //
  3106 // UseLargePages UseMPSS UseISM
  3107 //    false         *       *   => UseLargePages is the master switch, turning
  3108 //                                 it off will turn off both UseMPSS and
  3109 //                                 UseISM. VM will not use large page memory
  3110 //                                 regardless of the settings of UseMPSS/UseISM.
  3111 //     true      false    false => Unless a future Solaris provides another
  3112 //                                 mechanism to use large page memory, this
  3113 //                                 combination is equivalent to -UseLargePages,
  3114 //                                 VM will not use large page memory
  3115 //     true      true     false => JVM will use MPSS for large page memory.
  3116 //                                 This is the default behavior.
  3117 //     true      false    true  => JVM will use ISM for large page memory.
  3118 //     true      true     true  => JVM will use ISM if it is available.
  3119 //                                 Otherwise, JVM will fall back to MPSS.
  3120 //                                 Because ISM is now available on all
  3121 //                                 supported Solaris versions, this combination
  3122 //                                 is equivalent to +UseISM -UseMPSS.
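// For example (illustrative command lines using the flags described above):
//   -XX:+UseLargePages -XX:+UseMPSS                  => MPSS large pages (the default)
//   -XX:+UseLargePages -XX:+UseISM -XX:-UseMPSS      => ISM large pages
//   -XX:-UseLargePages                               => no large page memory at all
//   -XX:+UseLargePages -XX:LargePageSizeInBytes=4m   => additionally request a page size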
  3124 typedef int (*getpagesizes_func_type) (size_t[], int);
  3125 static size_t _large_page_size = 0;
  3127 bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
  3128   // x86 uses either 2M or 4M pages, depending on whether PAE (Physical Address
  3129   // Extensions) mode is enabled. AMD64/EM64T uses 2M pages in 64-bit mode. SPARC
  3130   // can support multiple page sizes.
  3132   // Don't bother to probe page size because getpagesizes() comes with MPSS.
  3133   // ISM is only recommended on old Solaris where there is no MPSS support.
  3134   // Simply choose a conservative value as default.
  3135   *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
  3136                SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M);
  3138   // ISM is available on all supported Solaris versions
  3139   return true;
  3142 // Insertion sort for small arrays (descending order).
  3143 static void insertion_sort_descending(size_t* array, int len) {
  3144   for (int i = 0; i < len; i++) {
  3145     size_t val = array[i];
  3146     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
  3147       size_t tmp = array[key];
  3148       array[key] = array[key - 1];
  3149       array[key - 1] = tmp;
  3154 bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
  3155   getpagesizes_func_type getpagesizes_func =
  3156     CAST_TO_FN_PTR(getpagesizes_func_type, dlsym(RTLD_DEFAULT, "getpagesizes"));
  3157   if (getpagesizes_func == NULL) {
  3158     if (warn) {
  3159       warning("MPSS is not supported by the operating system.");
  3161     return false;
  3164   const unsigned int usable_count = VM_Version::page_size_count();
  3165   if (usable_count == 1) {
  3166     return false;
  3169   // Fill the array of page sizes.
  3170   int n = getpagesizes_func(_page_sizes, page_sizes_max);
  3171   assert(n > 0, "Solaris bug?");
  3172   if (n == page_sizes_max) {
  3173     // Add a sentinel value (necessary only if the array was completely filled
  3174     // since it is static (zeroed at initialization)).
  3175     _page_sizes[--n] = 0;
  3176     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  3178   assert(_page_sizes[n] == 0, "missing sentinel");
  3180   if (n == 1) return false;     // Only one page size available.
  3182   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  3183   // select up to usable_count elements.  First sort the array, find the first
  3184   // acceptable value, then copy the usable sizes to the top of the array and
  3185   // trim the rest.  Make sure to include the default page size :-).
  3186   //
  3187   // A better policy could get rid of the 4M limit by taking the sizes of the
  3188   // important VM memory regions (java heap and possibly the code cache) into
  3189   // account.
  3190   insertion_sort_descending(_page_sizes, n);
  3191   const size_t size_limit =
  3192     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  3193   int beg;
  3194   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  3195   const int end = MIN2((int)usable_count, n) - 1;
  3196   for (int cur = 0; cur < end; ++cur, ++beg) {
  3197     _page_sizes[cur] = _page_sizes[beg];
  3199   _page_sizes[end] = vm_page_size();
  3200   _page_sizes[end + 1] = 0;
  3202   if (_page_sizes[end] > _page_sizes[end - 1]) {
  3203     // Default page size is not the smallest; sort again.
  3204     insertion_sort_descending(_page_sizes, end + 1);
  3206   *page_size = _page_sizes[0];
  3208   return true;
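// Worked example of the trimming above (sizes are hypothetical): if
// getpagesizes() returns {8K, 64K, 512K, 4M, 32M} on a machine with an 8K
// base page, usable_count == 2 and LargePageSizeInBytes is left at its
// default, the descending sort gives {32M, 4M, 512K, 64K, 8K}; 32M exceeds
// the 4M limit and is skipped, one usable size (4M) is copied to the front,
// the default page size (8K) is appended, and the array ends up {4M, 8K, 0}
// with *page_size == 4M.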
  3211 bool os::large_page_init() {
  3212   if (!UseLargePages) {
  3213     UseISM = false;
  3214     UseMPSS = false;
  3215     return false;
  3218   // print a warning if any large page related flag is specified on command line
  3219   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
  3220                          !FLAG_IS_DEFAULT(UseISM)               ||
  3221                          !FLAG_IS_DEFAULT(UseMPSS)              ||
  3222                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  3223   UseISM = UseISM &&
  3224            Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
  3225   if (UseISM) {
  3226     // ISM disables MPSS to be compatible with old JDK behavior
  3227     UseMPSS = false;
  3228     _page_sizes[0] = _large_page_size;
  3229     _page_sizes[1] = vm_page_size();
  3232   UseMPSS = UseMPSS &&
  3233             Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
  3235   UseLargePages = UseISM || UseMPSS;
  3236   return UseLargePages;
  3239 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
  3240   // Signal to the OS that we want large pages for addresses
  3241   // in the range [start, start + bytes)
  3242   struct memcntl_mha mpss_struct;
  3243   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  3244   mpss_struct.mha_pagesize = align;
  3245   mpss_struct.mha_flags = 0;
  3246   if (memcntl(start, bytes, MC_HAT_ADVISE,
  3247               (caddr_t) &mpss_struct, 0, 0) < 0) {
  3248     debug_only(warning("Attempt to use MPSS failed."));
  3249     return false;
  3251   return true;
  3254 char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {
  3255   // "exec" is passed in but not used.  The shared memory image created for
  3256   // the code cache has no SHM_X executable permission to check.
  3257   assert(UseLargePages && UseISM, "only for ISM large pages");
  3259   size_t size = bytes;
  3260   char* retAddr = NULL;
  3261   int shmid;
  3262   key_t ismKey;
  3264   bool warn_on_failure = UseISM &&
  3265                         (!FLAG_IS_DEFAULT(UseLargePages)         ||
  3266                          !FLAG_IS_DEFAULT(UseISM)                ||
  3267                          !FLAG_IS_DEFAULT(LargePageSizeInBytes)
  3268                         );
  3269   char msg[128];
  3271   ismKey = IPC_PRIVATE;
  3273   // Create a large shared memory region to attach to based on size.
  3274   // Currently, size is the total size of the heap
  3275   shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
  3276   if (shmid == -1){
  3277      if (warn_on_failure) {
  3278        jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
  3279        warning(msg);
  3281      return NULL;
  3284   // Attach to the region
  3285   retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
  3286   int err = errno;
  3288   // Remove shmid. If shmat() is successful, the actual shared memory segment
  3289   // will be deleted when it's detached by shmdt() or when the process
  3290   // terminates. If shmat() is not successful this will remove the shared
  3291   // segment immediately.
  3292   shmctl(shmid, IPC_RMID, NULL);
  3294   if (retAddr == (char *) -1) {
  3295     if (warn_on_failure) {
  3296       jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
  3297       warning(msg);
  3299     return NULL;
  3302   return retAddr;
  3305 bool os::release_memory_special(char* base, size_t bytes) {
  3306   // detaching the SHM segment will also delete it, see reserve_memory_special()
  3307   int rslt = shmdt(base);
  3308   return rslt == 0;
  3311 size_t os::large_page_size() {
  3312   return _large_page_size;
  3315 // MPSS allows application to commit large page memory on demand; with ISM
  3316 // the entire memory region must be allocated as shared memory.
  3317 bool os::can_commit_large_page_memory() {
  3318   return UseISM ? false : true;
  3321 bool os::can_execute_large_page_memory() {
  3322   return UseISM ? false : true;
  3325 static int os_sleep(jlong millis, bool interruptible) {
  3326   const jlong limit = INT_MAX;
  3327   jlong prevtime;
  3328   int res;
  3330   while (millis > limit) {
  3331     if ((res = os_sleep(limit, interruptible)) != OS_OK)
  3332       return res;
  3333     millis -= limit;
  3336   // Restart interrupted polls with new parameters until the proper delay
  3337   // has been completed.
  3339   prevtime = getTimeMillis();
  3341   while (millis > 0) {
  3342     jlong newtime;
  3344     if (!interruptible) {
  3345       // Following assert fails for os::yield_all:
  3346       // assert(!thread->is_Java_thread(), "must not be java thread");
  3347       res = poll(NULL, 0, millis);
  3348     } else {
  3349       JavaThread *jt = JavaThread::current();
  3351       INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
  3352         os::Solaris::clear_interrupted);
  3355     // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
  3356     // thread.Interrupt.
  3358     if((res == OS_ERR) && (errno == EINTR)) {
  3359       newtime = getTimeMillis();
  3360       assert(newtime >= prevtime, "time moving backwards");
  3361     /* Doing prevtime and newtime in microseconds doesn't help precision,
  3362        and trying to round up to avoid lost milliseconds can result in a
  3363        too-short delay. */
  3364       millis -= newtime - prevtime;
  3365       if(millis <= 0)
  3366         return OS_OK;
  3367       prevtime = newtime;
  3368     } else
  3369       return res;
  3372   return OS_OK;
  3375 // Read calls from inside the vm need to perform state transitions
  3376 size_t os::read(int fd, void *buf, unsigned int nBytes) {
  3377   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
  3380 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  3381   assert(thread == Thread::current(),  "thread consistency check");
  3383   // TODO-FIXME: this should be removed.
  3384   // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  3385   // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  3386   // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  3387   // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  3388   // is fooled into believing that the system is making progress. In the code below we block
  3389   // the watcher thread while a safepoint is in progress so that it would not appear as though the
  3390   // system is making progress.
  3391   if (!Solaris::T2_libthread() &&
  3392       thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
  3393     // We now try to acquire the threads lock. Since this lock is held by the VM thread during
  3394     // the entire safepoint, the watcher thread will line up here during the safepoint.
  3395     Threads_lock->lock_without_safepoint_check();
  3396     Threads_lock->unlock();
  3399   if (thread->is_Java_thread()) {
  3400     // This is a JavaThread so we honor the _thread_blocked protocol
  3401     // even for sleeps of 0 milliseconds. This was originally done
  3402     // as a workaround for bug 4338139. However, now we also do it
  3403     // to honor the suspend-equivalent protocol.
  3405     JavaThread *jt = (JavaThread *) thread;
  3406     ThreadBlockInVM tbivm(jt);
  3408     jt->set_suspend_equivalent();
  3409     // cleared by handle_special_suspend_equivalent_condition() or
  3410     // java_suspend_self() via check_and_wait_while_suspended()
  3412     int ret_code;
  3413     if (millis <= 0) {
  3414       thr_yield();
  3415       ret_code = 0;
  3416     } else {
  3417       // The original sleep() implementation did not create an
  3418       // OSThreadWaitState helper for sleeps of 0 milliseconds.
  3419       // I'm preserving that decision for now.
  3420       OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
  3422       ret_code = os_sleep(millis, interruptible);
  3425     // were we externally suspended while we were waiting?
  3426     jt->check_and_wait_while_suspended();
  3428     return ret_code;
  3431   // non-JavaThread from this point on:
  3433   if (millis <= 0) {
  3434     thr_yield();
  3435     return 0;
  3438   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  3440   return os_sleep(millis, interruptible);
  3443 int os::naked_sleep() {
  3444   // %% make the sleep time an integer flag. for now use 1 millisec.
  3445   return os_sleep(1, false);
  3448 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
  3449 void os::infinite_sleep() {
  3450   while (true) {    // sleep forever ...
  3451     ::sleep(100);   // ... 100 seconds at a time
  3455 // Used to convert frequent JVM_Yield() to nops
  3456 bool os::dont_yield() {
  3457   if (DontYieldALot) {
  3458     static hrtime_t last_time = 0;
  3459     hrtime_t diff = getTimeNanos() - last_time;
  3461     if (diff < DontYieldALotInterval * 1000000)
  3462       return true;
  3464     last_time += diff;
  3466     return false;
  3468   else {
  3469     return false;
  3473 // Caveat: Solaris os::yield() causes a thread-state transition whereas
  3474 // the linux and win32 implementations do not.  This should be checked.
  3476 void os::yield() {
  3477   // Yields to all threads with same or greater priority
  3478   os::sleep(Thread::current(), 0, false);
  3481 // Note that yield semantics are defined by the scheduling class to which
  3482 // the thread currently belongs.  Typically, yield will _not_ yield to
  3483 // other equal or higher priority threads that reside on the dispatch queues
  3484 // of other CPUs.
  3486 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
  3489 // On Solaris we found that yield_all doesn't always yield to all other threads.
  3490 // There have been cases where there is a thread ready to execute but it doesn't
  3491 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
  3492 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
  3493 // SIGWAITING signal which will cause a new lwp to be created. So we count the
  3494 // number of times yield_all is called in one loop and increase the sleep
  3495 // time after 8 attempts. If this fails too, we increase the concurrency level
  3496 // so that the starving thread gets an lwp.
  3498 void os::yield_all(int attempts) {
  3499   // Yields to all threads, including threads with lower priorities
  3500   if (attempts == 0) {
  3501     os::sleep(Thread::current(), 1, false);
  3502   } else {
  3503     int iterations = attempts % 30;
  3504     if (iterations == 0 && !os::Solaris::T2_libthread()) {
  3505       // thr_setconcurrency and _getconcurrency make sense only under T1.
  3506       int noofLWPS = thr_getconcurrency();
  3507       if (noofLWPS < (Threads::number_of_threads() + 2)) {
  3508         thr_setconcurrency(thr_getconcurrency() + 1);
  3510     } else if (iterations < 25) {
  3511       os::sleep(Thread::current(), 1, false);
  3512     } else {
  3513       os::sleep(Thread::current(), 10, false);
  3518 // Called from the tight loops to possibly influence time-sharing heuristics
  3519 void os::loop_breaker(int attempts) {
  3520   os::yield_all(attempts);
  3524 // Interface for setting lwp priorities.  If we are using T2 libthread,
  3525 // which forces the use of BoundThreads or we manually set UseBoundThreads,
  3526 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
  3527 // function is meaningless in this mode so we must adjust the real lwp's priority
  3528 // The routines below implement the getting and setting of lwp priorities.
  3529 //
  3530 // Note: There are three priority scales used on Solaris.  Java priorities,
  3531 //       which range from 1 to 10; the libthread "thr_setprio" scale, which
  3532 //       ranges from 0 to 127; and the current scheduling class of the process
  3533 //       we are running in.  This is typically from -60 to +60.
  3534 //       The setting of the lwp priorities is done after a call to thr_setprio
  3535 //       so Java priorities are mapped to libthread priorities and we map from
  3536 //       the latter to lwp priorities.  We don't keep priorities stored in
  3537 //       Java priorities since some of our worker threads want to set priorities
  3538 //       higher than all Java threads.
  3539 //
  3540 // For related information:
  3541 // (1)  man -s 2 priocntl
  3542 // (2)  man -s 4 priocntl
  3543 // (3)  man dispadmin
  3544 // =    librt.so
  3545 // =    libthread/common/rtsched.c - thrp_setlwpprio().
  3546 // =    ps -cL <pid> ... to validate priority.
  3547 // =    sched_get_priority_min and _max
  3548 //              pthread_create
  3549 //              sched_setparam
  3550 //              pthread_setschedparam
  3551 //
  3552 // Assumptions:
  3553 // +    We assume that all threads in the process belong to the same
  3554 //              scheduling class.   I.e., a homogeneous process.
  3555 // +    Must be root or in the IA group to change the "interactive" attribute.
  3556 //              Priocntl() will fail silently.  The only indication of failure is when
  3557 //              we read-back the value and notice that it hasn't changed.
  3558 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
  3559 // +    For RT, change timeslice as well.  Invariant:
  3560 //              constant "priority integral"
  3561 //              Konst == TimeSlice * (60-Priority)
  3562 //              Given a priority, compute appropriate timeslice.
  3563 // +    Higher numerical values have higher priority.
  3565 // sched class attributes
  3566 typedef struct {
  3567         int   schedPolicy;              // classID
  3568         int   maxPrio;
  3569         int   minPrio;
  3570 } SchedInfo;
  3573 static SchedInfo tsLimits, iaLimits, rtLimits;
  3575 #ifdef ASSERT
  3576 static int  ReadBackValidate = 1;
  3577 #endif
  3578 static int  myClass     = 0;
  3579 static int  myMin       = 0;
  3580 static int  myMax       = 0;
  3581 static int  myCur       = 0;
  3582 static bool priocntl_enable = false;
  3585 // Call the version of priocntl suitable for all supported versions
  3586 // of Solaris. We need to call through this wrapper so that we can
  3587 // build on Solaris 9 and run on Solaris 8, 9 and 10.
  3588 //
  3589 // This code should be removed if we ever stop supporting Solaris 8
  3590 // and earlier releases.
  3592 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
  3593 typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
  3594 static priocntl_type priocntl_ptr = priocntl_stub;
  3596 // Stub to set the value of the real pointer, and then call the real
  3597 // function.
  3599 static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
  3600   // Try Solaris 8- name only.
  3601   priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
  3602   guarantee(tmp != NULL, "priocntl function not found.");
  3603   priocntl_ptr = tmp;
  3604   return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
  3608 // lwp_priocntl_init
  3609 //
  3610 // Try to determine the priority scale for our process.
  3611 //
  3612 // Return errno or 0 if OK.
  3613 //
  3614 static
  3615 int     lwp_priocntl_init ()
  3617   int rslt;
  3618   pcinfo_t ClassInfo;
  3619   pcparms_t ParmInfo;
  3620   int i;
  3622   if (!UseThreadPriorities) return 0;
  3624   // We are using Bound threads, we need to determine our priority ranges
  3625   if (os::Solaris::T2_libthread() || UseBoundThreads) {
  3626     // If ThreadPriorityPolicy is 1, switch tables
  3627     if (ThreadPriorityPolicy == 1) {
  3628       for (i = 0 ; i < MaxPriority+1; i++)
  3629         os::java_to_os_priority[i] = prio_policy1[i];
  3632   // Not using Bound Threads, set to ThreadPolicy 1
  3633   else {
  3634     for ( i = 0 ; i < MaxPriority+1; i++ ) {
  3635       os::java_to_os_priority[i] = prio_policy1[i];
  3637     return 0;
  3641   // Get IDs for a set of well-known scheduling classes.
  3642   // TODO-FIXME: GETCLINFO returns the current # of classes in
  3643   // the system.  We should have a loop that iterates over the
  3644   // classID values, which are known to be "small" integers.
  3646   strcpy(ClassInfo.pc_clname, "TS");
  3647   ClassInfo.pc_cid = -1;
  3648   rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3649   if (rslt < 0) return errno;
  3650   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  3651   tsLimits.schedPolicy = ClassInfo.pc_cid;
  3652   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  3653   tsLimits.minPrio = -tsLimits.maxPrio;
  3655   strcpy(ClassInfo.pc_clname, "IA");
  3656   ClassInfo.pc_cid = -1;
  3657   rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3658   if (rslt < 0) return errno;
  3659   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  3660   iaLimits.schedPolicy = ClassInfo.pc_cid;
  3661   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  3662   iaLimits.minPrio = -iaLimits.maxPrio;
  3664   strcpy(ClassInfo.pc_clname, "RT");
  3665   ClassInfo.pc_cid = -1;
  3666   rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3667   if (rslt < 0) return errno;
  3668   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  3669   rtLimits.schedPolicy = ClassInfo.pc_cid;
  3670   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  3671   rtLimits.minPrio = 0;
  3674   // Query our "current" scheduling class.
  3675   // This will normally be IA,TS or, rarely, RT.
  3676   memset (&ParmInfo, 0, sizeof(ParmInfo));
  3677   ParmInfo.pc_cid = PC_CLNULL;
  3678   rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
  3679   if ( rslt < 0 ) return errno;
  3680   myClass = ParmInfo.pc_cid;
  3682   // We now know our scheduling classId; get specific information
  3683   // about the class.
  3684   ClassInfo.pc_cid = myClass;
  3685   ClassInfo.pc_clname[0] = 0;
  3686   rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
  3687   if ( rslt < 0 ) return errno;
  3689   if (ThreadPriorityVerbose)
  3690     tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
  3692   memset(&ParmInfo, 0, sizeof(pcparms_t));
  3693   ParmInfo.pc_cid = PC_CLNULL;
  3694   rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  3695   if (rslt < 0) return errno;
  3697   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
  3698     myMin = rtLimits.minPrio;
  3699     myMax = rtLimits.maxPrio;
  3700   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
  3701     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
  3702     myMin = iaLimits.minPrio;
  3703     myMax = iaLimits.maxPrio;
  3704     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  3705   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
  3706     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
  3707     myMin = tsLimits.minPrio;
  3708     myMax = tsLimits.maxPrio;
  3709     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  3710   } else {
  3711     // No clue - punt
  3712     if (ThreadPriorityVerbose)
  3713       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
  3714     return EINVAL;      // no clue, punt
  3717   if (ThreadPriorityVerbose)
  3718         tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
  3720   priocntl_enable = true;  // Enable changing priorities
  3721   return 0;
  3724 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
  3725 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
  3726 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
  3729 // scale_to_lwp_priority
  3730 //
  3731 // Convert from the libthread "thr_setprio" scale to our current
  3732 // lwp scheduling class scale.
  3733 //
  3734 static
  3735 int     scale_to_lwp_priority (int rMin, int rMax, int x)
  3737   int v;
  3739   if (x == 127) return rMax;            // avoid round-down
  3740     v = (((x*(rMax-rMin)))/128)+rMin;
  3741   return v;
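// Worked example: with the typical TS user priority range of -60..+60 noted
// in the comment block above (rMin == -60, rMax == 60), a libthread priority
// of 0 maps to (0 * 120) / 128 - 60 == -60, 64 maps to (64 * 120) / 128 - 60
// == 0, and 127 is special-cased to 60 to avoid the round-down.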
  3745 // set_lwp_priority
  3746 //
  3747 // Set the priority of the lwp.  This call should only be made
  3748 // when using bound threads (T2 threads are bound by default).
  3749 //
  3750 int     set_lwp_priority (int ThreadID, int lwpid, int newPrio )
  3752   int rslt;
  3753   int Actual, Expected, prv;
  3754   pcparms_t ParmInfo;                   // for GET-SET
  3755 #ifdef ASSERT
  3756   pcparms_t ReadBack;                   // for readback
  3757 #endif
  3759   // Set priority via PC_GETPARMS, update, PC_SETPARMS
  3760   // Query current values.
  3761   // TODO: accelerate this by eliminating the PC_GETPARMS call.
  3762   // Cache "pcparms_t" in global ParmCache.
  3763   // TODO: elide set-to-same-value
  3765   // If something went wrong on init, don't change priorities.
  3766   if ( !priocntl_enable ) {
  3767     if (ThreadPriorityVerbose)
  3768       tty->print_cr("Trying to set priority but init failed, ignoring");
  3769     return EINVAL;
  3773   // If the lwp hasn't started yet, just return;
  3774   // the _start routine will call us again.
  3775   if ( lwpid <= 0 ) {
  3776     if (ThreadPriorityVerbose) {
  3777       tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
  3778                      ThreadID, newPrio);
  3780     return 0;
  3783   if (ThreadPriorityVerbose) {
  3784     tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
  3785                    ThreadID, lwpid, newPrio);
  3788   memset(&ParmInfo, 0, sizeof(pcparms_t));
  3789   ParmInfo.pc_cid = PC_CLNULL;
  3790   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  3791   if (rslt < 0) return errno;
  3793   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
  3794     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
  3795     rtInfo->rt_pri     = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
  3796     rtInfo->rt_tqsecs  = RT_NOCHANGE;
  3797     rtInfo->rt_tqnsecs = RT_NOCHANGE;
  3798     if (ThreadPriorityVerbose) {
  3799       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
  3801   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
  3802     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
  3803     int maxClamped     = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
  3804     iaInfo->ia_upri    = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
  3805     iaInfo->ia_uprilim = IA_NOCHANGE;
  3806     iaInfo->ia_mode    = IA_NOCHANGE;
  3807     if (ThreadPriorityVerbose) {
  3808       tty->print_cr ("IA: [%d...%d] %d->%d\n",
  3809                iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
  3811   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
  3812     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
  3813     int maxClamped     = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
  3814     prv                = tsInfo->ts_upri;
  3815     tsInfo->ts_upri    = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
  3816     tsInfo->ts_uprilim = IA_NOCHANGE;
  3817     if (ThreadPriorityVerbose) {
  3818       tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
  3819                prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
  3821     if (prv == tsInfo->ts_upri) return 0;
  3822   } else {
  3823     if ( ThreadPriorityVerbose ) {
  3824       tty->print_cr ("Unknown scheduling class\n");
  3826       return EINVAL;    // no clue, punt
  3829   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  3830   if (ThreadPriorityVerbose && rslt) {
  3831     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  3833   if (rslt < 0) return errno;
  3835 #ifdef ASSERT
  3836   // Sanity check: read back what we just attempted to set.
  3837   // In theory it could have changed in the interim ...
  3838   //
  3839   // The priocntl system call is tricky.
  3840   // Sometimes it'll validate the priority value argument and
  3841   // return EINVAL if unhappy.  At other times it fails silently.
  3842   // Readbacks are prudent.
  3844   if (!ReadBackValidate) return 0;
  3846   memset(&ReadBack, 0, sizeof(pcparms_t));
  3847   ReadBack.pc_cid = PC_CLNULL;
  3848   rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  3849   assert(rslt >= 0, "priocntl failed");
  3850   Actual = Expected = 0xBAD;
  3851   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  3852   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
  3853     Actual   = RTPRI(ReadBack)->rt_pri;
  3854     Expected = RTPRI(ParmInfo)->rt_pri;
  3855   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
  3856     Actual   = IAPRI(ReadBack)->ia_upri;
  3857     Expected = IAPRI(ParmInfo)->ia_upri;
  3858   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
  3859     Actual   = TSPRI(ReadBack)->ts_upri;
  3860     Expected = TSPRI(ParmInfo)->ts_upri;
  3861   } else {
  3862     if ( ThreadPriorityVerbose ) {
  3863       tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
  3867   if (Actual != Expected) {
  3868     if ( ThreadPriorityVerbose ) {
  3869       tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
  3870              lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
  3873 #endif
  3875   return 0;
  3880 // Solaris only gives access to 128 real priorities at a time,
  3881 // so we expand Java's ten to fill this range.  This would be better
  3882 // if we dynamically adjusted relative priorities.
  3883 //
  3884 // The ThreadPriorityPolicy option allows us to select 2 different
  3885 // priority scales.
  3886 //
  3887 // ThreadPriorityPolicy=0
  3888 // Since Solaris' default priority is MaximumPriority, we do not
  3889 // set a priority lower than Max unless a priority lower than
  3890 // NormPriority is requested.
  3891 //
  3892 // ThreadPriorityPolicy=1
  3893 // This mode causes the priority table to get filled with
  3894 // linear values.  NormPriority gets mapped to 50% of the
  3895 // maximum priority and so on.  This will cause VM threads
  3896 // to get unfair treatment against other Solaris processes
  3897 // which do not explicitly alter their thread priorities.
  3898 //
  3901 int os::java_to_os_priority[MaxPriority + 1] = {
  3902   -99999,         // 0 Entry should never be used
  3904   0,              // 1 MinPriority
  3905   32,             // 2
  3906   64,             // 3
  3908   96,             // 4
  3909   127,            // 5 NormPriority
  3910   127,            // 6
  3912   127,            // 7
  3913   127,            // 8
  3914   127,            // 9 NearMaxPriority
  3916   127             // 10 MaxPriority
  3917 };
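// For example (illustrative): with the default ThreadPriorityPolicy=0 table
// above, Thread.MIN_PRIORITY (1) maps to libthread priority 0 and everything
// from NormPriority (5) upward maps to 127. Running with
// -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=1 instead fills the table
// from prio_policy1[] (see lwp_priocntl_init above), spreading the ten Java
// priorities linearly across the libthread range as described in the comment
// block above.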
  3920 OSReturn os::set_native_priority(Thread* thread, int newpri) {
  3921   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  3922   if ( !UseThreadPriorities ) return OS_OK;
  3923   int status = thr_setprio(thread->osthread()->thread_id(), newpri);
  3924   if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
  3925     status |= (set_lwp_priority (thread->osthread()->thread_id(),
  3926                     thread->osthread()->lwp_id(), newpri ));
  3927   return (status == 0) ? OS_OK : OS_ERR;
  3931 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  3932   int p;
  3933   if ( !UseThreadPriorities ) {
  3934     *priority_ptr = NormalPriority;
  3935     return OS_OK;
  3937   int status = thr_getprio(thread->osthread()->thread_id(), &p);
  3938   if (status != 0) {
  3939     return OS_ERR;
  3941   *priority_ptr = p;
  3942   return OS_OK;
  3946 // Hint to the underlying OS that a task switch would not be good.
  3947 // Void return because it's a hint and can fail.
  3948 void os::hint_no_preempt() {
  3949   schedctl_start(schedctl_init());
  3952 void os::interrupt(Thread* thread) {
  3953   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
  3955   OSThread* osthread = thread->osthread();
  3957   int isInterrupted = osthread->interrupted();
  3958   if (!isInterrupted) {
  3959       osthread->set_interrupted(true);
  3960       OrderAccess::fence();
  3961       // os::sleep() is implemented with either poll (NULL,0,timeout) or
  3962       // by parking on _SleepEvent.  If the former, thr_kill will unwedge
  3963       // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
  3964       ParkEvent * const slp = thread->_SleepEvent ;
  3965       if (slp != NULL) slp->unpark() ;
  3968   // For JSR166:  unpark after setting status but before thr_kill -dl
  3969   if (thread->is_Java_thread()) {
  3970     ((JavaThread*)thread)->parker()->unpark();
  3973   // Handle interruptible wait() ...
  3974   ParkEvent * const ev = thread->_ParkEvent ;
  3975   if (ev != NULL) ev->unpark() ;
  3977   // When events are used everywhere for os::sleep, then this thr_kill
  3978   // will only be needed if UseVMInterruptibleIO is true.
  3980   if (!isInterrupted) {
  3981     int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
  3982     assert_status(status == 0, status, "thr_kill");
  3984     // Bump thread interruption counter
  3985     RuntimeService::record_thread_interrupt_signaled_count();
  3990 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  3991   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
  3993   OSThread* osthread = thread->osthread();
  3995   bool res = osthread->interrupted();
  3997   // NOTE that since there is no "lock" around these two operations,
  3998   // there is the possibility that the interrupted flag will be
  3999   // "false" but that the interrupt event will be set. This is
  4000   // intentional. The effect of this is that Object.wait() will appear
  4001   // to have a spurious wakeup, which is not harmful, and the
  4002   // possibility is so rare that it is not worth the added complexity
  4003   // to add yet another lock. It has also been recommended not to put
  4004   // the interrupted flag into the os::Solaris::Event structure,
  4005   // because it hides the issue.
  4006   if (res && clear_interrupted) {
  4007     osthread->set_interrupted(false);
  4009   return res;
  4013 void os::print_statistics() {
  4016 int os::message_box(const char* title, const char* message) {
  4017   int i;
  4018   fdStream err(defaultStream::error_fd());
  4019   for (i = 0; i < 78; i++) err.print_raw("=");
  4020   err.cr();
  4021   err.print_raw_cr(title);
  4022   for (i = 0; i < 78; i++) err.print_raw("-");
  4023   err.cr();
  4024   err.print_raw_cr(message);
  4025   for (i = 0; i < 78; i++) err.print_raw("=");
  4026   err.cr();
  4028   char buf[16];
  4029   // Prevent process from exiting upon "read error" without consuming all CPU
  4030   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
  4032   return buf[0] == 'y' || buf[0] == 'Y';
  4035 // A lightweight implementation that does not suspend the target thread and
  4036 // thus returns only a hint. Used for profiling only!
  4037 ExtendedPC os::get_thread_pc(Thread* thread) {
  4038   // Make sure that it is called by the watcher and the Threads lock is owned.
  4039   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  4040   // For now, is only used to profile the VM Thread
  4041   assert(thread->is_VM_thread(), "Can only be called for VMThread");
  4042   ExtendedPC epc;
  4044   GetThreadPC_Callback  cb(ProfileVM_lock);
  4045   OSThread *osthread = thread->osthread();
  4046   const int time_to_wait = 400; // 400ms wait for initial response
  4047   int status = cb.interrupt(thread, time_to_wait);
  4049   if (cb.is_done() ) {
  4050     epc = cb.addr();
  4051   } else {
  4052     DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
  4053                               osthread->thread_id(), status););
  4054     // epc is already NULL
  4056   return epc;
  4060 // This does not do anything on Solaris. This is basically a hook for being
  4061 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
  4062 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  4063   f(value, method, args, thread);
  4066 // This routine may be used by user applications as a "hook" to catch signals.
  4067 // The user-defined signal handler must pass unrecognized signals to this
  4068 // routine, and if it returns true (non-zero), then the signal handler must
  4069 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
  4070 // routine will never return false (zero), but instead will execute a VM panic
  4071 // routine to kill the process.
  4072 //
  4073 // If this routine returns false, it is OK to call it again.  This allows
  4074 // the user-defined signal handler to perform checks either before or after
  4075 // the VM performs its own checks.  Naturally, the user code would be making
  4076 // a serious error if it tried to handle an exception (such as a null check
  4077 // or breakpoint) that the VM was generating for its own correct operation.
  4078 //
  4079 // This routine may recognize any of the following kinds of signals:
  4080 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
  4081 // os::Solaris::SIGasync
  4082 // It should be consulted by handlers for any of those signals.
  4083 // It explicitly does not recognize os::Solaris::SIGinterrupt
  4084 //
  4085 // The caller of this routine must pass in the three arguments supplied
  4086 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
  4087 // field of the structure passed to sigaction().  This routine assumes that
  4088 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
  4089 //
  4090 // Note that the VM will print warnings if it detects conflicting signal
  4091 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
  4092 //
  4093 extern "C" int JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
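// Illustrative sketch (application code, not part of the VM): a user-defined
// handler that forwards unrecognized signals as described above; the handler
// name is hypothetical.
//
//   extern "C" void my_segv_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_solaris_signal(sig, info, uc, /* abort_if_unrecognized */ 0)) {
//       return;                                  // the VM consumed the signal
//     }
//     // ... application-specific handling ...
//   }
//
// Such a handler would be installed with sigaction() using
// SA_SIGINFO | SA_RESTART and run with -XX:+AllowUserSignalHandlers so the VM
// tolerates the pre-existing handler.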
  4096 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  4097   JVM_handle_solaris_signal(sig, info, ucVoid, true);
  4100 /* Do not delete - if the guarantee is ever removed, a signal handler (even empty)
  4101    is needed to provoke threads blocked on IO to return an EINTR.
  4102    Note: this explicitly does NOT call JVM_handle_solaris_signal and
  4103    does NOT participate in signal chaining, due to the requirement of
  4104    NOT setting SA_RESTART to make EINTR work. */
  4105 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
  4106    if (UseSignalChaining) {
  4107       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
  4108       if (actp && actp->sa_handler) {
  4109         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
  4114 // This boolean allows users to forward their own non-matching signals
  4115 // to JVM_handle_solaris_signal, harmlessly.
  4116 bool os::Solaris::signal_handlers_are_installed = false;
  4118 // For signal-chaining
  4119 bool os::Solaris::libjsig_is_loaded = false;
  4120 typedef struct sigaction *(*get_signal_t)(int);
  4121 get_signal_t os::Solaris::get_signal_action = NULL;
  4123 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  4124   struct sigaction *actp = NULL;
  4126   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
  4127     // Retrieve the old signal handler from libjsig
  4128     actp = (*get_signal_action)(sig);
  4130   if (actp == NULL) {
  4131     // Retrieve the preinstalled signal handler from jvm
  4132     actp = get_preinstalled_handler(sig);
  4135   return actp;
  4138 static bool call_chained_handler(struct sigaction *actp, int sig,
  4139                                  siginfo_t *siginfo, void *context) {
  4140   // Call the old signal handler
  4141   if (actp->sa_handler == SIG_DFL) {
  4142     // It's more reasonable to let jvm treat it as an unexpected exception
  4143     // instead of taking the default action.
  4144     return false;
  4145   } else if (actp->sa_handler != SIG_IGN) {
  4146     if ((actp->sa_flags & SA_NODEFER) == 0) {
  4147       // automatically block the signal
  4148       sigaddset(&(actp->sa_mask), sig);
  4151     sa_handler_t hand;
  4152     sa_sigaction_t sa;
  4153     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
  4154     // retrieve the chained handler
  4155     if (siginfo_flag_set) {
  4156       sa = actp->sa_sigaction;
  4157     } else {
  4158       hand = actp->sa_handler;
  4161     if ((actp->sa_flags & SA_RESETHAND) != 0) {
  4162       actp->sa_handler = SIG_DFL;
  4165     // try to honor the signal mask
  4166     sigset_t oset;
  4167     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
  4169     // call into the chained handler
  4170     if (siginfo_flag_set) {
  4171       (*sa)(sig, siginfo, context);
  4172     } else {
  4173       (*hand)(sig);
  4176     // restore the signal mask
  4177     thr_sigsetmask(SIG_SETMASK, &oset, 0);
  4179   // Tell jvm's signal handler the signal is taken care of.
  4180   return true;
  4183 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  4184   bool chained = false;
  4185   // signal-chaining
  4186   if (UseSignalChaining) {
  4187     struct sigaction *actp = get_chained_signal_action(sig);
  4188     if (actp != NULL) {
  4189       chained = call_chained_handler(actp, sig, siginfo, context);
  4192   return chained;
  4195 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  4196   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  4197   if (preinstalled_sigs[sig] != 0) {
  4198     return &chainedsigactions[sig];
  4200   return NULL;
  4203 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  4205   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  4206   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  4207   chainedsigactions[sig] = oldAct;
  4208   preinstalled_sigs[sig] = 1;
  4211 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  4212   // Check for overwrite.
  4213   struct sigaction oldAct;
  4214   sigaction(sig, (struct sigaction*)NULL, &oldAct);
  4215   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
  4216                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
  4217   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
  4218       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
  4219       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
  4220     if (AllowUserSignalHandlers || !set_installed) {
  4221       // Do not overwrite; user takes responsibility to forward to us.
  4222       return;
  4223     } else if (UseSignalChaining) {
  4224       if (oktochain) {
  4225         // save the old handler in jvm
  4226         save_preinstalled_handler(sig, oldAct);
  4227       } else {
  4228         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
  4230       // libjsig also interposes the sigaction() call below and saves the
  4231       // old sigaction on its own.
  4232     } else {
  4233       fatal2("Encountered unexpected pre-existing sigaction handler %#lx for signal %d.", (long)oldhand, sig);
  4237   struct sigaction sigAct;
  4238   sigfillset(&(sigAct.sa_mask));
  4239   sigAct.sa_handler = SIG_DFL;
  4241   sigAct.sa_sigaction = signalHandler;
  4242   // Handle SIGSEGV on alternate signal stack if
  4243   // not using stack banging
  4244   if (!UseStackBanging && sig == SIGSEGV) {
  4245     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  4246   // Interruptible i/o requires SA_RESTART cleared so EINTR
  4247   // is returned instead of restarting system calls
  4248   } else if (sig == os::Solaris::SIGinterrupt()) {
  4249     sigemptyset(&sigAct.sa_mask);
  4250     sigAct.sa_handler = NULL;
  4251     sigAct.sa_flags = SA_SIGINFO;
  4252     sigAct.sa_sigaction = sigINTRHandler;
  4253   } else {
  4254     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  4256   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
  4258   sigaction(sig, &sigAct, &oldAct);
  4260   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
  4261                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  4262   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
  4266 #define DO_SIGNAL_CHECK(sig) \
  4267   if (!sigismember(&check_signal_done, sig)) \
  4268     os::Solaris::check_signal_handler(sig)
  4270 // This method is a periodic task to check for misbehaving JNI applications
  4271 // under CheckJNI; we can add any periodic checks here.
  4273 void os::run_periodic_checks() {
  4274   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  4275   // thereby preventing NULL checks.
  4276   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
  4278   if (check_signals == false) return;
  4280   // If the SEGV or BUS handlers are overridden they could potentially prevent
  4281   // generation of the hs*.log file in the event of a crash; debugging
  4282   // such a case can be very challenging, so we absolutely
  4283   // check the following for good measure:
  4284   DO_SIGNAL_CHECK(SIGSEGV);
  4285   DO_SIGNAL_CHECK(SIGILL);
  4286   DO_SIGNAL_CHECK(SIGFPE);
  4287   DO_SIGNAL_CHECK(SIGBUS);
  4288   DO_SIGNAL_CHECK(SIGPIPE);
  4289   DO_SIGNAL_CHECK(SIGXFSZ);
  4291   // ReduceSignalUsage allows the user to override these handlers
  4292   // see comments at the very top and jvm_solaris.h
  4293   if (!ReduceSignalUsage) {
  4294     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
  4295     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
  4296     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
  4297     DO_SIGNAL_CHECK(BREAK_SIGNAL);
  4300   // See comments above for using JVM1/JVM2 and UseAltSigs
  4301   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  4302   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
  4306 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
  4308 static os_sigaction_t os_sigaction = NULL;
  4310 void os::Solaris::check_signal_handler(int sig) {
  4311   char buf[O_BUFLEN];
  4312   address jvmHandler = NULL;
  4314   struct sigaction act;
  4315   if (os_sigaction == NULL) {
  4316     // only trust the default sigaction, in case it has been interposed
  4317     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
  4318     if (os_sigaction == NULL) return;
  4321   os_sigaction(sig, (struct sigaction*)NULL, &act);
  4323   address thisHandler = (act.sa_flags & SA_SIGINFO)
  4324     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
  4325     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
  4328   switch(sig) {
  4329     case SIGSEGV:
  4330     case SIGBUS:
  4331     case SIGFPE:
  4332     case SIGPIPE:
  4333     case SIGXFSZ:
  4334     case SIGILL:
  4335       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
  4336       break;
  4338     case SHUTDOWN1_SIGNAL:
  4339     case SHUTDOWN2_SIGNAL:
  4340     case SHUTDOWN3_SIGNAL:
  4341     case BREAK_SIGNAL:
  4342       jvmHandler = (address)user_handler();
  4343       break;
  4345     default:
  4346       int intrsig = os::Solaris::SIGinterrupt();
  4347       int asynsig = os::Solaris::SIGasync();
  4349       if (sig == intrsig) {
  4350         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
  4351       } else if (sig == asynsig) {
  4352         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
  4353       } else {
  4354         return;
  4356       break;
  4360   if (thisHandler != jvmHandler) {
  4361     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
  4362     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
  4363     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
  4364     // No need to check this sig any longer
  4365     sigaddset(&check_signal_done, sig);
  4366   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
  4367     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
  4368     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
  4369     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
  4370     // No need to check this sig any longer
  4371     sigaddset(&check_signal_done, sig);
  4374   // Print all the signal handler state
  4375   if (sigismember(&check_signal_done, sig)) {
  4376     print_signal_handlers(tty, buf, O_BUFLEN);
  4381 void os::Solaris::install_signal_handlers() {
  4382   bool libjsigdone = false;
  4383   signal_handlers_are_installed = true;
  4385   // signal-chaining
  4386   typedef void (*signal_setting_t)();
  4387   signal_setting_t begin_signal_setting = NULL;
  4388   signal_setting_t end_signal_setting = NULL;
  4389   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
  4390                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  4391   if (begin_signal_setting != NULL) {
  4392     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
  4393                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
  4394     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
  4395                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
  4396     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
  4397                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
  4398     libjsig_is_loaded = true;
  4399     if (os::Solaris::get_libjsig_version != NULL) {
  4400       libjsigversion =  (*os::Solaris::get_libjsig_version)();
  4402     assert(UseSignalChaining, "should enable signal-chaining");
  4404   if (libjsig_is_loaded) {
  4405     // Tell libjsig jvm is setting signal handlers
  4406     (*begin_signal_setting)();
  4409   set_signal_handler(SIGSEGV, true, true);
  4410   set_signal_handler(SIGPIPE, true, true);
  4411   set_signal_handler(SIGXFSZ, true, true);
  4412   set_signal_handler(SIGBUS, true, true);
  4413   set_signal_handler(SIGILL, true, true);
  4414   set_signal_handler(SIGFPE, true, true);
  4417   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
  4419     // Pre-1.4.1 libjsig limits signal chaining to signals <= 32, so it
  4420     // cannot register overridable signals, which might be > 32
  4421     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
  4422       // Tell libjsig jvm has finished setting signal handlers
  4423       (*end_signal_setting)();
  4424       libjsigdone = true;
  4428   // Never ok to chain our SIGinterrupt
  4429   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  4430   set_signal_handler(os::Solaris::SIGasync(), true, true);
  4432   if (libjsig_is_loaded && !libjsigdone) {
  4433     // Tell libjsig jvm has finished setting signal handlers
  4434     (*end_signal_setting)();
  4437   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
  4438   // and if a user signal handler is installed all bets are off
  4439   if (CheckJNICalls) {
  4440     if (libjsig_is_loaded) {
  4441       tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
  4442       check_signals = false;
  4444     if (AllowUserSignalHandlers) {
  4445       tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
  4446       check_signals = false;
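// Illustrative only (not part of the build): with signal chaining enabled an
// embedding application preloads libjsig and installs its handler through a
// normal sigaction() call, which libjsig interposes and records so the VM can
// chain to it later.  Names and flags below are examples, not prescriptions:
//
//   // launched with LD_PRELOAD=libjsig.so
//   struct sigaction sa;
//   memset(&sa, 0, sizeof(sa));
//   sa.sa_sigaction = my_segv_handler;   // hypothetical user handler
//   sa.sa_flags     = SA_SIGINFO;
//   sigaction(SIGSEGV, &sa, NULL);       // recorded by libjsig for chaining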
  4452 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
  4454 const char * signames[] = {
  4455   "SIG0",
  4456   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  4457   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  4458   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  4459   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  4460   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  4461   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  4462   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  4463   "SIGCANCEL", "SIGLOST"
  4464 };
  4466 const char* os::exception_name(int exception_code, char* buf, size_t size) {
  4467   if (0 < exception_code && exception_code <= SIGRTMAX) {
  4468     // signal
  4469     if (exception_code < sizeof(signames)/sizeof(const char*)) {
  4470        jio_snprintf(buf, size, "%s", signames[exception_code]);
  4471     } else {
  4472        jio_snprintf(buf, size, "SIG%d", exception_code);
  4474     return buf;
  4475   } else {
  4476     return NULL;
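// Illustrative only: a typical call site formats a signal number for logging,
// falling back to a NULL check for values outside (0, SIGRTMAX]:
//
//   char buf[O_BUFLEN];
//   const char* name = os::exception_name(SIGSEGV, buf, sizeof(buf)); // "SIGSEGV"
//   if (name == NULL) { /* not a recognized signal number */ }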
  4480 // (Static) wrappers for the new libthread API
  4481 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
  4482 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
  4483 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
  4484 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
  4485 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
  4487 // (Static) wrapper for getisax(2) call.
  4488 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
  4490 // (Static) wrappers for the liblgrp API
  4491 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
  4492 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
  4493 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
  4494 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
  4495 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
  4496 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
  4497 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
  4498 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
  4499 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
  4501 // (Static) wrapper for meminfo() call.
  4502 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
  4504 static address resolve_symbol_lazy(const char* name) {
  4505   address addr = (address) dlsym(RTLD_DEFAULT, name);
  4506   if(addr == NULL) {
  4507     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
  4508     addr = (address) dlsym(RTLD_NEXT, name);
  4510   return addr;
  4513 static address resolve_symbol(const char* name) {
  4514   address addr = resolve_symbol_lazy(name);
  4515   if(addr == NULL) {
  4516     fatal(dlerror());
  4518   return addr;
  4523 // isT2_libthread()
  4524 //
  4525 // Routine to determine if we are currently using the new T2 libthread.
  4526 //
  4527 // We determine if we are using T2 by reading /proc/self/lstatus and
  4528 // looking for a thread with the ASLWP bit set.  If we find this status
  4529 // bit set, we must assume that we are NOT using T2.  The T2 team
  4530 // has approved this algorithm.
  4531 //
  4532 // We need to determine if we are running with the new T2 libthread
  4533 // since setting native thread priorities is handled differently
  4534 // when using this library.  All threads created using T2 are bound
  4535 // threads. Calling thr_setprio is meaningless in this case.
  4536 //
  4537 bool isT2_libthread() {
  4538   static prheader_t * lwpArray = NULL;
  4539   static int lwpSize = 0;
  4540   static int lwpFile = -1;
  4541   lwpstatus_t * that;
  4542   char lwpName [128];
  4543   bool isT2 = false;
  4545 #define ADR(x)  ((uintptr_t)(x))
  4546 #define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
  4548   lwpFile = open("/proc/self/lstatus", O_RDONLY, 0);
  4549   if (lwpFile < 0) {
  4550       if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
  4551       return false;
  4553   lwpSize = 16*1024;
  4554   for (;;) {
  4555     lseek (lwpFile, 0, SEEK_SET);
  4556     lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
  4557     if (read(lwpFile, lwpArray, lwpSize) < 0) {
  4558       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
  4559       break;
  4561     if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
  4562        // We got a good snapshot - now iterate over the list.
  4563       int aslwpcount = 0;
  4564       for (int i = 0; i < lwpArray->pr_nent; i++ ) {
  4565         that = LWPINDEX(lwpArray,i);
  4566         if (that->pr_flags & PR_ASLWP) {
  4567           aslwpcount++;
  4570       if (aslwpcount == 0) isT2 = true;
  4571       break;
  4573     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
  4574     FREE_C_HEAP_ARRAY(char, lwpArray);  // retry.
  4577   FREE_C_HEAP_ARRAY(char, lwpArray);
  4578   close (lwpFile);
  4579   if (ThreadPriorityVerbose) {
  4580     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
  4581     else tty->print_cr("We are not running with a T2 libthread\n");
  4583   return isT2;
  4587 void os::Solaris::libthread_init() {
  4588   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
  4590   // Determine if we are running with the new T2 libthread
  4591   os::Solaris::set_T2_libthread(isT2_libthread());
  4593   lwp_priocntl_init();
  4595   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  4596   if(func == NULL) {
  4597     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
  4598     // Guarantee that this VM is running on a new enough OS (5.6 or
  4599     // later) that it will have a new enough libthread.so.
  4600     guarantee(func != NULL, "libthread.so is too old.");
  4603   // Initialize the new libthread getstate API wrappers
  4604   func = resolve_symbol("thr_getstate");
  4605   os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
  4607   func = resolve_symbol("thr_setstate");
  4608   os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
  4610   func = resolve_symbol("thr_setmutator");
  4611   os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
  4613   func = resolve_symbol("thr_suspend_mutator");
  4614   os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
  4616   func = resolve_symbol("thr_continue_mutator");
  4617   os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
  4619   int size;
  4620   void (*handler_info_func)(address *, int *);
  4621   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  4622   handler_info_func(&handler_start, &size);
  4623   handler_end = handler_start + size;
  4627 int_fnP_mutex_tP os::Solaris::_mutex_lock;
  4628 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
  4629 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
  4630 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
  4631 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
  4632 int os::Solaris::_mutex_scope = USYNC_THREAD;
  4634 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
  4635 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
  4636 int_fnP_cond_tP os::Solaris::_cond_signal;
  4637 int_fnP_cond_tP os::Solaris::_cond_broadcast;
  4638 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
  4639 int_fnP_cond_tP os::Solaris::_cond_destroy;
  4640 int os::Solaris::_cond_scope = USYNC_THREAD;
  4642 void os::Solaris::synchronization_init() {
  4643   if(UseLWPSynchronization) {
  4644     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
  4645     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
  4646     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
  4647     os::Solaris::set_mutex_init(lwp_mutex_init);
  4648     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
  4649     os::Solaris::set_mutex_scope(USYNC_THREAD);
  4651     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
  4652     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
  4653     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
  4654     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
  4655     os::Solaris::set_cond_init(lwp_cond_init);
  4656     os::Solaris::set_cond_destroy(lwp_cond_destroy);
  4657     os::Solaris::set_cond_scope(USYNC_THREAD);
  4659   else {
  4660     os::Solaris::set_mutex_scope(USYNC_THREAD);
  4661     os::Solaris::set_cond_scope(USYNC_THREAD);
  4663     if(UsePthreads) {
  4664       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
  4665       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
  4666       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
  4667       os::Solaris::set_mutex_init(pthread_mutex_default_init);
  4668       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
  4670       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
  4671       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
  4672       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
  4673       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
  4674       os::Solaris::set_cond_init(pthread_cond_default_init);
  4675       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
  4677     else {
  4678       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
  4679       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
  4680       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
  4681       os::Solaris::set_mutex_init(::mutex_init);
  4682       os::Solaris::set_mutex_destroy(::mutex_destroy);
  4684       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
  4685       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
  4686       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
  4687       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
  4688       os::Solaris::set_cond_init(::cond_init);
  4689       os::Solaris::set_cond_destroy(::cond_destroy);
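// Illustrative only: once synchronization_init() has resolved the primitives,
// callers go through the os::Solaris wrappers and never care whether the LWP,
// pthread or default libthread implementations were selected, e.g.:
//
//   int status = os::Solaris::mutex_lock(_mutex);
//   assert_status(status == 0, status, "mutex_lock");
//   status = os::Solaris::cond_wait(_cond, _mutex);
//   status = os::Solaris::mutex_unlock(_mutex);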
  4694 bool os::Solaris::liblgrp_init() {
  4695   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  4696   if (handle != NULL) {
  4697     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
  4698     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
  4699     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
  4700     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
  4701     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
  4702     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
  4703     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
  4704     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
  4705                                        dlsym(handle, "lgrp_cookie_stale")));
  4707     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
  4708     set_lgrp_cookie(c);
  4709     return true;
  4711   return false;
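// Illustrative only (assuming liblgrp_init() returned true and the usual
// accessor names from os_solaris.hpp): NUMA code can then query the locality
// group topology through the wrappers resolved above, e.g.:
//
//   int nlgrps = os::Solaris::lgrp_nlgrps(os::Solaris::lgrp_cookie());
//   int home   = os::Solaris::lgrp_home(P_LWPID, P_MYID);  // home lgroup of this LWP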
  4714 void os::Solaris::misc_sym_init() {
  4715   address func;
  4717   // getisax
  4718   func = resolve_symbol_lazy("getisax");
  4719   if (func != NULL) {
  4720     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  4723   // meminfo
  4724   func = resolve_symbol_lazy("meminfo");
  4725   if (func != NULL) {
  4726     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  4730 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  4731   assert(_getisax != NULL, "_getisax not set");
  4732   return _getisax(array, n);
  4735 // Symbol doesn't exist in Solaris 8 pset.h
  4736 #ifndef PS_MYID
  4737 #define PS_MYID -3
  4738 #endif
  4740 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
  4741 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
  4742 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
  4744 void init_pset_getloadavg_ptr(void) {
  4745   pset_getloadavg_ptr =
  4746     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  4747   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
  4748     warning("pset_getloadavg function not found");
  4752 int os::Solaris::_dev_zero_fd = -1;
  4754 // this is called _before_ the global arguments have been parsed
  4755 void os::init(void) {
  4756   _initial_pid = getpid();
  4758   max_hrtime = first_hrtime = gethrtime();
  4760   init_random(1234567);
  4762   page_size = sysconf(_SC_PAGESIZE);
  4763   if (page_size == -1)
  4764     fatal1("os_solaris.cpp: os::init: sysconf failed (%s)", strerror(errno));
  4765   init_page_sizes((size_t) page_size);
  4767   Solaris::initialize_system_info();
  4769   // Initialize misc. symbols as soon as possible, so we can use them
  4770   // if we need them.
  4771   Solaris::misc_sym_init();
  4773   int fd = open("/dev/zero", O_RDWR);
  4774   if (fd < 0) {
  4775     fatal1("os::init: cannot open /dev/zero (%s)", strerror(errno));
  4776   } else {
  4777     Solaris::set_dev_zero_fd(fd);
  4779     // Close on exec, child won't inherit.
  4780     fcntl(fd, F_SETFD, FD_CLOEXEC);
  4783   clock_tics_per_sec = CLK_TCK;
  4785   // check if dladdr1() exists; dladdr1 can provide more information than
  4786   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  4787   // and is available on linker patches for 5.7 and 5.8.
  4788   // libdl.so must have been loaded; this call is just an entry lookup
  4789   void * hdl = dlopen("libdl.so", RTLD_NOW);
  4790   if (hdl)
  4791     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
  4793   // (Solaris only) this switches to calls that actually do locking.
  4794   ThreadCritical::initialize();
  4796   main_thread = thr_self();
  4798   // Constant minimum stack size allowed. It must be at least
  4799   // the minimum of what the OS supports (thr_min_stack()), and
  4800   // enough to allow the thread to get to user bytecode execution.
  4801   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  4802   // If the pagesize of the VM is greater than 8K determine the appropriate
  4803   // number of initial guard pages.  The user can change this with the
  4804   // command line arguments, if needed.
  4805   if (vm_page_size() > 8*K) {
  4806     StackYellowPages = 1;
  4807     StackRedPages = 1;
  4808     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
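    // Worked example (hypothetical values): with StackShadowPages == 3 and a
    // 64K vm_page_size(), round_to(3*8K, 64K)/64K == 64K/64K == 1, i.e. the
    // 24K shadow area expressed in 8K pages is rounded up to one whole 64K page.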
  4812 // To install functions for atexit system call
  4813 extern "C" {
  4814   static void perfMemory_exit_helper() {
  4815     perfMemory_exit();
  4819 // this is called _after_ the global arguments have been parsed
  4820 jint os::init_2(void) {
  4821   // try to enable extended file IO ASAP, see 6431278
  4822   os::Solaris::try_enable_extended_io();
  4824   // Allocate a single page and mark it as readable for safepoint polling.  Also
  4825   // use this first mmap call to check support for MAP_ALIGN.
  4826   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
  4827                                                       page_size,
  4828                                                       MAP_PRIVATE | MAP_ALIGN,
  4829                                                       PROT_READ);
  4830   if (polling_page == NULL) {
  4831     has_map_align = false;
  4832     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
  4833                                                 PROT_READ);
  4836   os::set_polling_page(polling_page);
  4838 #ifndef PRODUCT
  4839   if( Verbose && PrintMiscellaneous )
  4840     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
  4841 #endif
  4843   if (!UseMembar) {
  4844     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
  4845     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
  4846     os::set_memory_serialize_page( mem_serialize_page );
  4848 #ifndef PRODUCT
  4849     if(Verbose && PrintMiscellaneous)
  4850       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
  4851 #endif
  4854   FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
  4856   // Check minimum allowable stack size for thread creation and to initialize
  4857   // the java system classes, including StackOverflowError - depends on page
  4858   // size.  Add a page for compiler2 recursion in main thread.
  4859   // Add in BytesPerWord times page size to account for VM stack during
  4860   // class initialization depending on 32 or 64 bit VM.
  4861   guarantee((Solaris::min_stack_allowed >=
  4862     (StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
  4863      COMPILER2_PRESENT(+1)) * page_size),
  4864     "need to increase Solaris::min_stack_allowed on this platform");
  4866   size_t threadStackSizeInBytes = ThreadStackSize * K;
  4867   if (threadStackSizeInBytes != 0 &&
  4868     threadStackSizeInBytes < Solaris::min_stack_allowed) {
  4869     tty->print_cr("\nThe stack size specified is too small. Specify at least %dk",
  4870                   Solaris::min_stack_allowed/K);
  4871     return JNI_ERR;
  4874   // With a 64kb page size the usable default stack size is quite a bit
  4875   // less.  Increase the stack for 64kb (or anything larger than 8kb)
  4876   // pages; this increases virtual memory fragmentation (since we're not
  4877   // creating the stack on a power of 2 boundary).  The real fix for
  4878   // this should be to fix the guard page mechanism.
  4881   if (vm_page_size() > 8*K) {
  4882       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
  4883          ? threadStackSizeInBytes +
  4884            ((StackYellowPages + StackRedPages) * vm_page_size())
  4885          : 0;
  4886       ThreadStackSize = threadStackSizeInBytes/K;
  4889   // Make the stack size a multiple of the page size so that
  4890   // the yellow/red zones can be guarded.
  4891   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
  4892         vm_page_size()));
  4894   Solaris::libthread_init();
  4896   if (UseNUMA) {
  4897     if (!Solaris::liblgrp_init()) {
  4898       UseNUMA = false;
  4899     } else {
  4900       size_t lgrp_limit = os::numa_get_groups_num();
  4901       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
  4902       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
  4903       FREE_C_HEAP_ARRAY(int, lgrp_ids);
  4904       if (lgrp_num < 2) {
  4905         // There's only one locality group, disable NUMA.
  4906         UseNUMA = false;
  4909     if (!UseNUMA && ForceNUMA) {
  4910       UseNUMA = true;
  4914   Solaris::signal_sets_init();
  4915   Solaris::init_signal_mem();
  4916   Solaris::install_signal_handlers();
  4918   if (libjsigversion < JSIG_VERSION_1_4_1) {
  4919     Maxlibjsigsigs = OLDMAXSIGNUM;
  4922   // initialize synchronization primitives to use either thread or
  4923   // lwp synchronization (controlled by UseLWPSynchronization)
  4924   Solaris::synchronization_init();
  4926   if (MaxFDLimit) {
  4927     // Set the number of file descriptors to the maximum; print an error
  4928     // if getrlimit/setrlimit fails, but continue regardless.
  4929     struct rlimit nbr_files;
  4930     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
  4931     if (status != 0) {
  4932       if (PrintMiscellaneous && (Verbose || WizardMode))
  4933         perror("os::init_2 getrlimit failed");
  4934     } else {
  4935       nbr_files.rlim_cur = nbr_files.rlim_max;
  4936       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
  4937       if (status != 0) {
  4938         if (PrintMiscellaneous && (Verbose || WizardMode))
  4939           perror("os::init_2 setrlimit failed");
  4944   // Initialize HPI.
  4945   jint hpi_result = hpi::initialize();
  4946   if (hpi_result != JNI_OK) {
  4947     tty->print_cr("There was an error trying to initialize the HPI library.");
  4948     return hpi_result;
  4951   // Calculate theoretical max. size of Threads to guard against
  4952   // artificial out-of-memory situations, where all available address-
  4953   // space has been reserved by thread stacks. Default stack size is 1Mb.
  4954   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
  4955     JavaThread::stack_size_at_create() : (1*K*K);
  4956   assert(pre_thread_stack_size != 0, "Must have a stack");
  4957   // Solaris has a maximum of 4Gb of user program address space. Calculate the thread
  4958   // limit at which we should start doing virtual memory banging; currently this is
  4959   // when the threads have used all but 200Mb of the space.
  4960   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  4961   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
  4963   // at-exit methods are called in the reverse order of their registration.
  4964   // In Solaris 7 and earlier, atexit functions are called on return from
  4965   // main or as a result of a call to exit(3C). There can be only 32 of
  4966   // these functions registered and atexit() does not set errno. In Solaris
  4967   // 8 and later, there is no limit to the number of functions registered
  4968   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  4969   // functions are called upon dlclose(3DL) in addition to return from main
  4970   // and exit(3C).
  4972   if (PerfAllowAtExitRegistration) {
  4973     // only register atexit functions if PerfAllowAtExitRegistration is set.
  4974     // atexit functions can be delayed until process exit time, which
  4975     // can be problematic for embedded VM situations. Embedded VMs should
  4976     // call DestroyJavaVM() to assure that VM resources are released.
  4978     // note: perfMemory_exit_helper atexit function may be removed in
  4979     // the future if the appropriate cleanup code can be added to the
  4980     // VM_Exit VMOperation's doit method.
  4981     if (atexit(perfMemory_exit_helper) != 0) {
  4982       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
  4986   // Init pset_loadavg function pointer
  4987   init_pset_getloadavg_ptr();
  4989   return JNI_OK;
  4993 // Mark the polling page as unreadable
  4994 void os::make_polling_page_unreadable(void) {
  4995   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
  4996     fatal("Could not disable polling page");
  4997 };
  4999 // Mark the polling page as readable
  5000 void os::make_polling_page_readable(void) {
  5001   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
  5002     fatal("Could not enable polling page");
  5003 };
  5005 // OS interface.
  5007 int os::stat(const char *path, struct stat *sbuf) {
  5008   char pathbuf[MAX_PATH];
  5009   if (strlen(path) > MAX_PATH - 1) {
  5010     errno = ENAMETOOLONG;
  5011     return -1;
  5013   hpi::native_path(strcpy(pathbuf, path));
  5014   return ::stat(pathbuf, sbuf);
  5018 bool os::check_heap(bool force) { return true; }
  5020 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
  5021 static vsnprintf_t sol_vsnprintf = NULL;
  5023 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  5024   if (!sol_vsnprintf) {
  5025     //search  for the named symbol in the objects that were loaded after libjvm
  5026     void* where = RTLD_NEXT;
  5027     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
  5028         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
  5029     if (!sol_vsnprintf){
  5030       //search  for the named symbol in the objects that were loaded before libjvm
  5031       where = RTLD_DEFAULT;
  5032       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
  5033         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
  5034       assert(sol_vsnprintf != NULL, "vsnprintf not found");
  5037   return (*sol_vsnprintf)(buf, count, fmt, argptr);
  5041 // Is a (classpath) directory empty?
  5042 bool os::dir_is_empty(const char* path) {
  5043   DIR *dir = NULL;
  5044   struct dirent *ptr;
  5046   dir = opendir(path);
  5047   if (dir == NULL) return true;
  5049   /* Scan the directory */
  5050   bool result = true;
  5051   char buf[sizeof(struct dirent) + MAX_PATH];
  5052   struct dirent *dbuf = (struct dirent *) buf;
  5053   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
  5054     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
  5055       result = false;
  5058   closedir(dir);
  5059   return result;
  5062 // create binary file, rewriting existing file if required
  5063 int os::create_binary_file(const char* path, bool rewrite_existing) {
  5064   int oflags = O_WRONLY | O_CREAT;
  5065   if (!rewrite_existing) {
  5066     oflags |= O_EXCL;
  5068   return ::open64(path, oflags, S_IREAD | S_IWRITE);
  5071 // return current position of file pointer
  5072 jlong os::current_file_offset(int fd) {
  5073   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
  5076 // move file pointer to the specified offset
  5077 jlong os::seek_to_file_offset(int fd, jlong offset) {
  5078   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
  5081 // Map a block of memory.
  5082 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
  5083                      char *addr, size_t bytes, bool read_only,
  5084                      bool allow_exec) {
  5085   int prot;
  5086   int flags;
  5088   if (read_only) {
  5089     prot = PROT_READ;
  5090     flags = MAP_SHARED;
  5091   } else {
  5092     prot = PROT_READ | PROT_WRITE;
  5093     flags = MAP_PRIVATE;
  5096   if (allow_exec) {
  5097     prot |= PROT_EXEC;
  5100   if (addr != NULL) {
  5101     flags |= MAP_FIXED;
  5104   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
  5105                                      fd, file_offset);
  5106   if (mapped_address == MAP_FAILED) {
  5107     return NULL;
  5109   return mapped_address;
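// Illustrative only: a read-only mapping of an open file (the fd, name and
// size below are hypothetical):
//
//   char* base = os::map_memory(fd, "example.dat", 0 /* offset */,
//                               NULL /* let mmap choose the address */,
//                               bytes, true /* read_only */, false /* allow_exec */);
//   if (base == NULL) { /* mapping failed */ }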
  5113 // Remap a block of memory.
  5114 char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
  5115                        char *addr, size_t bytes, bool read_only,
  5116                        bool allow_exec) {
  5117   // same as map_memory() on this OS
  5118   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
  5119                         allow_exec);
  5123 // Unmap a block of memory.
  5124 bool os::unmap_memory(char* addr, size_t bytes) {
  5125   return munmap(addr, bytes) == 0;
  5128 void os::pause() {
  5129   char filename[MAX_PATH];
  5130   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
  5131     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
  5132   } else {
  5133     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  5136   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  5137   if (fd != -1) {
  5138     struct stat buf;
  5139     close(fd);
  5140     while (::stat(filename, &buf) == 0) {
  5141       (void)::poll(NULL, 0, 100);
  5143   } else {
  5144     jio_fprintf(stderr,
  5145       "Could not open pause file '%s', continuing immediately.\n", filename);
  5149 #ifndef PRODUCT
  5150 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
  5151 // Turn this on if you need to trace synch operations.
  5152 // Set RECORD_SYNCH_LIMIT to a large-enough value,
  5153 // and call record_synch_enable and record_synch_disable
  5154 // around the computation of interest.
  5156 void record_synch(char* name, bool returning);  // defined below
  5158 class RecordSynch {
  5159   char* _name;
  5160  public:
  5161   RecordSynch(char* name) :_name(name)
  5162                  { record_synch(_name, false); }
  5163   ~RecordSynch() { record_synch(_name,   true);  }
  5164 };
  5166 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
  5167 extern "C" ret name params {                                    \
  5168   typedef ret name##_t params;                                  \
  5169   static name##_t* implem = NULL;                               \
  5170   static int callcount = 0;                                     \
  5171   if (implem == NULL) {                                         \
  5172     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
  5173     if (implem == NULL)  fatal(dlerror());                      \
  5174   }                                                             \
  5175   ++callcount;                                                  \
  5176   RecordSynch _rs(#name);                                       \
  5177   inner;                                                        \
  5178   return implem args;                                           \
  5180 // in dbx, examine callcounts this way:
  5181 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
  5183 #define CHECK_POINTER_OK(p) \
  5184   (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p)))
  5185 #define CHECK_MU \
  5186   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
  5187 #define CHECK_CV \
  5188   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
  5189 #define CHECK_P(p) \
  5190   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
  5192 #define CHECK_MUTEX(mutex_op) \
  5193 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
  5195 CHECK_MUTEX(   mutex_lock)
  5196 CHECK_MUTEX(  _mutex_lock)
  5197 CHECK_MUTEX( mutex_unlock)
  5198 CHECK_MUTEX(_mutex_unlock)
  5199 CHECK_MUTEX( mutex_trylock)
  5200 CHECK_MUTEX(_mutex_trylock)
  5202 #define CHECK_COND(cond_op) \
  5203 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
  5205 CHECK_COND( cond_wait);
  5206 CHECK_COND(_cond_wait);
  5207 CHECK_COND(_cond_wait_cancel);
  5209 #define CHECK_COND2(cond_op) \
  5210 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
  5212 CHECK_COND2( cond_timedwait);
  5213 CHECK_COND2(_cond_timedwait);
  5214 CHECK_COND2(_cond_timedwait_cancel);
  5216 // do the _lwp_* versions too
  5217 #define mutex_t lwp_mutex_t
  5218 #define cond_t  lwp_cond_t
  5219 CHECK_MUTEX(  _lwp_mutex_lock)
  5220 CHECK_MUTEX(  _lwp_mutex_unlock)
  5221 CHECK_MUTEX(  _lwp_mutex_trylock)
  5222 CHECK_MUTEX( __lwp_mutex_lock)
  5223 CHECK_MUTEX( __lwp_mutex_unlock)
  5224 CHECK_MUTEX( __lwp_mutex_trylock)
  5225 CHECK_MUTEX(___lwp_mutex_lock)
  5226 CHECK_MUTEX(___lwp_mutex_unlock)
  5228 CHECK_COND(  _lwp_cond_wait);
  5229 CHECK_COND( __lwp_cond_wait);
  5230 CHECK_COND(___lwp_cond_wait);
  5232 CHECK_COND2(  _lwp_cond_timedwait);
  5233 CHECK_COND2( __lwp_cond_timedwait);
  5234 #undef mutex_t
  5235 #undef cond_t
  5237 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
  5238 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
  5239 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
  5240 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
  5241 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
  5242 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
  5243 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
  5244 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
  5247 // recording machinery:
  5249 enum { RECORD_SYNCH_LIMIT = 200 };
  5250 char* record_synch_name[RECORD_SYNCH_LIMIT];
  5251 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
  5252 bool record_synch_returning[RECORD_SYNCH_LIMIT];
  5253 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
  5254 int record_synch_count = 0;
  5255 bool record_synch_enabled = false;
  5257 // in dbx, examine recorded data this way:
  5258 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
  5260 void record_synch(char* name, bool returning) {
  5261   if (record_synch_enabled) {
  5262     if (record_synch_count < RECORD_SYNCH_LIMIT) {
  5263       record_synch_name[record_synch_count] = name;
  5264       record_synch_returning[record_synch_count] = returning;
  5265       record_synch_thread[record_synch_count] = thr_self();
  5266       record_synch_arg0ptr[record_synch_count] = &name;
  5267       record_synch_count++;
  5269     // put more checking code here:
  5270     // ...
  5274 void record_synch_enable() {
  5275   // start collecting trace data, if not already doing so
  5276   if (!record_synch_enabled)  record_synch_count = 0;
  5277   record_synch_enabled = true;
  5280 void record_synch_disable() {
  5281   // stop collecting trace data
  5282   record_synch_enabled = false;
  5285 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
  5286 #endif // PRODUCT
  5288 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
  5289 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
  5290                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
  5293 // JVMTI & JVM monitoring and management support
  5294 // The thread_cpu_time() and current_thread_cpu_time() are only
  5295 // supported if is_thread_cpu_time_supported() returns true.
  5296 // They are not supported on Solaris T1.
  5298 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
  5299 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
  5300 // of a thread.
  5301 //
  5302 // current_thread_cpu_time() and thread_cpu_time(Thread *)
  5303 // returns the fast estimate available on the platform.
  5305 // hrtime_t gethrvtime() return value includes
  5306 // user time but does not include system time
  5307 jlong os::current_thread_cpu_time() {
  5308   return (jlong) gethrvtime();
  5311 jlong os::thread_cpu_time(Thread *thread) {
  5312   // return user level CPU time only to be consistent with
  5313   // what current_thread_cpu_time returns.
  5314   // thread_cpu_time_info() must be changed if this changes
  5315   return os::thread_cpu_time(thread, false /* user time only */);
  5318 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  5319   if (user_sys_cpu_time) {
  5320     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  5321   } else {
  5322     return os::current_thread_cpu_time();
  5326 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  5327   char proc_name[64];
  5328   int count;
  5329   prusage_t prusage;
  5330   jlong lwp_time;
  5331   int fd;
  5333   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
  5334                      getpid(),
  5335                      thread->osthread()->lwp_id());
  5336   fd = open(proc_name, O_RDONLY);
  5337   if ( fd == -1 ) return -1;
  5339   do {
  5340     count = pread(fd,
  5341                   (void *)&prusage.pr_utime,
  5342                   thr_time_size,
  5343                   thr_time_off);
  5344   } while (count < 0 && errno == EINTR);
  5345   close(fd);
  5346   if ( count < 0 ) return -1;
  5348   if (user_sys_cpu_time) {
  5349     // user + system CPU time
  5350     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
  5351                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
  5352                  (jlong)prusage.pr_stime.tv_nsec +
  5353                  (jlong)prusage.pr_utime.tv_nsec;
  5354   } else {
  5355     // user level CPU time only
  5356     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
  5357                 (jlong)prusage.pr_utime.tv_nsec;
  5360   return(lwp_time);
  5363 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  5364   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  5365   info_ptr->may_skip_backward = false;    // elapsed time not wall time
  5366   info_ptr->may_skip_forward = false;     // elapsed time not wall time
  5367   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
  5370 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  5371   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  5372   info_ptr->may_skip_backward = false;    // elapsed time not wall time
  5373   info_ptr->may_skip_forward = false;     // elapsed time not wall time
  5374   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
  5377 bool os::is_thread_cpu_time_supported() {
  5378   if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
  5379     return true;
  5380   } else {
  5381     return false;
  5385 // System loadavg support.  Returns -1 if load average cannot be obtained.
  5386 // Return the load average for our processor set if the primitive exists
  5387 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
  5388 int os::loadavg(double loadavg[], int nelem) {
  5389   if (pset_getloadavg_ptr != NULL) {
  5390     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  5391   } else {
  5392     return ::getloadavg(loadavg, nelem);
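// Illustrative only: callers request up to three samples (1, 5 and 15 minute
// averages) and must handle a negative failure return, e.g.:
//
//   double avg[3];
//   int n = os::loadavg(avg, 3);
//   if (n > 0) { /* avg[0..n-1] are valid */ }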
  5396 //---------------------------------------------------------------------------------
  5397 #ifndef PRODUCT
  5399 static address same_page(address x, address y) {
  5400   intptr_t page_bits = -os::vm_page_size();
  5401   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
  5402     return x;
  5403   else if (x > y)
  5404     return (address)(intptr_t(y) | ~page_bits) + 1;
  5405   else
  5406     return (address)(intptr_t(y) & page_bits);
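// Worked example (assuming an 8K page size): page_bits == -8192, a mask that
// clears the low 13 bits, so x is returned unchanged when x and y share a
// page; otherwise the result is clamped to y's page (the address just past
// the end of y's page when x > y, the start of y's page when x < y).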
  5409 bool os::find(address addr) {
  5410   Dl_info dlinfo;
  5411   memset(&dlinfo, 0, sizeof(dlinfo));
  5412   if (dladdr(addr, &dlinfo)) {
  5413 #ifdef _LP64
  5414     tty->print("0x%016lx: ", addr);
  5415 #else
  5416     tty->print("0x%08x: ", addr);
  5417 #endif
  5418     if (dlinfo.dli_sname != NULL)
  5419       tty->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
  5420     else if (dlinfo.dli_fname)
  5421       tty->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
  5422     else
  5423       tty->print("<absolute address>");
  5424     if (dlinfo.dli_fname)  tty->print(" in %s", dlinfo.dli_fname);
  5425 #ifdef _LP64
  5426     if (dlinfo.dli_fbase)  tty->print(" at 0x%016lx", dlinfo.dli_fbase);
  5427 #else
  5428     if (dlinfo.dli_fbase)  tty->print(" at 0x%08x", dlinfo.dli_fbase);
  5429 #endif
  5430     tty->cr();
  5432     if (Verbose) {
  5433       // decode some bytes around the PC
  5434       address begin = same_page(addr-40, addr);
  5435       address end   = same_page(addr+40, addr);
  5436       address       lowest = (address) dlinfo.dli_sname;
  5437       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
  5438       if (begin < lowest)  begin = lowest;
  5439       Dl_info dlinfo2;
  5440       if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
  5441           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
  5442         end = (address) dlinfo2.dli_saddr;
  5443       Disassembler::decode(begin, end);
  5445     return true;
  5447   return false;
  5450 #endif
  5453 // Following function has been added to support HotSparc's libjvm.so running
  5454 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
  5455 // src/solaris/hpi/native_threads in the EVM codebase.
  5456 //
  5457 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
  5458 // libraries and should thus be removed. We will leave it behind for a while
  5459 // until we no longer want to be able to run on top of 1.3.0 Solaris production
  5460 // JDK. See 4341971.
  5462 #define STACK_SLACK 0x800
  5464 extern "C" {
  5465   intptr_t sysThreadAvailableStackWithSlack() {
  5466     stack_t st;
  5467     intptr_t retval, stack_top;
  5468     retval = thr_stksegment(&st);
  5469     assert(retval == 0, "incorrect return value from thr_stksegment");
  5470     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  5471     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  5472     stack_top=(intptr_t)st.ss_sp-st.ss_size;
  5473     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  5477 // Just to get the Kernel build to link on solaris for testing.
  5479 extern "C" {
  5480 class ASGCT_CallTrace;
  5481 void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
  5482   KERNEL_RETURN;
  5486 // ObjectMonitor park-unpark infrastructure ...
  5487 //
  5488 // We implement Solaris and Linux PlatformEvents with the
  5489 // obvious condvar-mutex-flag triple.
  5490 // Another alternative that works quite well is pipes:
  5491 // Each PlatformEvent consists of a pipe-pair.
  5492 // The thread associated with the PlatformEvent
  5493 // calls park(), which reads from the input end of the pipe.
  5494 // Unpark() writes into the other end of the pipe.
  5495 // The write-side of the pipe must be set NDELAY.
  5496 // Unfortunately pipes consume a large # of handles.
  5497 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
  5498 // Using pipes for the 1st few threads might be workable, however.
  5499 //
  5500 // park() is permitted to return spuriously.
  5501 // Callers of park() should wrap the call to park() in
  5502 // an appropriate loop.  A litmus test for the correct
  5503 // usage of park is the following: if park() were modified
  5504 // to immediately return 0 your code should still work,
  5505 // albeit degenerating to a spin loop.
  5506 //
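// Illustrative only: the litmus test above implies callers wrap park() in a
// state-checking loop, for example (sketch; the predicate is hypothetical):
//
//   while (!condition_of_interest()) {
//     ev->park();          // may return spuriously; re-check and loop
//   }
//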
  5507 // An interesting optimization for park() is to use a trylock()
  5508 // to attempt to acquire the mutex.  If the trylock() fails
  5509 // then we know that a concurrent unpark() operation is in-progress.
  5510 // In that case the park() code could simply set _count to 0
  5511 // and return immediately.  The subsequent park() operation *might*
  5512 // return immediately.  That's harmless as the caller of park() is
  5513 // expected to loop.  By using trylock() we will have avoided a
  5514 // context switch caused by contention on the per-thread mutex.
  5515 //
  5516 // TODO-FIXME:
  5517 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
  5518 //     objectmonitor implementation.
  5519 // 2.  Collapse the JSR166 parker event, and the
  5520 //     objectmonitor ParkEvent into a single "Event" construct.
  5521 // 3.  In park() and unpark() add:
  5522 //     assert (Thread::current() == AssociatedWith).
  5523 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
  5524 //     1-out-of-N park() operations will return immediately.
  5525 //
  5526 // _Event transitions in park()
  5527 //   -1 => -1 : illegal
  5528 //    1 =>  0 : pass - return immediately
  5529 //    0 => -1 : block
  5530 //
  5531 // _Event serves as a restricted-range semaphore.
  5532 //
  5533 // Another possible encoding of _Event would be with
  5534 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
  5535 //
  5536 // TODO-FIXME: add DTRACE probes for:
  5537 // 1.   Tx parks
  5538 // 2.   Ty unparks Tx
  5539 // 3.   Tx resumes from park
  5542 // value determined through experimentation
  5543 #define ROUNDINGFIX 11
  5545 // utility to compute the abstime argument to timedwait.
  5546 // TODO-FIXME: switch from compute_abstime() to unpackTime().
  5548 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  5549   // millis is the relative timeout time
  5550   // abstime will be the absolute timeout time
  5551   if (millis < 0)  millis = 0;
  5552   struct timeval now;
  5553   int status = gettimeofday(&now, NULL);
  5554   assert(status == 0, "gettimeofday");
  5555   jlong seconds = millis / 1000;
  5556   jlong max_wait_period;
  5558   if (UseLWPSynchronization) {
  5559     // forward port of fix for 4275818 (not sleeping long enough)
  5560     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
  5561     // _lwp_cond_timedwait() used a round_down algorithm rather
  5562     // than a round_up. For millis less than our roundfactor
  5563     // it rounded down to 0 which doesn't meet the spec.
  5564     // For millis > roundfactor we may return a bit sooner, but
  5565     // since we can not accurately identify the patch level and
  5566     // this has already been fixed in Solaris 9 and 8 we will
  5567     // leave it alone rather than always rounding down.
  5569     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
  5570     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
  5571     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
  5572     max_wait_period = 21000000;
  5573   } else {
  5574     max_wait_period = 50000000;
  5576   millis %= 1000;
  5577   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
  5578      seconds = max_wait_period;
  5580   abstime->tv_sec = now.tv_sec  + seconds;
  5581   long       usec = now.tv_usec + millis * 1000;
  5582   if (usec >= 1000000) {
  5583     abstime->tv_sec += 1;
  5584     usec -= 1000000;
  5586   abstime->tv_nsec = usec * 1000;
  5587   return abstime;
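// Worked example (hypothetical values): with millis == 2500 and
// now == {tv_sec = 1000, tv_usec = 600000}: seconds == 2, millis becomes 500,
// usec == 600000 + 500*1000 == 1100000 >= 1000000, so the result is
// abstime == {tv_sec = 1003, tv_nsec = 100000 * 1000}.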
  5590 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
  5591 // Conceptually TryPark() should be equivalent to park(0).
  5593 int os::PlatformEvent::TryPark() {
  5594   for (;;) {
  5595     const int v = _Event ;
  5596     guarantee ((v == 0) || (v == 1), "invariant") ;
  5597     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  5601 void os::PlatformEvent::park() {           // AKA: down()
  5602   // Invariant: Only the thread associated with the Event/PlatformEvent
  5603   // may call park().
  5604   int v ;
  5605   for (;;) {
  5606       v = _Event ;
  5607       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  5609   guarantee (v >= 0, "invariant") ;
  5610   if (v == 0) {
  5611      // Do this the hard way by blocking ...
  5612      // See http://monaco.sfbay/detail.jsf?cr=5094058.
  5613      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  5614      // Only for SPARC >= V8PlusA
  5615 #if defined(__sparc) && defined(COMPILER2)
  5616      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  5617 #endif
  5618      int status = os::Solaris::mutex_lock(_mutex);
  5619      assert_status(status == 0, status,  "mutex_lock");
  5620      guarantee (_nParked == 0, "invariant") ;
  5621      ++ _nParked ;
  5622      while (_Event < 0) {
  5623         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
  5624         // Treat this the same as if the wait was interrupted
  5625         // With usr/lib/lwp going to kernel, always handle ETIME
  5626         status = os::Solaris::cond_wait(_cond, _mutex);
  5627         if (status == ETIME) status = EINTR ;
  5628         assert_status(status == 0 || status == EINTR, status, "cond_wait");
  5630      -- _nParked ;
  5631      _Event = 0 ;
  5632      status = os::Solaris::mutex_unlock(_mutex);
  5633      assert_status(status == 0, status, "mutex_unlock");
  5637 int os::PlatformEvent::park(jlong millis) {
  5638   guarantee (_nParked == 0, "invariant") ;
  5639   int v ;
  5640   for (;;) {
  5641       v = _Event ;
  5642       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  5644   guarantee (v >= 0, "invariant") ;
  5645   if (v != 0) return OS_OK ;
  5647   int ret = OS_TIMEOUT;
  5648   timestruc_t abst;
  5649   compute_abstime (&abst, millis);
  5651   // See http://monaco.sfbay/detail.jsf?cr=5094058.
  5652   // For Solaris SPARC set fprs.FEF=0 prior to parking.
  5653   // Only for SPARC >= V8PlusA
  5654 #if defined(__sparc) && defined(COMPILER2)
  5655  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  5656 #endif
  5657   int status = os::Solaris::mutex_lock(_mutex);
  5658   assert_status(status == 0, status, "mutex_lock");
  5659   guarantee (_nParked == 0, "invariant") ;
  5660   ++ _nParked ;
  5661   while (_Event < 0) {
  5662      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
  5663      assert_status(status == 0 || status == EINTR ||
  5664                    status == ETIME || status == ETIMEDOUT,
  5665                    status, "cond_timedwait");
  5666      if (!FilterSpuriousWakeups) break ;                // previous semantics
  5667      if (status == ETIME || status == ETIMEDOUT) break ;
  5668      // We consume and ignore EINTR and spurious wakeups.
  5669   }
  5670   -- _nParked ;
  5671   if (_Event >= 0) ret = OS_OK ;
  5672   _Event = 0 ;
  5673   status = os::Solaris::mutex_unlock(_mutex);
  5674   assert_status(status == 0, status, "mutex_unlock");
  5675   return ret;
  5676 }
  5678 void os::PlatformEvent::unpark() {
  5679   int v, AnyWaiters;
  5681   // Increment _Event.
  5682   // Another acceptable implementation would be to simply swap 1
  5683   // into _Event:
  5684   //   if (Swap (&_Event, 1) < 0) {
  5685   //      mutex_lock (_mutex) ; AnyWaiters = _nParked; mutex_unlock (_mutex) ;
  5686   //      if (AnyWaiters) cond_signal (_cond) ;
  5687   //   }
  5689   for (;;) {
  5690     v = _Event ;
  5691     if (v > 0) {
  5692        // The LD of _Event could have been reordered or satisfied
  5693        // by a read-aside from this processor's write buffer.
  5694        // To avoid problems, execute a barrier and then
  5695        // ratify the value.  A degenerate CAS() would also work;
  5696        // viz., CAS (v+0, &_Event, v) == v.
  5697        OrderAccess::fence() ;
  5698        if (_Event == v) return ;
  5699        continue ;
  5700     }
  5701     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  5702   }
  5704   // If the thread associated with the event was parked, wake it.
  5705   if (v < 0) {
  5706      int status ;
  5707      // Wait for the thread assoc with the PlatformEvent to vacate.
  5708      status = os::Solaris::mutex_lock(_mutex);
  5709      assert_status(status == 0, status, "mutex_lock");
  5710      AnyWaiters = _nParked ;
  5711      status = os::Solaris::mutex_unlock(_mutex);
  5712      assert_status(status == 0, status, "mutex_unlock");
  5713      guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
  5714      if (AnyWaiters != 0) {
  5715        // We intentionally signal *after* dropping the lock
  5716        // to avoid a common class of futile wakeups.
  5717        status = os::Solaris::cond_signal(_cond);
  5718        assert_status(status == 0, status, "cond_signal");
  5719      }
  5720   }
  5721 }
  5723 // JSR166
  5724 // -------------------------------------------------------
  5726 /*
  5727  * The Solaris and Linux implementations of park/unpark are fairly
  5728  * conservative for now, but can be improved. They currently use a
  5729  * mutex/condvar pair, plus _counter.
  5730  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
  5731  * sets _counter to 1 and signals the condvar.  Only one thread ever waits
  5732  * on the condvar. Contention seen when trying to park implies that someone
  5733  * is unparking you, so don't wait. And spurious returns are fine, so there
  5734  * is no need to track notifications.
  5735  */
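// A minimal sketch of the permit semantics described above (illustrative;
// 'p' is an assumed Parker*):
//   p->unpark(); p->unpark();   // permits do not accumulate: _counter is 1
//   p->park(false, 0);          // consumes the permit, returns immediately
//   p->park(false, 0);          // blocks until the next unpark()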
  5737 #define NANOSECS_PER_SEC 1000000000
  5738 #define NANOSECS_PER_MILLISEC 1000000
  5739 #define MAX_SECS 100000000
  5741 /*
  5742  * This code is common to linux and solaris and will be moved to a
  5743  * common place in dolphin.
  5745  * The passed in time value is either a relative time in nanoseconds
  5746  * or an absolute time in milliseconds. Either way it has to be unpacked
  5747  * into suitable seconds and nanoseconds components and stored in the
  5748  * given timespec structure.
  5749  * Given that time is a 64-bit value and the time_t used in the timespec is
  5750  * only a signed 32-bit value (except on 64-bit Linux), we have to watch for
  5751  * overflow if times far in the future are given. Further, on Solaris versions
  5752  * prior to 10 there is a restriction (see cond_timedwait) that the specified
  5753  * number of seconds, in abstime, be less than current_time + 100,000,000.
  5754  * As it will be 28 years before "now + 100000000" overflows, we can ignore
  5755  * overflow and just impose a hard limit on seconds using the value
  5756  * "now + 100,000,000". This places a limit on the timeout of about 3.17
  5757  * years from "now".
  5758  */
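// A worked example of the conversion (illustrative; the 'now' and 'time' values
// are assumed): for a relative time of 1500000000 ns with
// now = { tv_sec = 1000, tv_usec = 999900 }:
//   secs = 1, so absTime->tv_sec = 1001
//   absTime->tv_nsec = 500000000 + 999900 * 1000 = 1499900000  (>= NANOSECS_PER_SEC)
//   carry: absTime->tv_nsec = 499900000, absTime->tv_sec = 1002
// The MAX_SECS cap of 100,000,000 seconds (100,000,000 / 31,536,000 ~= 3.17)
// is what yields the "about 3.17 years" bound mentioned above.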
  5759 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  5760   assert (time > 0, "convertTime");
  5762   struct timeval now;
  5763   int status = gettimeofday(&now, NULL);
  5764   assert(status == 0, "gettimeofday");
  5766   time_t max_secs = now.tv_sec + MAX_SECS;
  5768   if (isAbsolute) {
  5769     jlong secs = time / 1000;
  5770     if (secs > max_secs) {
  5771       absTime->tv_sec = max_secs;
  5772     }
  5773     else {
  5774       absTime->tv_sec = secs;
  5775     }
  5776     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  5777   }
  5778   else {
  5779     jlong secs = time / NANOSECS_PER_SEC;
  5780     if (secs >= MAX_SECS) {
  5781       absTime->tv_sec = max_secs;
  5782       absTime->tv_nsec = 0;
  5783     }
  5784     else {
  5785       absTime->tv_sec = now.tv_sec + secs;
  5786       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
  5787       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
  5788         absTime->tv_nsec -= NANOSECS_PER_SEC;
  5789         ++absTime->tv_sec; // note: this must be <= max_secs
  5790       }
  5791     }
  5792   }
  5793   assert(absTime->tv_sec >= 0, "tv_sec < 0");
  5794   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  5795   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  5796   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
  5797 }
  5799 void Parker::park(bool isAbsolute, jlong time) {
  5801   // Optional fast-path check:
  5802   // Return immediately if a permit is available.
  5803   if (_counter > 0) {
  5804       _counter = 0 ;
  5805       return ;
  5806   }
  5808   // Optional fast-exit: Check interrupt before trying to wait
  5809   Thread* thread = Thread::current();
  5810   assert(thread->is_Java_thread(), "Must be JavaThread");
  5811   JavaThread *jt = (JavaThread *)thread;
  5812   if (Thread::is_interrupted(thread, false)) {
  5813     return;
  5814   }
  5816   // First, demultiplex/decode time arguments
  5817   timespec absTime;
  5818   if (time < 0) { // don't wait at all
  5819     return;
  5820   }
  5821   if (time > 0) {
  5822     // Warning: this code might be exposed to the old Solaris time
  5823     // round-down bugs.  Grep "roundingFix" for details.
  5824     unpackTime(&absTime, isAbsolute, time);
  5825   }
  5827   // Enter safepoint region
  5828   // Beware of deadlocks such as 6317397.
  5829   // The per-thread Parker:: _mutex is a classic leaf-lock.
  5830   // In particular a thread must never block on the Threads_lock while
  5831   // holding the Parker:: mutex.  If safepoints are pending, both
  5832   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  5833   ThreadBlockInVM tbivm(jt);
  5835   // Don't wait if we cannot get the lock, since interference arises from
  5836   // unblocking.  Also, check the interrupt before trying to wait.
  5837   if (Thread::is_interrupted(thread, false) ||
  5838       os::Solaris::mutex_trylock(_mutex) != 0) {
  5839     return;
  5840   }
  5842   int status ;
  5844   if (_counter > 0)  { // no wait needed
  5845     _counter = 0;
  5846     status = os::Solaris::mutex_unlock(_mutex);
  5847     assert (status == 0, "invariant") ;
  5848     return;
  5849   }
  5851 #ifdef ASSERT
  5852   // Don't catch signals while blocked; let the running threads have the signals.
  5853   // (This allows a debugger to break into the running thread.)
  5854   sigset_t oldsigs;
  5855   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  5856   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
  5857 #endif
  5859   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  5860   jt->set_suspend_equivalent();
  5861   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  5863   // Do this the hard way by blocking ...
  5864   // See http://monaco.sfbay/detail.jsf?cr=5094058.
  5865   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  5866   // Only for SPARC >= V8PlusA
  5867 #if defined(__sparc) && defined(COMPILER2)
  5868   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  5869 #endif
  5871   if (time == 0) {
  5872     status = os::Solaris::cond_wait (_cond, _mutex) ;
  5873   } else {
  5874     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  5875   }
  5876   // Note that an untimed cond_wait() can sometimes return ETIME on older
  5877   // versions of Solaris.
  5878   assert_status(status == 0 || status == EINTR ||
  5879                 status == ETIME || status == ETIMEDOUT,
  5880                 status, "cond_timedwait");
  5882 #ifdef ASSERT
  5883   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
  5884 #endif
  5885   _counter = 0 ;
  5886   status = os::Solaris::mutex_unlock(_mutex);
  5887   assert_status(status == 0, status, "mutex_unlock") ;
  5889   // If externally suspended while waiting, re-suspend
  5890   if (jt->handle_special_suspend_equivalent_condition()) {
  5891     jt->java_suspend_self();
  5892   }
  5893 }
  5896 void Parker::unpark() {
  5897   int s, status ;
  5898   status = os::Solaris::mutex_lock (_mutex) ;
  5899   assert (status == 0, "invariant") ;
  5900   s = _counter;
  5901   _counter = 1;
  5902   status = os::Solaris::mutex_unlock (_mutex) ;
  5903   assert (status == 0, "invariant") ;
  5905   if (s < 1) {
  5906     status = os::Solaris::cond_signal (_cond) ;
  5907     assert (status == 0, "invariant") ;
  5908   }
  5909 }
  5911 extern char** environ;
  5913 // Run the specified command in a separate process. Return its exit value,
  5914 // or -1 on failure (e.g. can't fork a new process).
  5915 // Unlike system(), this function can be called from a signal handler. It
  5916 // doesn't block SIGINT et al.
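// A sketch of typical use (illustrative; the command string is assumed):
//   int rc = os::fork_and_exec((char*)"ls /tmp");
//   // rc is the command's exit status, 0x80 + signal number if the child was
//   // killed by a signal, or -1 if the fork itself failed.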
  5917 int os::fork_and_exec(char* cmd) {
  5918   char * argv[4];
  5919   argv[0] = (char *)"sh";
  5920   argv[1] = (char *)"-c";
  5921   argv[2] = cmd;
  5922   argv[3] = NULL;
  5924   // fork() is async-safe; fork1() is not, so it can't be used in a signal handler
  5925   pid_t pid;
  5926   Thread* t = ThreadLocalStorage::get_thread_slow();
  5927   if (t != NULL && t->is_inside_signal_handler()) {
  5928     pid = fork();
  5929   } else {
  5930     pid = fork1();
  5931   }
  5933   if (pid < 0) {
  5934     // fork failed
  5935     warning("fork failed: %s", strerror(errno));
  5936     return -1;
  5938   } else if (pid == 0) {
  5939     // child process
  5941     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
  5942     execve("/usr/bin/sh", argv, environ);
  5944     // execve failed
  5945     _exit(-1);
  5947   } else  {
  5948     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
  5949     // care about the actual exit code, for now.
  5951     int status;
  5953     // Wait for the child process to exit.  This returns immediately if
  5954     // the child has already exited.
  5955     while (waitpid(pid, &status, 0) < 0) {
  5956         switch (errno) {
  5957         case ECHILD: return 0;
  5958         case EINTR: break;
  5959         default: return -1;
  5960         }
  5961     }
  5963     if (WIFEXITED(status)) {
  5964        // The child exited normally; get its exit code.
  5965        return WEXITSTATUS(status);
  5966     } else if (WIFSIGNALED(status)) {
  5967        // The child exited because of a signal
  5968        // The best value to return is 0x80 + signal number,
  5969        // because that is what all Unix shells do, and because
  5970        // it allows callers to distinguish between process exit and
  5971        // process death by signal.
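       // (For example, a child terminated by SIGKILL, signal 9, would yield
       // 0x80 + 9 = 137.)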
  5972        return 0x80 + WTERMSIG(status);
  5973     } else {
  5974        // Unknown exit code; pass it through
  5975        return status;
  5976     }
  5977   }
  5978 }
