src/os/solaris/vm/os_solaris.cpp

author       aoqi
date         Thu, 12 Oct 2017 21:27:07 +0800
changeset    7535:7ae4e26cb1e0
parents      6918:d22136881b85, 6876:710a3c8b516e
child        7994:04ff2f6cd0eb
permissions  -rw-r--r--

merge

     1 /*
     2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 // no precompiled headers
    26 #include "classfile/classLoader.hpp"
    27 #include "classfile/systemDictionary.hpp"
    28 #include "classfile/vmSymbols.hpp"
    29 #include "code/icBuffer.hpp"
    30 #include "code/vtableStubs.hpp"
    31 #include "compiler/compileBroker.hpp"
    32 #include "compiler/disassembler.hpp"
    33 #include "interpreter/interpreter.hpp"
    34 #include "jvm_solaris.h"
    35 #include "memory/allocation.inline.hpp"
    36 #include "memory/filemap.hpp"
    37 #include "mutex_solaris.inline.hpp"
    38 #include "oops/oop.inline.hpp"
    39 #include "os_share_solaris.hpp"
    40 #include "prims/jniFastGetField.hpp"
    41 #include "prims/jvm.h"
    42 #include "prims/jvm_misc.hpp"
    43 #include "runtime/arguments.hpp"
    44 #include "runtime/extendedPC.hpp"
    45 #include "runtime/globals.hpp"
    46 #include "runtime/interfaceSupport.hpp"
    47 #include "runtime/java.hpp"
    48 #include "runtime/javaCalls.hpp"
    49 #include "runtime/mutexLocker.hpp"
    50 #include "runtime/objectMonitor.hpp"
    51 #include "runtime/orderAccess.inline.hpp"
    52 #include "runtime/osThread.hpp"
    53 #include "runtime/perfMemory.hpp"
    54 #include "runtime/sharedRuntime.hpp"
    55 #include "runtime/statSampler.hpp"
    56 #include "runtime/stubRoutines.hpp"
    57 #include "runtime/thread.inline.hpp"
    58 #include "runtime/threadCritical.hpp"
    59 #include "runtime/timer.hpp"
    60 #include "services/attachListener.hpp"
    61 #include "services/memTracker.hpp"
    62 #include "services/runtimeService.hpp"
    63 #include "utilities/decoder.hpp"
    64 #include "utilities/defaultStream.hpp"
    65 #include "utilities/events.hpp"
    66 #include "utilities/growableArray.hpp"
    67 #include "utilities/vmError.hpp"
    69 // put OS-includes here
    70 # include <dlfcn.h>
    71 # include <errno.h>
    72 # include <exception>
    73 # include <link.h>
    74 # include <poll.h>
    75 # include <pthread.h>
    76 # include <pwd.h>
    77 # include <schedctl.h>
    78 # include <setjmp.h>
    79 # include <signal.h>
    80 # include <stdio.h>
    81 # include <alloca.h>
    82 # include <sys/filio.h>
    83 # include <sys/ipc.h>
    84 # include <sys/lwp.h>
    85 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
    86 # include <sys/mman.h>
    87 # include <sys/processor.h>
    88 # include <sys/procset.h>
    89 # include <sys/pset.h>
    90 # include <sys/resource.h>
    91 # include <sys/shm.h>
    92 # include <sys/socket.h>
    93 # include <sys/stat.h>
    94 # include <sys/systeminfo.h>
    95 # include <sys/time.h>
    96 # include <sys/times.h>
    97 # include <sys/types.h>
    98 # include <sys/wait.h>
    99 # include <sys/utsname.h>
   100 # include <thread.h>
   101 # include <unistd.h>
   102 # include <sys/priocntl.h>
   103 # include <sys/rtpriocntl.h>
   104 # include <sys/tspriocntl.h>
   105 # include <sys/iapriocntl.h>
   106 # include <sys/fxpriocntl.h>
   107 # include <sys/loadavg.h>
   108 # include <string.h>
   109 # include <stdio.h>
   111 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
   112 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
   114 #define MAX_PATH (2 * K)
   116 // for timer info max values which include all bits
   117 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
   120 // Here are some liblgrp types from sys/lgrp_user.h to be able to
   121 // compile on older systems without this header file.
   123 #ifndef MADV_ACCESS_LWP
   124 # define  MADV_ACCESS_LWP         7       /* next LWP to access heavily */
   125 #endif
   126 #ifndef MADV_ACCESS_MANY
   127 # define  MADV_ACCESS_MANY        8       /* many processes to access heavily */
   128 #endif
   130 #ifndef LGRP_RSRC_CPU
   131 # define LGRP_RSRC_CPU           0       /* CPU resources */
   132 #endif
   133 #ifndef LGRP_RSRC_MEM
   134 # define LGRP_RSRC_MEM           1       /* memory resources */
   135 #endif
   137 // see thr_setprio(3T) for the basis of these numbers
   138 #define MinimumPriority 0
   139 #define NormalPriority  64
   140 #define MaximumPriority 127
   142 // Values for ThreadPriorityPolicy == 1
   143 int prio_policy1[CriticalPriority+1] = {
   144   -99999,  0, 16,  32,  48,  64,
   145           80, 96, 112, 124, 127, 127 };
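       // Worked example (editorial illustration, assuming the usual HotSpot
       // convention that this table is indexed by Java thread priority when
       // ThreadPriorityPolicy == 1): index 0 is unused (-99999), NormPriority
       // (5) maps to prio_policy1[5] == 64, i.e. NormalPriority above, and
       // MaxPriority (10) maps to 127 (MaximumPriority).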
   147 // System parameters used internally
   148 static clock_t clock_tics_per_sec = 100;
   150 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
   151 static bool enabled_extended_FILE_stdio = false;
   154 // For diagnostics that print a message only once; see run_periodic_checks
   154 static bool check_addr0_done = false;
   155 static sigset_t check_signal_done;
   156 static bool check_signals = true;
   158 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
   159 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
   161 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
   164 // "default" initializers for missing libc APIs
   165 extern "C" {
   166   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
   167   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
   169   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
   170   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
   171 }
   173 // "default" initializers for pthread-based synchronization
   174 extern "C" {
   175   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
   176   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
   177 }
   179 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
   181 // Thread Local Storage
   182 // This is common to all Solaris platforms so it is defined here,
   183 // in this common file.
   184 // The declarations are in the os_cpu threadLS*.hpp files.
   185 //
   186 // Static member initialization for TLS
   187 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
   189 #ifndef PRODUCT
   190 #define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))
   192 int ThreadLocalStorage::_tcacheHit = 0;
   193 int ThreadLocalStorage::_tcacheMiss = 0;
   195 void ThreadLocalStorage::print_statistics() {
   196   int total = _tcacheMiss+_tcacheHit;
   197   tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
   198                 _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
   199 }
   200 #undef _PCT
   201 #endif // PRODUCT
   203 Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
   204                                                         int index) {
   205   Thread *thread = get_thread_slow();
   206   if (thread != NULL) {
   207     address sp = os::current_stack_pointer();
   208     guarantee(thread->_stack_base == NULL ||
   209               (sp <= thread->_stack_base &&
   210                  sp >= thread->_stack_base - thread->_stack_size) ||
   211                is_error_reported(),
   212               "sp must be inside of selected thread stack");
   214     thread->set_self_raw_id(raw_id);  // mark for quick retrieval
   215     _get_thread_cache[ index ] = thread;
   216   }
   217   return thread;
   218 }
   221 static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
   222 #define NO_CACHED_THREAD ((Thread*)all_zero)
   224 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
   226   // Store the new value before updating the cache to prevent a race
   227   // between get_thread_via_cache_slowly() and this store operation.
   228   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
   230   // Update thread cache with new thread if setting on thread create,
   231   // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
   232   uintptr_t raw = pd_raw_thread_id();
   233   int ix = pd_cache_index(raw);
   234   _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
   235 }
   237 void ThreadLocalStorage::pd_init() {
   238   for (int i = 0; i < _pd_cache_size; i++) {
   239     _get_thread_cache[i] = NO_CACHED_THREAD;
   240   }
   241 }
   243 // Invalidate all the caches (happens to be the same as pd_init).
   244 void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }
   246 #undef NO_CACHED_THREAD
   248 // END Thread Local Storage
   250 static inline size_t adjust_stack_size(address base, size_t size) {
   251   if ((ssize_t)size < 0) {
   252     // 4759953: Compensate for ridiculous stack size.
   253     size = max_intx;
   254   }
   255   if (size > (size_t)base) {
   256     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
   257     size = (size_t)base;
   258   }
   259   return size;
   260 }
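       // Editorial illustration (not from the upstream file): if thr_stksegment()
       // reports a bogus negative size, the (ssize_t) test above clamps it to
       // max_intx, and the second test then caps the size at the numeric value of
       // the base address, so base - size cannot wrap around the address space.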
   262 static inline stack_t get_stack_info() {
   263   stack_t st;
   264   int retval = thr_stksegment(&st);
   265   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
   266   assert(retval == 0, "incorrect return value from thr_stksegment");
   267   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
   268   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
   269   return st;
   270 }
   272 address os::current_stack_base() {
   273   int r = thr_main() ;
   274   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
   275   bool is_primordial_thread = r;
   277   // Workaround 4352906, avoid calls to thr_stksegment by
   278   // thr_main after the first one (it looks like we trash
   279   // some data, causing the value for ss_sp to be incorrect).
   280   if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
   281     stack_t st = get_stack_info();
   282     if (is_primordial_thread) {
   283       // cache initial value of stack base
   284       os::Solaris::_main_stack_base = (address)st.ss_sp;
   285     }
   286     return (address)st.ss_sp;
   287   } else {
   288     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
   289     return os::Solaris::_main_stack_base;
   290   }
   291 }
   293 size_t os::current_stack_size() {
   294   size_t size;
   296   int r = thr_main() ;
   297   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
   298   if(!r) {
   299     size = get_stack_info().ss_size;
   300   } else {
   301     struct rlimit limits;
   302     getrlimit(RLIMIT_STACK, &limits);
   303     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
   304   }
   305   // base may not be page aligned
   306   address base = current_stack_base();
   307   address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());
   308   return (size_t)(base - bottom);
   309 }
   311 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
   312   return localtime_r(clock, res);
   313 }
   315 // interruptible infrastructure
   317 // setup_interruptible saves the thread state before going into an
   318 // interruptible system call.
   319 // The saved state is used to restore the thread to
   320 // its former state whether or not an interrupt is received.
   321 // Used by classloader os::read
   322 // os::restartable_read calls skip this layer and stay in _thread_in_native
   324 void os::Solaris::setup_interruptible(JavaThread* thread) {
   326   JavaThreadState thread_state = thread->thread_state();
   328   assert(thread_state != _thread_blocked, "Coming from the wrong thread");
   329   assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
   330   OSThread* osthread = thread->osthread();
   331   osthread->set_saved_interrupt_thread_state(thread_state);
   332   thread->frame_anchor()->make_walkable(thread);
   333   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
   334 }
   336 // Version of setup_interruptible() for threads that are already in
   337 // _thread_blocked. Used by os_sleep().
   338 void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
   339   thread->frame_anchor()->make_walkable(thread);
   340 }
   342 JavaThread* os::Solaris::setup_interruptible() {
   343   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
   344   setup_interruptible(thread);
   345   return thread;
   346 }
   348 void os::Solaris::try_enable_extended_io() {
   349   typedef int (*enable_extended_FILE_stdio_t)(int, int);
   351   if (!UseExtendedFileIO) {
   352     return;
   353   }
   355   enable_extended_FILE_stdio_t enabler =
   356     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
   357                                          "enable_extended_FILE_stdio");
   358   if (enabler) {
   359     enabler(-1, -1);
   360   }
   361 }
   364 #ifdef ASSERT
   366 JavaThread* os::Solaris::setup_interruptible_native() {
   367   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
   368   JavaThreadState thread_state = thread->thread_state();
   369   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
   370   return thread;
   371 }
   373 void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
   374   JavaThreadState thread_state = thread->thread_state();
   375   assert(thread_state == _thread_in_native, "Assumed thread_in_native");
   376 }
   377 #endif
   379 // cleanup_interruptible reverses the effects of setup_interruptible
   380 // setup_interruptible_already_blocked() does not need any cleanup.
   382 void os::Solaris::cleanup_interruptible(JavaThread* thread) {
   383   OSThread* osthread = thread->osthread();
   385   ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
   386 }
   388 // I/O interruption related counters called in _INTERRUPTIBLE
   390 void os::Solaris::bump_interrupted_before_count() {
   391   RuntimeService::record_interrupted_before_count();
   392 }
   394 void os::Solaris::bump_interrupted_during_count() {
   395   RuntimeService::record_interrupted_during_count();
   396 }
   398 static int _processors_online = 0;
   400          jint os::Solaris::_os_thread_limit = 0;
   401 volatile jint os::Solaris::_os_thread_count = 0;
   403 julong os::available_memory() {
   404   return Solaris::available_memory();
   405 }
   407 julong os::Solaris::available_memory() {
   408   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
   409 }
   411 julong os::Solaris::_physical_memory = 0;
   413 julong os::physical_memory() {
   414    return Solaris::physical_memory();
   415 }
   417 static hrtime_t first_hrtime = 0;
   418 static const hrtime_t hrtime_hz = 1000*1000*1000;
   419 static volatile hrtime_t max_hrtime = 0;
   422 void os::Solaris::initialize_system_info() {
   423   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
   424   _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
   425   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
   426 }
   428 int os::active_processor_count() {
   429   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
   430   pid_t pid = getpid();
   431   psetid_t pset = PS_NONE;
   432   // Are we running in a processor set or is there any processor set around?
   433   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
   434     uint_t pset_cpus;
   435     // Query the number of cpus available to us.
   436     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
   437       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
   438       _processors_online = pset_cpus;
   439       return pset_cpus;
   440     }
   441   }
   442   // Otherwise return number of online cpus
   443   return online_cpus;
   444 }
   446 static bool find_processors_in_pset(psetid_t        pset,
   447                                     processorid_t** id_array,
   448                                     uint_t*         id_length) {
   449   bool result = false;
   450   // Find the number of processors in the processor set.
   451   if (pset_info(pset, NULL, id_length, NULL) == 0) {
   452     // Make up an array to hold their ids.
   453     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
   454     // Fill in the array with their processor ids.
   455     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
   456       result = true;
   457     }
   458   }
   459   return result;
   460 }
   462 // Callers of find_processors_online() must tolerate imprecise results --
   463 // the system configuration can change asynchronously because of DR
   464 // or explicit psradm operations.
   465 //
   466 // We also need to take care that the loop (below) terminates as the
   467 // number of processors online can change between the _SC_NPROCESSORS_ONLN
   468 // request and the loop that builds the list of processor ids.   Unfortunately
   469 // there's no reliable way to determine the maximum valid processor id,
   470 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
   471 // man pages, which claim the processor id set is "sparse, but
   472 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
   473 // exit the loop.
   474 //
   475 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
   476 // not available on S8.0.
   478 static bool find_processors_online(processorid_t** id_array,
   479                                    uint*           id_length) {
   480   const processorid_t MAX_PROCESSOR_ID = 100000 ;
   481   // Find the number of processors online.
   482   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
   483   // Make up an array to hold their ids.
   484   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
   485   // Processors need not be numbered consecutively.
   486   long found = 0;
   487   processorid_t next = 0;
   488   while (found < *id_length && next < MAX_PROCESSOR_ID) {
   489     processor_info_t info;
   490     if (processor_info(next, &info) == 0) {
   491       // NB, PI_NOINTR processors are effectively online ...
   492       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
   493         (*id_array)[found] = next;
   494         found += 1;
   495       }
   496     }
   497     next += 1;
   498   }
   499   if (found < *id_length) {
   500       // The loop above didn't identify the expected number of processors.
   501       // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
   502       // and re-running the loop, above, but there's no guarantee of progress
   503       // if the system configuration is in flux.  Instead, we just return what
   504       // we've got.  Note that in the worst case find_processors_online() could
   505       // return an empty set.  (As a fall-back in the case of the empty set we
   506       // could just return the ID of the current processor).
   507       *id_length = found ;
   508   }
   510   return true;
   511 }
   513 static bool assign_distribution(processorid_t* id_array,
   514                                 uint           id_length,
   515                                 uint*          distribution,
   516                                 uint           distribution_length) {
   517   // We assume we can assign processorid_t's to uint's.
   518   assert(sizeof(processorid_t) == sizeof(uint),
   519          "can't convert processorid_t to uint");
   520   // Quick check to see if we won't succeed.
   521   if (id_length < distribution_length) {
   522     return false;
   523   }
   524   // Assign processor ids to the distribution.
   525   // Try to shuffle processors to distribute work across boards,
   526   // assuming 4 processors per board.
   527   const uint processors_per_board = ProcessDistributionStride;
   528   // Find the maximum processor id.
   529   processorid_t max_id = 0;
   530   for (uint m = 0; m < id_length; m += 1) {
   531     max_id = MAX2(max_id, id_array[m]);
   532   }
   533   // The next id, to limit loops.
   534   const processorid_t limit_id = max_id + 1;
   535   // Make up markers for available processors.
   536   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
   537   for (uint c = 0; c < limit_id; c += 1) {
   538     available_id[c] = false;
   539   }
   540   for (uint a = 0; a < id_length; a += 1) {
   541     available_id[id_array[a]] = true;
   542   }
   543   // Step by "boards", then by "slot", copying to "assigned".
   544   // NEEDS_CLEANUP: The assignment of processors should be stateful,
   545   //                remembering which processors have been assigned by
   546   //                previous calls, etc., so as to distribute several
   547   //                independent calls of this method.  It would be nice
   548   //                to have an API that let us ask
   549   //                how many processes are bound to a processor,
   550   //                but we don't have that, either.
   551   //                In the short term, "board" is static so that
   552   //                subsequent distributions don't all start at board 0.
   553   static uint board = 0;
   554   uint assigned = 0;
   555   // Until we've found enough processors ....
   556   while (assigned < distribution_length) {
   557     // ... find the next available processor in the board.
   558     for (uint slot = 0; slot < processors_per_board; slot += 1) {
   559       uint try_id = board * processors_per_board + slot;
   560       if ((try_id < limit_id) && (available_id[try_id] == true)) {
   561         distribution[assigned] = try_id;
   562         available_id[try_id] = false;
   563         assigned += 1;
   564         break;
   565       }
   566     }
   567     board += 1;
   568     if (board * processors_per_board + 0 >= limit_id) {
   569       board = 0;
   570     }
   571   }
   572   if (available_id != NULL) {
   573     FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
   574   }
   575   return true;
   576 }
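       // Worked example (editorial illustration): with ProcessDistributionStride == 4
       // and online processors 0..7, a request for a distribution of length 2
       // starting from board 0 picks id 0 from board 0 and id 4 from board 1,
       // i.e. distribution == {0, 4}.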
   578 void os::set_native_thread_name(const char *name) {
   579   // Not yet implemented.
   580   return;
   581 }
   583 bool os::distribute_processes(uint length, uint* distribution) {
   584   bool result = false;
   585   // Find the processor id's of all the available CPUs.
   586   processorid_t* id_array  = NULL;
   587   uint           id_length = 0;
   588   // There are some races between querying information and using it,
   589   // since processor sets can change dynamically.
   590   psetid_t pset = PS_NONE;
   591   // Are we running in a processor set?
   592   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
   593     result = find_processors_in_pset(pset, &id_array, &id_length);
   594   } else {
   595     result = find_processors_online(&id_array, &id_length);
   596   }
   597   if (result == true) {
   598     if (id_length >= length) {
   599       result = assign_distribution(id_array, id_length, distribution, length);
   600     } else {
   601       result = false;
   602     }
   603   }
   604   if (id_array != NULL) {
   605     FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
   606   }
   607   return result;
   608 }
   610 bool os::bind_to_processor(uint processor_id) {
   611   // We assume that a processorid_t can be stored in a uint.
   612   assert(sizeof(uint) == sizeof(processorid_t),
   613          "can't convert uint to processorid_t");
   614   int bind_result =
   615     processor_bind(P_LWPID,                       // bind LWP.
   616                    P_MYID,                        // bind current LWP.
   617                    (processorid_t) processor_id,  // id.
   618                    NULL);                         // don't return old binding.
   619   return (bind_result == 0);
   620 }
   622 bool os::getenv(const char* name, char* buffer, int len) {
   623   char* val = ::getenv( name );
   624   if ( val == NULL
   625   ||   strlen(val) + 1  >  len ) {
   626     if (len > 0)  buffer[0] = 0; // return a null string
   627     return false;
   628   }
   629   strcpy( buffer, val );
   630   return true;
   631 }
   634 // Return true if the VM is running with extra privileges, i.e. the real and effective user or group ids differ.
   636 bool os::have_special_privileges() {
   637   static bool init = false;
   638   static bool privileges = false;
   639   if (!init) {
   640     privileges = (getuid() != geteuid()) || (getgid() != getegid());
   641     init = true;
   642   }
   643   return privileges;
   644 }
   647 void os::init_system_properties_values() {
   648   // The next steps are taken in the product version:
   649   //
   650   // Obtain the JAVA_HOME value from the location of libjvm.so.
   651   // This library should be located at:
   652   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
   653   //
   654   // If "/jre/lib/" appears at the right place in the path, then we
   655   // assume libjvm.so is installed in a JDK and we use this path.
   656   //
   657   // Otherwise exit with message: "Could not create the Java virtual machine."
   658   //
   659   // The following extra steps are taken in the debugging version:
   660   //
   661   // If "/jre/lib/" does NOT appear at the right place in the path
   662   // instead of exit check for $JAVA_HOME environment variable.
   663   //
   664   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
   665   // then we append a fake suffix "hotspot/libjvm.so" to this path so
   666   // it looks like libjvm.so is installed there
   667   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
   668   //
   669   // Otherwise exit.
   670   //
   671   // Important note: if the location of libjvm.so changes this
   672   // code needs to be changed accordingly.
   674 // Base path of extensions installed on the system.
   675 #define SYS_EXT_DIR     "/usr/jdk/packages"
   676 #define EXTENSIONS_DIR  "/lib/ext"
   677 #define ENDORSED_DIR    "/lib/endorsed"
   679   char cpu_arch[12];
   680   // Buffer that fits several sprintfs.
   681   // Note that the space for the colon and the trailing null are provided
   682   // by the nulls included by the sizeof operator.
   683   const size_t bufsize =
   684     MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
   685          sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
   686          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
   687          (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
   688   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
   690   // sysclasspath, java_home, dll_dir
   691   {
   692     char *pslash;
   693     os::jvm_path(buf, bufsize);
   695     // Found the full path to libjvm.so.
   696     // Now cut the path to <java_home>/jre if we can.
   697     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
   698     pslash = strrchr(buf, '/');
   699     if (pslash != NULL) {
   700       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
   701     }
   702     Arguments::set_dll_dir(buf);
   704     if (pslash != NULL) {
   705       pslash = strrchr(buf, '/');
   706       if (pslash != NULL) {
   707         *pslash = '\0';          // Get rid of /<arch>.
   708         pslash = strrchr(buf, '/');
   709         if (pslash != NULL) {
   710           *pslash = '\0';        // Get rid of /lib.
   711         }
   712       }
   713     }
   714     Arguments::set_java_home(buf);
   715     set_boot_path('/', ':');
   716   }
   718   // Where to look for native libraries.
   719   {
   720     // Use dlinfo() to determine the correct java.library.path.
   721     //
   722     // If we're launched by the Java launcher, and the user
   723     // does not set java.library.path explicitly on the commandline,
   724     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
   725     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
   726     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
   727     // /usr/lib), which is exactly what we want.
   728     //
   729     // If the user does set java.library.path, it completely
   730     // overwrites this setting, and always has.
   731     //
   732     // If we're not launched by the Java launcher, we may
   733     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
   734     // settings.  Again, dlinfo does exactly what we want.
   736     Dl_serinfo     info_sz, *info = &info_sz;
   737     Dl_serpath     *path;
   738     char           *library_path;
   739     char           *common_path = buf;
   741     // Determine search path count and required buffer size.
   742     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
   743       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
   744       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
   745     }
   747     // Allocate new buffer and initialize.
   748     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
   749     info->dls_size = info_sz.dls_size;
   750     info->dls_cnt = info_sz.dls_cnt;
   752     // Obtain search path information.
   753     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
   754       FREE_C_HEAP_ARRAY(char, buf,  mtInternal);
   755       FREE_C_HEAP_ARRAY(char, info, mtInternal);
   756       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
   757     }
   759     path = &info->dls_serpath[0];
   761     // Note: Due to a legacy implementation, most of the library path
   762     // is set in the launcher. This was to accommodate linking restrictions
   763     // on legacy Solaris implementations (which are no longer supported).
   764     // Eventually, all the library path setting will be done here.
   765     //
   766     // However, to prevent the proliferation of improperly built native
   767     // libraries, the new path component /usr/jdk/packages is added here.
   769     // Determine the actual CPU architecture.
   770     sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
   771 #ifdef _LP64
   772     // If we are a 64-bit vm, perform the following translations:
   773     //   sparc   -> sparcv9
   774     //   i386    -> amd64
   775     if (strcmp(cpu_arch, "sparc") == 0) {
   776       strcat(cpu_arch, "v9");
   777     } else if (strcmp(cpu_arch, "i386") == 0) {
   778       strcpy(cpu_arch, "amd64");
   779     }
   780 #endif
   782     // Construct the invariant part of ld_library_path.
   783     sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);
   785     // Struct size is more than sufficient for the path components obtained
   786     // through the dlinfo() call, so only add additional space for the path
   787     // components explicitly added here.
   788     size_t library_path_size = info->dls_size + strlen(common_path);
   789     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
   790     library_path[0] = '\0';
   792     // Construct the desired Java library path from the linker's library
   793     // search path.
   794     //
   795     // For compatibility, it is optimal that we insert the additional path
   796     // components specific to the Java VM after those components specified
   797     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
   798     // infrastructure.
   799     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
   800       strcpy(library_path, common_path);
   801     } else {
   802       int inserted = 0;
   803       int i;
   804       for (i = 0; i < info->dls_cnt; i++, path++) {
   805         uint_t flags = path->dls_flags & LA_SER_MASK;
   806         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
   807           strcat(library_path, common_path);
   808           strcat(library_path, os::path_separator());
   809           inserted = 1;
   810         }
   811         strcat(library_path, path->dls_name);
   812         strcat(library_path, os::path_separator());
   813       }
   814       // Eliminate trailing path separator.
   815       library_path[strlen(library_path)-1] = '\0';
   816     }
   818     // happens before argument parsing - can't use a trace flag
   819     // tty->print_raw("init_system_properties_values: native lib path: ");
   820     // tty->print_raw_cr(library_path);
   822     // Callee copies into its own buffer.
   823     Arguments::set_library_path(library_path);
   825     FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
   826     FREE_C_HEAP_ARRAY(char, info, mtInternal);
   827   }
   829   // Extensions directories.
   830   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
   831   Arguments::set_ext_dirs(buf);
   833   // Endorsed standards default directory.
   834   sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
   835   Arguments::set_endorsed_dirs(buf);
   837   FREE_C_HEAP_ARRAY(char, buf, mtInternal);
   839 #undef SYS_EXT_DIR
   840 #undef EXTENSIONS_DIR
   841 #undef ENDORSED_DIR
   842 }
   844 void os::breakpoint() {
   845   BREAKPOINT;
   846 }
   848 bool os::obsolete_option(const JavaVMOption *option)
   849 {
   850   if (!strncmp(option->optionString, "-Xt", 3)) {
   851     return true;
   852   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
   853     return true;
   854   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
   855     return true;
   856   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
   857     return true;
   858   }
   859   return false;
   860 }
   862 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
   863   address  stackStart  = (address)thread->stack_base();
   864   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
   865   if (sp < stackStart && sp >= stackEnd ) return true;
   866   return false;
   867 }
   869 extern "C" void breakpoint() {
   870   // use debugger to set breakpoint here
   871 }
   873 static thread_t main_thread;
   875 // Thread start routine for all new Java threads
   876 extern "C" void* java_start(void* thread_addr) {
   877   // Try to randomize the cache line index of hot stack frames.
   878   // This helps when threads with the same stack traces evict each other's
   879   // cache lines. The threads can be either from the same JVM instance, or
   880   // from different JVM instances. The benefit is especially true for
   881   // processors with hyperthreading technology.
   882   static int counter = 0;
   883   int pid = os::current_process_id();
   884   alloca(((pid ^ counter++) & 7) * 128);
   886   int prio;
   887   Thread* thread = (Thread*)thread_addr;
   888   OSThread* osthr = thread->osthread();
   890   osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
   891   thread->_schedctl = (void *) schedctl_init () ;
   893   if (UseNUMA) {
   894     int lgrp_id = os::numa_get_group_id();
   895     if (lgrp_id != -1) {
   896       thread->set_lgrp_id(lgrp_id);
   897     }
   898   }
   900   // If the creator called set priority before we started,
   901   // we need to call set_native_priority now that we have an lwp.
   902   // We used to get the priority from thr_getprio (we called
   903   // thr_setprio way back in create_thread) and pass it to
   904   // set_native_priority, but Solaris scales the priority
   905   // in java_to_os_priority, so when we read it back here,
   906   // we pass trash to set_native_priority instead of what's
   907   // in java_to_os_priority. So we save the native priority
   908   // in the osThread and recall it here.
   910   if ( osthr->thread_id() != -1 ) {
   911     if ( UseThreadPriorities ) {
   912       int prio = osthr->native_priority();
   913       if (ThreadPriorityVerbose) {
   914         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
   915                       INTPTR_FORMAT ", setting priority: %d\n",
   916                       osthr->thread_id(), osthr->lwp_id(), prio);
   917       }
   918       os::set_native_priority(thread, prio);
   919     }
   920   } else if (ThreadPriorityVerbose) {
   921     warning("Can't set priority in _start routine, thread id hasn't been set\n");
   922   }
   924   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
   926   // initialize signal mask for this thread
   927   os::Solaris::hotspot_sigmask(thread);
   929   thread->run();
   931   // One less thread is executing
   932   // When the VMThread gets here, the main thread may have already exited
   933   // which frees the CodeHeap containing the Atomic::dec code
   934   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
   935     Atomic::dec(&os::Solaris::_os_thread_count);
   936   }
   938   if (UseDetachedThreads) {
   939     thr_exit(NULL);
   940     ShouldNotReachHere();
   941   }
   942   return NULL;
   943 }
   945 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
   946   // Allocate the OSThread object
   947   OSThread* osthread = new OSThread(NULL, NULL);
   948   if (osthread == NULL) return NULL;
   950   // Store info on the Solaris thread into the OSThread
   951   osthread->set_thread_id(thread_id);
   952   osthread->set_lwp_id(_lwp_self());
   953   thread->_schedctl = (void *) schedctl_init () ;
   955   if (UseNUMA) {
   956     int lgrp_id = os::numa_get_group_id();
   957     if (lgrp_id != -1) {
   958       thread->set_lgrp_id(lgrp_id);
   959     }
   960   }
   962   if ( ThreadPriorityVerbose ) {
   963     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
   964                   osthread->thread_id(), osthread->lwp_id() );
   965   }
   967   // Initial thread state is INITIALIZED, not SUSPENDED
   968   osthread->set_state(INITIALIZED);
   970   return osthread;
   971 }
   973 void os::Solaris::hotspot_sigmask(Thread* thread) {
   975   //Save caller's signal mask
   976   sigset_t sigmask;
   977   thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
   978   OSThread *osthread = thread->osthread();
   979   osthread->set_caller_sigmask(sigmask);
   981   thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
   982   if (!ReduceSignalUsage) {
   983     if (thread->is_VM_thread()) {
   984       // Only the VM thread handles BREAK_SIGNAL ...
   985       thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
   986     } else {
   987       // ... all other threads block BREAK_SIGNAL
   988       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
   989       thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
   990     }
   991   }
   992 }
   994 bool os::create_attached_thread(JavaThread* thread) {
   995 #ifdef ASSERT
   996   thread->verify_not_published();
   997 #endif
   998   OSThread* osthread = create_os_thread(thread, thr_self());
   999   if (osthread == NULL) {
  1000      return false;
  1001   }
  1003   // Initial thread state is RUNNABLE
  1004   osthread->set_state(RUNNABLE);
  1005   thread->set_osthread(osthread);
  1007   // initialize signal mask for this thread
  1008   // and save the caller's signal mask
  1009   os::Solaris::hotspot_sigmask(thread);
  1011   return true;
  1012 }
  1014 bool os::create_main_thread(JavaThread* thread) {
  1015 #ifdef ASSERT
  1016   thread->verify_not_published();
  1017 #endif
  1018   if (_starting_thread == NULL) {
  1019     _starting_thread = create_os_thread(thread, main_thread);
  1020      if (_starting_thread == NULL) {
  1021         return false;
  1022      }
  1023   }
  1025   // The primordial thread is runnable from the start
  1026   _starting_thread->set_state(RUNNABLE);
  1028   thread->set_osthread(_starting_thread);
  1030   // initialize signal mask for this thread
  1031   // and save the caller's signal mask
  1032   os::Solaris::hotspot_sigmask(thread);
  1034   return true;
  1035 }
  1037 // _T2_libthread is true if we believe we are running with the newer
  1038 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
  1039 bool os::Solaris::_T2_libthread = false;
  1041 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  1042   // Allocate the OSThread object
  1043   OSThread* osthread = new OSThread(NULL, NULL);
  1044   if (osthread == NULL) {
  1045     return false;
  1046   }
  1048   if ( ThreadPriorityVerbose ) {
  1049     char *thrtyp;
  1050     switch ( thr_type ) {
  1051       case vm_thread:
  1052         thrtyp = (char *)"vm";
  1053         break;
  1054       case cgc_thread:
  1055         thrtyp = (char *)"cgc";
  1056         break;
  1057       case pgc_thread:
  1058         thrtyp = (char *)"pgc";
  1059         break;
  1060       case java_thread:
  1061         thrtyp = (char *)"java";
  1062         break;
  1063       case compiler_thread:
  1064         thrtyp = (char *)"compiler";
  1065         break;
  1066       case watcher_thread:
  1067         thrtyp = (char *)"watcher";
  1068         break;
  1069       default:
  1070         thrtyp = (char *)"unknown";
  1071         break;
  1072     }
  1073     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  1074   }
  1076   // Calculate stack size if it's not specified by caller.
  1077   if (stack_size == 0) {
  1078     // The default stack size 1M (2M for LP64).
  1079     stack_size = (BytesPerWord >> 2) * K * K;
  1081     switch (thr_type) {
  1082     case os::java_thread:
  1083       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
  1084       if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
  1085       break;
  1086     case os::compiler_thread:
  1087       if (CompilerThreadStackSize > 0) {
  1088         stack_size = (size_t)(CompilerThreadStackSize * K);
  1089         break;
  1090       } // else fall through:
  1091         // use VMThreadStackSize if CompilerThreadStackSize is not defined
  1092     case os::vm_thread:
  1093     case os::pgc_thread:
  1094     case os::cgc_thread:
  1095     case os::watcher_thread:
  1096       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
  1097       break;
  1098     }
  1099   }
  1100   stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);
  1102   // Initial state is ALLOCATED but not INITIALIZED
  1103   osthread->set_state(ALLOCATED);
  1105   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
  1106     // We got lots of threads. Check if we still have some address space left.
  1107     // Need to be at least 5Mb of unreserved address space. We do check by
  1108     // trying to reserve some.
  1109     const size_t VirtualMemoryBangSize = 20*K*K;
  1110     char* mem = os::reserve_memory(VirtualMemoryBangSize);
  1111     if (mem == NULL) {
  1112       delete osthread;
  1113       return false;
  1114     } else {
  1115       // Release the memory again
  1116       os::release_memory(mem, VirtualMemoryBangSize);
  1117     }
  1118   }
  1120   // Setup osthread because the child thread may need it.
  1121   thread->set_osthread(osthread);
  1123   // Create the Solaris thread
  1124   // explicit THR_BOUND for T2_libthread case in case
  1125   // that assumption is not accurate, but our alternate signal stack
  1126   // handling is based on it which must have bound threads
  1127   thread_t tid = 0;
  1128   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
  1129                    | ((UseBoundThreads || os::Solaris::T2_libthread() ||
  1130                        (thr_type == vm_thread) ||
  1131                        (thr_type == cgc_thread) ||
  1132                        (thr_type == pgc_thread) ||
  1133                        (thr_type == compiler_thread && BackgroundCompilation)) ?
  1134                       THR_BOUND : 0);
  1135   int      status;
  1137   // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  1138   //
  1139   // On multiprocessor systems, libthread sometimes under-provisions our
  1140   // process with LWPs.  On a 30-way system, for instance, we could have
  1141   // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  1142   // to our process.  This can result in under utilization of PEs.
  1143   // I suspect the problem is related to libthread's LWP
  1144   // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  1145   // upcall policy.
  1146   //
  1147   // The following code is palliative -- it attempts to ensure that our
  1148   // process has sufficient LWPs to take advantage of multiple PEs.
  1149   // Proper long-term cures include using user-level threads bound to LWPs
  1150   // (THR_BOUND) or using LWP-based synchronization.  Note that there is a
  1151   // slight timing window with respect to sampling _os_thread_count, but
  1152   // the race is benign.  Also, we should periodically recompute
  1153   // _processors_online as the min of SC_NPROCESSORS_ONLN and
  1154   // the number of PEs in our partition.  You might be tempted to use
  1155   // THR_NEW_LWP here, but I'd recommend against it as that could
  1156   // result in undesirable growth of the libthread's LWP pool.
  1157   // The fix below isn't sufficient; for instance, it doesn't take into account
  1158   // LWPs parked on IO.  It does, however, help certain CPU-bound benchmarks.
  1159   //
  1160   // Some pathologies this scheme doesn't handle:
  1161   // *  Threads can block, releasing the LWPs.  The LWPs can age out.
  1162   //    When a large number of threads become ready again there aren't
  1163   //    enough LWPs available to service them.  This can occur when the
  1164   //    number of ready threads oscillates.
  1165   // *  LWPs/Threads park on IO, thus taking the LWP out of circulation.
  1166   //
  1167   // Finally, we should call thr_setconcurrency() periodically to refresh
  1168   // the LWP pool and thwart the LWP age-out mechanism.
  1169   // The "+3" term provides a little slop -- we want to slightly overprovision.
  1171   if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
  1172     if (!(flags & THR_BOUND)) {
  1173       thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
  1174     }
  1175   }
  1176   // Although this doesn't hurt, we should warn of undefined behavior
  1177   // when using unbound T1 threads with schedctl().  This should never
  1178   // happen, as the compiler and VM threads are always created bound
  1179   DEBUG_ONLY(
  1180       if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
  1181           (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
  1182           ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
  1183            (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
  1184          warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
  1185       }
  1186   );
  1189   // Mark that we don't have an lwp or thread id yet.
  1190   // In case we attempt to set the priority before the thread starts.
  1191   osthread->set_lwp_id(-1);
  1192   osthread->set_thread_id(-1);
  1194   status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  1195   if (status != 0) {
  1196     if (PrintMiscellaneous && (Verbose || WizardMode)) {
  1197       perror("os::create_thread");
  1198     }
  1199     thread->set_osthread(NULL);
  1200     // Need to clean up stuff we've allocated so far
  1201     delete osthread;
  1202     return false;
  1203   }
  1205   Atomic::inc(&os::Solaris::_os_thread_count);
  1207   // Store info on the Solaris thread into the OSThread
  1208   osthread->set_thread_id(tid);
  1210   // Remember that we created this thread so we can set priority on it
  1211   osthread->set_vm_created();
  1213   // Set the default thread priority.  If using bound threads, setting
  1214   // lwp priority will be delayed until thread start.
  1215   set_native_priority(thread,
  1216                       DefaultThreadPriority == -1 ?
  1217                         java_to_os_priority[NormPriority] :
  1218                         DefaultThreadPriority);
  1220   // Initial thread state is INITIALIZED, not SUSPENDED
  1221   osthread->set_state(INITIALIZED);
  1223   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  1224   return true;
  1225 }
  1227 /* Defined for Solaris 10 and later. This allows builds on earlier versions
  1228  * of Solaris to take advantage of the newly reserved Solaris JVM signals.
  1229  * With SIGJVM1 and SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2,
  1230  * and -XX:+UseAltSigs does nothing since these should have no conflict.
  1231  */
  1232 #if !defined(SIGJVM1)
  1233 #define SIGJVM1 39
  1234 #define SIGJVM2 40
  1235 #endif
  1237 debug_only(static bool signal_sets_initialized = false);
  1238 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
  1239 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
  1240 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
  1242 bool os::Solaris::is_sig_ignored(int sig) {
  1243       struct sigaction oact;
  1244       sigaction(sig, (struct sigaction*)NULL, &oact);
  1245       void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
  1246                                      : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
  1247       if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
  1248            return true;
  1249       else
  1250            return false;
  1251 }
  1253 // Note: SIGRTMIN is a macro that calls sysconf() so it will
  1254 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
  1255 static bool isJVM1available() {
  1256   return SIGJVM1 < SIGRTMIN;
  1257 }
  1259 void os::Solaris::signal_sets_init() {
  1260   // Should also have an assertion stating we are still single-threaded.
  1261   assert(!signal_sets_initialized, "Already initialized");
  1262   // Fill in signals that are necessarily unblocked for all threads in
  1263   // the VM. Currently, we unblock the following signals:
  1264   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  1265   //                         by -Xrs (=ReduceSignalUsage));
  1266   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  1267   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  1268   // the dispositions or masks wrt these signals.
  1269   // Programs embedding the VM that want to use the above signals for their
  1270   // own purposes must, at this time, use the "-Xrs" option to prevent
  1271   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  1272   // (See bug 4345157, and other related bugs).
  1273   // In reality, though, unblocking these signals is really a nop, since
  1274   // these signals are not blocked by default.
  1275   sigemptyset(&unblocked_sigs);
  1276   sigemptyset(&allowdebug_blocked_sigs);
  1277   sigaddset(&unblocked_sigs, SIGILL);
  1278   sigaddset(&unblocked_sigs, SIGSEGV);
  1279   sigaddset(&unblocked_sigs, SIGBUS);
  1280   sigaddset(&unblocked_sigs, SIGFPE);
  1282   if (isJVM1available()) {
  1283     os::Solaris::set_SIGinterrupt(SIGJVM1);
  1284     os::Solaris::set_SIGasync(SIGJVM2);
  1285   } else if (UseAltSigs) {
  1286     os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
  1287     os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
  1288   } else {
  1289     os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
  1290     os::Solaris::set_SIGasync(ASYNC_SIGNAL);
  1291   }
  1293   sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
  1294   sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
  1296   if (!ReduceSignalUsage) {
  1297    if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
  1298       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
  1299       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
  1300    }
  1301    if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
  1302       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
  1303       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
  1304    }
  1305    if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
  1306       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
  1307       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
  1308    }
  1309   }
  1310   // Fill in signals that are blocked by all but the VM thread.
  1311   sigemptyset(&vm_sigs);
  1312   if (!ReduceSignalUsage)
  1313     sigaddset(&vm_sigs, BREAK_SIGNAL);
  1314   debug_only(signal_sets_initialized = true);
  1316   // For diagnostics only used in run_periodic_checks
  1317   sigemptyset(&check_signal_done);
  1318 }
  1320 // These are signals that are unblocked while a thread is running Java.
  1321 // (For some reason, they get blocked by default.)
  1322 sigset_t* os::Solaris::unblocked_signals() {
  1323   assert(signal_sets_initialized, "Not initialized");
  1324   return &unblocked_sigs;
  1325 }
  1327 // These are the signals that are blocked while a (non-VM) thread is
  1328 // running Java. Only the VM thread handles these signals.
  1329 sigset_t* os::Solaris::vm_signals() {
  1330   assert(signal_sets_initialized, "Not initialized");
  1331   return &vm_sigs;
  1332 }
  1334 // These are signals that are blocked during cond_wait to allow debugger in
  1335 sigset_t* os::Solaris::allowdebug_blocked_signals() {
  1336   assert(signal_sets_initialized, "Not initialized");
  1337   return &allowdebug_blocked_sigs;
  1338 }
  1341 void _handle_uncaught_cxx_exception() {
  1342   VMError err("An uncaught C++ exception");
  1343   err.report_and_die();
  1344 }
  1347 // First crack at OS-specific initialization, from inside the new thread.
  1348 void os::initialize_thread(Thread* thr) {
  1349   int r = thr_main() ;
  1350   guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  1351   if (r) {
  1352     JavaThread* jt = (JavaThread *)thr;
  1353     assert(jt != NULL,"Sanity check");
  1354     size_t stack_size;
  1355     address base = jt->stack_base();
  1356     if (Arguments::created_by_java_launcher()) {
  1357       // Use 2MB to allow for Solaris 7 64 bit mode.
  1358       stack_size = JavaThread::stack_size_at_create() == 0
  1359         ? 2048*K : JavaThread::stack_size_at_create();
  1361       // There are rare cases when we may have already used more than
  1362       // the basic stack size allotment before this method is invoked.
  1363       // Attempt to allow for a normally sized java_stack.
  1364       size_t current_stack_offset = (size_t)(base - (address)&stack_size);
  1365       stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
  1366     } else {
  1367       // 6269555: If we were not created by a Java launcher, i.e. if we are
  1368       // running embedded in a native application, treat the primordial thread
  1369       // as much like a native attached thread as possible.  This means using
  1370       // the current stack size from thr_stksegment(), unless it is too large
  1371       // to reliably setup guard pages.  A reasonable max size is 8MB.
  1372       size_t current_size = current_stack_size();
  1373       // This should never happen, but just in case....
  1374       if (current_size == 0) current_size = 2 * K * K;
  1375       stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
  1377     address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());
  1378     stack_size = (size_t)(base - bottom);
  1380     assert(stack_size > 0, "Stack size calculation problem");
  1382     if (stack_size > jt->stack_size()) {
  1383       NOT_PRODUCT(
  1384         struct rlimit limits;
  1385         getrlimit(RLIMIT_STACK, &limits);
  1386         size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
  1387         assert(size >= jt->stack_size(), "Stack size problem in main thread");
  1389       tty->print_cr(
  1390         "Stack size of %d Kb exceeds current limit of %d Kb.\n"
  1391         "(Stack sizes are rounded up to a multiple of the system page size.)\n"
  1392         "See limit(1) to increase the stack size limit.",
  1393         stack_size / K, jt->stack_size() / K);
  1394       vm_exit(1);
  1396     assert(jt->stack_size() >= stack_size,
  1397           "Attempt to map more stack than was allocated");
  1398     jt->set_stack_size(stack_size);
  1401    // 5/22/01: Right now alternate signal stacks do not handle
  1402    // throwing stack overflow exceptions, see bug 4463178
  1403    // Until a fix is found for this, T2 will NOT imply alternate signal
  1404    // stacks.
  1405    // If using T2 libthread threads, install an alternate signal stack.
  1406    // Because alternate stacks associate with LWPs on Solaris,
  1407    // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
  1408    // we prefer to explicitly stack bang.
  1409    // If not using T2 libthread, but using UseBoundThreads any threads
  1410    // (primordial thread, jni_attachCurrentThread) we do not create,
  1411    // probably are not bound, therefore they can not have an alternate
  1412    // signal stack. Since our stack banging code is generated and
  1413    // is shared across threads, all threads must be bound to allow
  1414    // using alternate signal stacks.  The alternative is to interpose
  1415    // on _lwp_create to associate an alt sig stack with each LWP,
  1416    // and this could be a problem when the JVM is embedded.
  1417    // We would prefer to use alternate signal stacks with T2
  1418    // Since there is currently no accurate way to detect T2
  1419    // we do not. Assuming T2 when running T1 causes sig 11s or assertions
  1420    // on installing alternate signal stacks
  1423    // 05/09/03: removed alternate signal stack support for Solaris
  1424    // The alternate signal stack mechanism is no longer needed to
  1425    // handle stack overflow. This is now handled by allocating
  1426    // guard pages (red zone) and stackbanging.
  1427    // Initially the alternate signal stack mechanism was removed because
  1428    // it did not work with T1 libthread. Alternate
  1429    // signal stacks MUST have all threads bound to lwps. Applications
  1430    // can create their own threads and attach them without their being
  1431    // bound under T1. This is frequently the case for the primordial thread.
  1432    // If we were ever to reenable this mechanism we would need to
  1433    // use the dynamic check for T2 libthread.
  1435   os::Solaris::init_thread_fpu_state();
  1436   std::set_terminate(_handle_uncaught_cxx_exception);
  1441 // Free Solaris resources related to the OSThread
  1442 void os::free_thread(OSThread* osthread) {
  1443   assert(osthread != NULL, "os::free_thread but osthread not set");
  1446   // We are told to free resources of the argument thread,
  1447   // but we can only really operate on the current thread.
  1448   // The main thread must take the VMThread down synchronously
  1449   // before the main thread exits and frees up CodeHeap
  1450   guarantee((Thread::current()->osthread() == osthread
  1451      || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  1452   if (Thread::current()->osthread() == osthread) {
  1453     // Restore caller's signal mask
  1454     sigset_t sigmask = osthread->caller_sigmask();
  1455     thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  1457   delete osthread;
  1460 void os::pd_start_thread(Thread* thread) {
  1461   int status = thr_continue(thread->osthread()->thread_id());
  1462   assert_status(status == 0, status, "thr_continue failed");
  1466 intx os::current_thread_id() {
  1467   return (intx)thr_self();
  1470 static pid_t _initial_pid = 0;
  1472 int os::current_process_id() {
  1473   return (int)(_initial_pid ? _initial_pid : getpid());
  1476 int os::allocate_thread_local_storage() {
  1477   // %%%       in Win32 this allocates a memory segment pointed to by a
  1478   //           register.  Dan Stein can implement a similar feature in
  1479   //           Solaris.  Alternatively, the VM can do the same thing
  1480   //           explicitly: malloc some storage and keep the pointer in a
  1481   //           register (which is part of the thread's context) (or keep it
  1482   //           in TLS).
  1483   // %%%       In current versions of Solaris, thr_self and TSD can
  1484   //           be accessed via short sequences of displaced indirections.
  1485   //           The value of thr_self is available as %g7(36).
  1486   //           The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  1487   //           assuming that the current thread already has a value bound to k.
  1488   //           It may be worth experimenting with such access patterns,
  1489   //           and later having the parameters formally exported from a Solaris
  1490   //           interface.  I think, however, that it will be faster to
  1491   //           maintain the invariant that %g2 always contains the
  1492   //           JavaThread in Java code, and have stubs simply
  1493   //           treat %g2 as a caller-save register, preserving it in a %lN.
  1494   thread_key_t tk;
  1495   if (thr_keycreate( &tk, NULL ) )
  1496     fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
  1497                   "(%s)", strerror(errno)));
  1498   return int(tk);
  1501 void os::free_thread_local_storage(int index) {
  1502   // %%% don't think we need anything here
  1503   // if ( pthread_key_delete((pthread_key_t) tk) )
  1504   //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
  1507 #define SMALLINT 32   // libthread's allocation for tsd_common is a version-specific
  1508                       // small number - the point is that NO swap space is available
  1509 void os::thread_local_storage_at_put(int index, void* value) {
  1510   // %%% this is used only in threadLocalStorage.cpp
  1511   if (thr_setspecific((thread_key_t)index, value)) {
  1512     if (errno == ENOMEM) {
  1513        vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
  1514                              "thr_setspecific: out of swap space");
  1515     } else {
  1516       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
  1517                     "(%s)", strerror(errno)));
  1519   } else {
  1520       ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  1524 // This function could be called before TLS is initialized, for example, when
  1525 // VM receives an async signal or when VM causes a fatal error during
  1526 // initialization. Return NULL if thr_getspecific() fails.
  1527 void* os::thread_local_storage_at(int index) {
  1528   // %%% this is used only in threadLocalStorage.cpp
  1529   void* r = NULL;
  1530   return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
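       // An illustrative sketch (guarded out of the build): the intended round trip
       // through the TLS helpers above. The helper name example_tls_round_trip is
       // hypothetical.
       #if 0
       static void example_tls_round_trip(Thread* current) {
         int idx = os::allocate_thread_local_storage();           // thr_keycreate
         os::thread_local_storage_at_put(idx, current);           // thr_setspecific
         Thread* t = (Thread*)os::thread_local_storage_at(idx);   // thr_getspecific
         assert(t == current, "TLS slot should return what was stored");
         os::free_thread_local_storage(idx);                      // currently a no-op
       }
       #endif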
  1534 // gethrtime() should be monotonic according to the documentation,
  1535 // but some virtualized platforms are known to break this guarantee.
  1536 // getTimeNanos() must be guaranteed not to move backwards, so we
  1537 // are forced to add a check here.
  1538 inline hrtime_t getTimeNanos() {
  1539   const hrtime_t now = gethrtime();
  1540   const hrtime_t prev = max_hrtime;
  1541   if (now <= prev) {
  1542     return prev;   // same or retrograde time;
  1544   const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  1545   assert(obsv >= prev, "invariant");   // Monotonicity
  1546   // If the CAS succeeded then we're done and return "now".
  1547   // If the CAS failed and the observed value "obsv" is >= now then
  1548   // we should return "obsv".  If the CAS failed and now > obsv > prev then
  1549   // some other thread raced this thread and installed a new value, in which case
  1550   // we could either (a) retry the entire operation, (b) retry trying to install now
  1551   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
  1552   // we might discard a higher "now" value in deference to a slightly lower but freshly
  1553   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
  1554   // to (a) or (b) -- and greatly reduces coherence traffic.
  1555   // We might also condition (c) on the magnitude of the delta between obsv and now.
  1556   // Avoiding excessive CAS operations to hot RW locations is critical.
  1557   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  1558   return (prev == obsv) ? now : obsv;
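       // Worked example of case (c), with made-up values: suppose max_hrtime is 100
       // and this thread reads now = 105; before its CAS lands, another thread
       // installs 107. The CAS fails with obsv = 107 >= now, so we return 107,
       // which is still monotone, and no retry loop or extra CAS traffic is needed.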
  1561 // Time since start-up in seconds to a fine granularity.
  1562 // Used by VMSelfDestructTimer and the MemProfiler.
  1563 double os::elapsedTime() {
  1564   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
  1567 jlong os::elapsed_counter() {
  1568   return (jlong)(getTimeNanos() - first_hrtime);
  1571 jlong os::elapsed_frequency() {
  1572    return hrtime_hz;
  1575 // Return the real, user, and system times in seconds from an
  1576 // arbitrary fixed point in the past.
  1577 bool os::getTimesSecs(double* process_real_time,
  1578                   double* process_user_time,
  1579                   double* process_system_time) {
  1580   struct tms ticks;
  1581   clock_t real_ticks = times(&ticks);
  1583   if (real_ticks == (clock_t) (-1)) {
  1584     return false;
  1585   } else {
  1586     double ticks_per_second = (double) clock_tics_per_sec;
  1587     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
  1588     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
  1589     // For consistency return the real time from getTimeNanos()
  1590     // converted to seconds.
  1591     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
  1593     return true;
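       // An illustrative sketch (guarded out of the build): deriving the CPU time
       // consumed between two samples of getTimesSecs(). All variable names here
       // are hypothetical.
       #if 0
       static double example_cpu_seconds_between_samples() {
         double r0, u0, s0, r1, u1, s1;
         if (os::getTimesSecs(&r0, &u0, &s0) && os::getTimesSecs(&r1, &u1, &s1)) {
           return (u1 - u0) + (s1 - s0);   // user + system CPU seconds in between
         }
         return 0.0;
       }
       #endif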
  1597 bool os::supports_vtime() { return true; }
  1599 bool os::enable_vtime() {
  1600   int fd = ::open("/proc/self/ctl", O_WRONLY);
  1601   if (fd == -1)
  1602     return false;
  1604   long cmd[] = { PCSET, PR_MSACCT };
  1605   int res = ::write(fd, cmd, sizeof(long) * 2);
  1606   ::close(fd);
  1607   if (res != sizeof(long) * 2)
  1608     return false;
  1610   return true;
  1613 bool os::vtime_enabled() {
  1614   int fd = ::open("/proc/self/status", O_RDONLY);
  1615   if (fd == -1)
  1616     return false;
  1618   pstatus_t status;
  1619   int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  1620   ::close(fd);
  1621   if (res != sizeof(pstatus_t))
  1622     return false;
  1624   return status.pr_flags & PR_MSACCT;
  1627 double os::elapsedVTime() {
  1628   return (double)gethrvtime() / (double)hrtime_hz;
  1631 // Used internally for comparisons only
  1632 // getTimeMillis is guaranteed not to move backwards on Solaris
  1633 jlong getTimeMillis() {
  1634   jlong nanotime = getTimeNanos();
  1635   return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
  1638 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
  1639 jlong os::javaTimeMillis() {
  1640   timeval t;
  1641   if (gettimeofday( &t, NULL) == -1)
  1642     fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  1643   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
  1646 jlong os::javaTimeNanos() {
  1647   return (jlong)getTimeNanos();
  1650 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  1651   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  1652   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  1653   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  1654   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
  1657 char * os::local_time_string(char *buf, size_t buflen) {
  1658   struct tm t;
  1659   time_t long_time;
  1660   time(&long_time);
  1661   localtime_r(&long_time, &t);
  1662   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
  1663                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
  1664                t.tm_hour, t.tm_min, t.tm_sec);
  1665   return buf;
  1668 // Note: os::shutdown() might be called very early during initialization, or
  1669 // called from signal handler. Before adding something to os::shutdown(), make
  1670 // sure it is async-safe and can handle partially initialized VM.
  1671 void os::shutdown() {
  1673   // allow PerfMemory to attempt cleanup of any persistent resources
  1674   perfMemory_exit();
  1676   // needs to remove object in file system
  1677   AttachListener::abort();
  1679   // flush buffered output, finish log files
  1680   ostream_abort();
  1682   // Check for abort hook
  1683   abort_hook_t abort_hook = Arguments::abort_hook();
  1684   if (abort_hook != NULL) {
  1685     abort_hook();
  1689 // Note: os::abort() might be called very early during initialization, or
  1690 // called from signal handler. Before adding something to os::abort(), make
  1691 // sure it is async-safe and can handle partially initialized VM.
  1692 void os::abort(bool dump_core) {
  1693   os::shutdown();
  1694   if (dump_core) {
  1695 #ifndef PRODUCT
  1696     fdStream out(defaultStream::output_fd());
  1697     out.print_raw("Current thread is ");
  1698     char buf[16];
  1699     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
  1700     out.print_raw_cr(buf);
  1701     out.print_raw_cr("Dumping core ...");
  1702 #endif
  1703     ::abort(); // dump core (for debugging)
  1706   ::exit(1);
  1709 // Die immediately, no exit hook, no abort hook, no cleanup.
  1710 void os::die() {
  1711   ::abort(); // dump core (for debugging)
  1714 // DLL functions
  1716 const char* os::dll_file_extension() { return ".so"; }
  1718 // This must be hard coded because it's the system's temporary
  1719 // directory, not the java application's temp directory, a la java.io.tmpdir.
  1720 const char* os::get_temp_directory() { return "/tmp"; }
  1722 static bool file_exists(const char* filename) {
  1723   struct stat statbuf;
  1724   if (filename == NULL || strlen(filename) == 0) {
  1725     return false;
  1727   return os::stat(filename, &statbuf) == 0;
  1730 bool os::dll_build_name(char* buffer, size_t buflen,
  1731                         const char* pname, const char* fname) {
  1732   bool retval = false;
  1733   const size_t pnamelen = pname ? strlen(pname) : 0;
  1735   // Return error on buffer overflow.
  1736   if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
  1737     return retval;
  1740   if (pnamelen == 0) {
  1741     snprintf(buffer, buflen, "lib%s.so", fname);
  1742     retval = true;
  1743   } else if (strchr(pname, *os::path_separator()) != NULL) {
  1744     int n;
  1745     char** pelements = split_path(pname, &n);
  1746     if (pelements == NULL) {
  1747       return false;
  1749     for (int i = 0 ; i < n ; i++) {
  1750       // really shouldn't be NULL but what the heck, check can't hurt
  1751       if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
  1752         continue; // skip the empty path values
  1754       snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
  1755       if (file_exists(buffer)) {
  1756         retval = true;
  1757         break;
  1760     // release the storage
  1761     for (int i = 0 ; i < n ; i++) {
  1762       if (pelements[i] != NULL) {
  1763         FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
  1766     if (pelements != NULL) {
  1767       FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
  1769   } else {
  1770     snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
  1771     retval = true;
  1773   return retval;
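       // An illustrative sketch (guarded out of the build): two hypothetical calls
       // into dll_build_name(); the search path and the library name "verify" are
       // made up for the example.
       #if 0
       static void example_dll_build_name() {
         char buf[MAXPATHLEN];
         // Empty path component: produces simply "libverify.so".
         os::dll_build_name(buf, sizeof(buf), "", "verify");
         // Colon-separated path: probes each element for <dir>/libverify.so and
         // keeps the first candidate that exists on disk.
         os::dll_build_name(buf, sizeof(buf), "/usr/jdk/lib:/opt/lib", "verify");
       }
       #endif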
  1776 // check if addr is inside libjvm.so
  1777 bool os::address_is_in_vm(address addr) {
  1778   static address libjvm_base_addr;
  1779   Dl_info dlinfo;
  1781   if (libjvm_base_addr == NULL) {
  1782     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
  1783       libjvm_base_addr = (address)dlinfo.dli_fbase;
  1785     assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  1788   if (dladdr((void *)addr, &dlinfo) != 0) {
  1789     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  1792   return false;
  1795 typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
  1796 static dladdr1_func_type dladdr1_func = NULL;
  1798 bool os::dll_address_to_function_name(address addr, char *buf,
  1799                                       int buflen, int * offset) {
  1800   // buf is not optional, but offset is optional
  1801   assert(buf != NULL, "sanity check");
  1803   Dl_info dlinfo;
  1805   // dladdr1_func was initialized in os::init()
  1806   if (dladdr1_func != NULL) {
  1807     // yes, we have dladdr1
  1809     // Support for dladdr1 is checked at runtime; it may be
  1810     // available even if the vm is built on a machine that does
  1811     // not have dladdr1 support.  Make sure there is a value for
  1812     // RTLD_DL_SYMENT.
  1813     #ifndef RTLD_DL_SYMENT
  1814     #define RTLD_DL_SYMENT 1
  1815     #endif
  1816 #ifdef _LP64
  1817     Elf64_Sym * info;
  1818 #else
  1819     Elf32_Sym * info;
  1820 #endif
  1821     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
  1822                      RTLD_DL_SYMENT) != 0) {
  1823       // see if we have a matching symbol that covers our address
  1824       if (dlinfo.dli_saddr != NULL &&
  1825           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
  1826         if (dlinfo.dli_sname != NULL) {
  1827           if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
  1828             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
  1830           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
  1831           return true;
  1834       // no matching symbol so try for just file info
  1835       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
  1836         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
  1837                             buf, buflen, offset, dlinfo.dli_fname)) {
  1838           return true;
  1842     buf[0] = '\0';
  1843     if (offset != NULL) *offset  = -1;
  1844     return false;
  1847   // no, only dladdr is available
  1848   if (dladdr((void *)addr, &dlinfo) != 0) {
  1849     // see if we have a matching symbol
  1850     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
  1851       if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
  1852         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
  1854       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
  1855       return true;
  1857     // no matching symbol so try for just file info
  1858     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
  1859       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
  1860                           buf, buflen, offset, dlinfo.dli_fname)) {
  1861         return true;
  1865   buf[0] = '\0';
  1866   if (offset != NULL) *offset  = -1;
  1867   return false;
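       // An illustrative sketch (guarded out of the build): printing a hypothetical
       // pc in "symbol+0xoffset" form using the lookup above.
       #if 0
       static void example_symbolize(address pc, outputStream* st) {
         char name[256];
         int offset;
         if (os::dll_address_to_function_name(pc, name, sizeof(name), &offset)) {
           st->print_cr("%s+0x%x", name, offset);   // demangled symbol plus offset
         } else {
           st->print_cr(PTR_FORMAT, pc);            // fall back to the raw address
         }
       }
       #endif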
  1870 bool os::dll_address_to_library_name(address addr, char* buf,
  1871                                      int buflen, int* offset) {
  1872   // buf is not optional, but offset is optional
  1873   assert(buf != NULL, "sanity check");
  1875   Dl_info dlinfo;
  1877   if (dladdr((void*)addr, &dlinfo) != 0) {
  1878     if (dlinfo.dli_fname != NULL) {
  1879       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
  1881     if (dlinfo.dli_fbase != NULL && offset != NULL) {
  1882       *offset = addr - (address)dlinfo.dli_fbase;
  1884     return true;
  1887   buf[0] = '\0';
  1888   if (offset) *offset = -1;
  1889   return false;
  1892 // Prints the names and full paths of all opened dynamic libraries
  1893 // for current process
  1894 void os::print_dll_info(outputStream * st) {
  1895   Dl_info dli;
  1896   void *handle;
  1897   Link_map *map;
  1898   Link_map *p;
  1900   st->print_cr("Dynamic libraries:"); st->flush();
  1902   if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
  1903       dli.dli_fname == NULL) {
  1904     st->print_cr("Error: Cannot print dynamic libraries.");
  1905     return;
  1907   handle = dlopen(dli.dli_fname, RTLD_LAZY);
  1908   if (handle == NULL) {
  1909     st->print_cr("Error: Cannot print dynamic libraries.");
  1910     return;
  1912   dlinfo(handle, RTLD_DI_LINKMAP, &map);
  1913   if (map == NULL) {
  1914     st->print_cr("Error: Cannot print dynamic libraries.");
  1915     return;
  1918   while (map->l_prev != NULL)
  1919     map = map->l_prev;
  1921   while (map != NULL) {
  1922     st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
  1923     map = map->l_next;
  1926   dlclose(handle);
  1929   // Loads .dll/.so and,
  1930   // in case of error, checks whether the .dll/.so was built for the
  1931   // same architecture as the one HotSpot is running on
  1933 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
  1935   void * result= ::dlopen(filename, RTLD_LAZY);
  1936   if (result != NULL) {
  1937     // Successful loading
  1938     return result;
  1941   Elf32_Ehdr elf_head;
  1943   // Read system error message into ebuf
  1944   // It may or may not be overwritten below
  1945   ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  1946   ebuf[ebuflen-1]='\0';
  1947   int diag_msg_max_length=ebuflen-strlen(ebuf);
  1948   char* diag_msg_buf=ebuf+strlen(ebuf);
  1950   if (diag_msg_max_length==0) {
  1951     // No more space in ebuf for additional diagnostics message
  1952     return NULL;
  1956   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
  1958   if (file_descriptor < 0) {
  1959     // Can't open library, report dlerror() message
  1960     return NULL;
  1963   bool failed_to_read_elf_head=
  1964     (sizeof(elf_head)!=
  1965         (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
  1967   ::close(file_descriptor);
  1968   if (failed_to_read_elf_head) {
  1969     // file i/o error - report dlerror() msg
  1970     return NULL;
  1973   typedef struct {
  1974     Elf32_Half  code;         // Actual value as defined in elf.h
  1975     Elf32_Half  compat_class; // Compatibility class of the arch, as the VM sees it
  1976     char        elf_class;    // 32 or 64 bit
  1977     char        endianess;    // MSB or LSB
  1978     char*       name;         // String representation
  1979   } arch_t;
  1981   static const arch_t arch_array[]={
  1982     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
  1983     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
  1984     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
  1985     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
  1986     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
  1987     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
  1988     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
  1989     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
  1990     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
  1991     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
  1992   };
  1994   #if  (defined IA32)
  1995     static  Elf32_Half running_arch_code=EM_386;
  1996   #elif   (defined AMD64)
  1997     static  Elf32_Half running_arch_code=EM_X86_64;
  1998   #elif  (defined IA64)
  1999     static  Elf32_Half running_arch_code=EM_IA_64;
  2000   #elif  (defined __sparc) && (defined _LP64)
  2001     static  Elf32_Half running_arch_code=EM_SPARCV9;
  2002   #elif  (defined __sparc) && (!defined _LP64)
  2003     static  Elf32_Half running_arch_code=EM_SPARC;
  2004   #elif  (defined __powerpc64__)
  2005     static  Elf32_Half running_arch_code=EM_PPC64;
  2006   #elif  (defined __powerpc__)
  2007     static  Elf32_Half running_arch_code=EM_PPC;
  2008   #elif (defined ARM)
  2009     static  Elf32_Half running_arch_code=EM_ARM;
  2010   #else
  2011     #error Method os::dll_load requires that one of the following is defined:\
  2012          IA32, AMD64, IA64, __sparc, __powerpc__, ARM
  2013   #endif
  2015   // Identify compatibility class for VM's architecture and library's architecture
  2016   // Obtain string descriptions for architectures
  2018   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  2019   int running_arch_index=-1;
  2021   for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
  2022     if (running_arch_code == arch_array[i].code) {
  2023       running_arch_index    = i;
  2025     if (lib_arch.code == arch_array[i].code) {
  2026       lib_arch.compat_class = arch_array[i].compat_class;
  2027       lib_arch.name         = arch_array[i].name;
  2031   assert(running_arch_index != -1,
  2032     "Didn't find running architecture code (running_arch_code) in arch_array");
  2033   if (running_arch_index == -1) {
  2034     // Even though running architecture detection failed
  2035     // we may still continue with reporting dlerror() message
  2036     return NULL;
  2039   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
  2040     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
  2041     return NULL;
  2044   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
  2045     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
  2046     return NULL;
  2049   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
  2050     if ( lib_arch.name!=NULL ) {
  2051       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
  2052         " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
  2053         lib_arch.name, arch_array[running_arch_index].name);
  2054     } else {
  2055       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
  2056       " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
  2057         lib_arch.code,
  2058         arch_array[running_arch_index].name);
  2062   return NULL;
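       // An illustrative sketch (guarded out of the build): the typical caller
       // pattern for dll_load(). The library path and the entry point name are
       // hypothetical.
       #if 0
       static void example_dll_load() {
         char ebuf[1024];
         void* handle = os::dll_load("/opt/foo/libfoo.so", ebuf, sizeof(ebuf));
         if (handle == NULL) {
           // ebuf holds the dlerror() text, possibly followed by one of the
           // architecture-mismatch diagnostics composed above.
           warning("could not load libfoo.so: %s", ebuf);
         } else {
           void* entry = os::dll_lookup(handle, "JNI_OnLoad");
         }
       }
       #endif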
  2065 void* os::dll_lookup(void* handle, const char* name) {
  2066   return dlsym(handle, name);
  2069 void* os::get_default_process_handle() {
  2070   return (void*)::dlopen(NULL, RTLD_LAZY);
  2073 int os::stat(const char *path, struct stat *sbuf) {
  2074   char pathbuf[MAX_PATH];
  2075   if (strlen(path) > MAX_PATH - 1) {
  2076     errno = ENAMETOOLONG;
  2077     return -1;
  2079   os::native_path(strcpy(pathbuf, path));
  2080   return ::stat(pathbuf, sbuf);
  2083 static bool _print_ascii_file(const char* filename, outputStream* st) {
  2084   int fd = ::open(filename, O_RDONLY);
  2085   if (fd == -1) {
  2086      return false;
  2089   char buf[32];
  2090   int bytes;
  2091   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
  2092     st->print_raw(buf, bytes);
  2095   ::close(fd);
  2097   return true;
  2100 void os::print_os_info_brief(outputStream* st) {
  2101   os::Solaris::print_distro_info(st);
  2103   os::Posix::print_uname_info(st);
  2105   os::Solaris::print_libversion_info(st);
  2108 void os::print_os_info(outputStream* st) {
  2109   st->print("OS:");
  2111   os::Solaris::print_distro_info(st);
  2113   os::Posix::print_uname_info(st);
  2115   os::Solaris::print_libversion_info(st);
  2117   os::Posix::print_rlimit_info(st);
  2119   os::Posix::print_load_average(st);
  2122 void os::Solaris::print_distro_info(outputStream* st) {
  2123   if (!_print_ascii_file("/etc/release", st)) {
  2124       st->print("Solaris");
  2126     st->cr();
  2129 void os::Solaris::print_libversion_info(outputStream* st) {
  2130   if (os::Solaris::T2_libthread()) {
  2131     st->print("  (T2 libthread)");
  2133   else {
  2134     st->print("  (T1 libthread)");
  2136   st->cr();
  2139 static bool check_addr0(outputStream* st) {
  2140   jboolean status = false;
  2141   int fd = ::open("/proc/self/map",O_RDONLY);
  2142   if (fd >= 0) {
  2143     prmap_t p;
  2144     while(::read(fd, &p, sizeof(p)) > 0) {
  2145       if (p.pr_vaddr == 0x0) {
  2146         st->print("Warning: Address: 0x%x, Size: %dK, ", p.pr_vaddr, p.pr_size/1024);
  2147         st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
  2148         st->print("Access:");
  2149         st->print("%s",(p.pr_mflags & MA_READ)  ? "r" : "-");
  2150         st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
  2151         st->print("%s",(p.pr_mflags & MA_EXEC)  ? "x" : "-");
  2152         st->cr();
  2153         status = true;
  2156     ::close(fd);
  2158   return status;
  2161 void os::pd_print_cpu_info(outputStream* st) {
  2162   // Nothing to do for now.
  2165 void os::print_memory_info(outputStream* st) {
  2166   st->print("Memory:");
  2167   st->print(" %dk page", os::vm_page_size()>>10);
  2168   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  2169   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  2170   st->cr();
  2171   (void) check_addr0(st);
  2174 void os::print_siginfo(outputStream* st, void* siginfo) {
  2175   const siginfo_t* si = (const siginfo_t*)siginfo;
  2177   os::Posix::print_siginfo_brief(st, si);
  2179   if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
  2180       UseSharedSpaces) {
  2181     FileMapInfo* mapinfo = FileMapInfo::current_info();
  2182     if (mapinfo->is_in_shared_space(si->si_addr)) {
  2183       st->print("\n\nError accessing class data sharing archive."   \
  2184                 " Mapped file inaccessible during execution, "      \
  2185                 " possible disk/network problem.");
  2188   st->cr();
  2191 // Moved up from the signal handling code below, because we need them
  2192 // here for diagnostic prints.
  2193 #define OLDMAXSIGNUM 32
  2194 static int Maxsignum = 0;
  2195 static int *ourSigFlags = NULL;
  2197 extern "C" void sigINTRHandler(int, siginfo_t*, void*);
  2199 int os::Solaris::get_our_sigflags(int sig) {
  2200   assert(ourSigFlags!=NULL, "signal data structure not initialized");
  2201   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  2202   return ourSigFlags[sig];
  2205 void os::Solaris::set_our_sigflags(int sig, int flags) {
  2206   assert(ourSigFlags!=NULL, "signal data structure not initialized");
  2207   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  2208   ourSigFlags[sig] = flags;
  2212 static const char* get_signal_handler_name(address handler,
  2213                                            char* buf, int buflen) {
  2214   int offset;
  2215   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  2216   if (found) {
  2217     // skip directory names
  2218     const char *p1, *p2;
  2219     p1 = buf;
  2220     size_t len = strlen(os::file_separator());
  2221     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
  2222     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  2223   } else {
  2224     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  2226   return buf;
  2229 static void print_signal_handler(outputStream* st, int sig,
  2230                                   char* buf, size_t buflen) {
  2231   struct sigaction sa;
  2233   sigaction(sig, NULL, &sa);
  2235   st->print("%s: ", os::exception_name(sig, buf, buflen));
  2237   address handler = (sa.sa_flags & SA_SIGINFO)
  2238                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
  2239                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
  2241   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
  2242     st->print("SIG_DFL");
  2243   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
  2244     st->print("SIG_IGN");
  2245   } else {
  2246     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  2249   st->print(", sa_mask[0]=");
  2250   os::Posix::print_signal_set_short(st, &sa.sa_mask);
  2252   address rh = VMError::get_resetted_sighandler(sig);
  2253   // Maybe the handler was reset by VMError?
  2254   if(rh != NULL) {
  2255     handler = rh;
  2256     sa.sa_flags = VMError::get_resetted_sigflags(sig);
  2259   st->print(", sa_flags=");
  2260   os::Posix::print_sa_flags(st, sa.sa_flags);
  2262   // Check: is it our handler?
  2263   if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
  2264      handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
  2265     // It is our signal handler
  2266     // check for flags
  2267     if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
  2268       st->print(
  2269         ", flags were changed from " PTR32_FORMAT ", consider using jsig library",
  2270         os::Solaris::get_our_sigflags(sig));
  2273   st->cr();
  2276 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  2277   st->print_cr("Signal Handlers:");
  2278   print_signal_handler(st, SIGSEGV, buf, buflen);
  2279   print_signal_handler(st, SIGBUS , buf, buflen);
  2280   print_signal_handler(st, SIGFPE , buf, buflen);
  2281   print_signal_handler(st, SIGPIPE, buf, buflen);
  2282   print_signal_handler(st, SIGXFSZ, buf, buflen);
  2283   print_signal_handler(st, SIGILL , buf, buflen);
  2284   print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  2285   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  2286   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  2287   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  2288   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  2289   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  2290   print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  2291   print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
  2294 static char saved_jvm_path[MAXPATHLEN] = { 0 };
  2296 // Find the full path to the current module, libjvm.so
  2297 void os::jvm_path(char *buf, jint buflen) {
  2298   // Error checking.
  2299   if (buflen < MAXPATHLEN) {
  2300     assert(false, "must use a large-enough buffer");
  2301     buf[0] = '\0';
  2302     return;
  2304   // Lazy resolve the path to current module.
  2305   if (saved_jvm_path[0] != 0) {
  2306     strcpy(buf, saved_jvm_path);
  2307     return;
  2310   Dl_info dlinfo;
  2311   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  2312   assert(ret != 0, "cannot locate libjvm");
  2313   if (ret != 0 && dlinfo.dli_fname != NULL) {
  2314     realpath((char *)dlinfo.dli_fname, buf);
  2315   } else {
  2316     buf[0] = '\0';
  2317     return;
  2320   if (Arguments::created_by_gamma_launcher()) {
  2321     // Support for the gamma launcher.  Typical value for buf is
  2322     // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
  2323     // the right place in the string, then assume we are installed in a JDK and
  2324     // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
  2325     // up the path so it looks like libjvm.so is installed there (append a
  2326     // fake suffix hotspot/libjvm.so).
  2327     const char *p = buf + strlen(buf) - 1;
  2328     for (int count = 0; p > buf && count < 5; ++count) {
  2329       for (--p; p > buf && *p != '/'; --p)
  2330         /* empty */ ;
  2333     if (strncmp(p, "/jre/lib/", 9) != 0) {
  2334       // Look for JAVA_HOME in the environment.
  2335       char* java_home_var = ::getenv("JAVA_HOME");
  2336       if (java_home_var != NULL && java_home_var[0] != 0) {
  2337         char cpu_arch[12];
  2338         char* jrelib_p;
  2339         int   len;
  2340         sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
  2341 #ifdef _LP64
  2342         // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
  2343         if (strcmp(cpu_arch, "sparc") == 0) {
  2344           strcat(cpu_arch, "v9");
  2345         } else if (strcmp(cpu_arch, "i386") == 0) {
  2346           strcpy(cpu_arch, "amd64");
  2348 #endif
  2349         // Check the current module name "libjvm.so".
  2350         p = strrchr(buf, '/');
  2351         assert(strstr(p, "/libjvm") == p, "invalid library name");
  2353         realpath(java_home_var, buf);
  2354         // determine if this is a legacy image or modules image
  2355         // modules image doesn't have "jre" subdirectory
  2356         len = strlen(buf);
  2357         assert(len < buflen, "Ran out of buffer space");
  2358         jrelib_p = buf + len;
  2359         snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
  2360         if (0 != access(buf, F_OK)) {
  2361           snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
  2364         if (0 == access(buf, F_OK)) {
  2365           // Use current module name "libjvm.so"
  2366           len = strlen(buf);
  2367           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
  2368         } else {
  2369           // Go back to path of .so
  2370           realpath((char *)dlinfo.dli_fname, buf);
  2376   strncpy(saved_jvm_path, buf, MAXPATHLEN);
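       // An illustrative sketch (guarded out of the build): a hypothetical caller
       // of jvm_path().
       #if 0
       static void example_jvm_path(outputStream* st) {
         char buf[MAXPATHLEN];
         os::jvm_path(buf, (jint)sizeof(buf));
         st->print_cr("libjvm.so resolved to %s", buf);
       }
       #endif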
  2380 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  2381   // no prefix required, not even "_"
  2385 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  2386   // no suffix required
  2389 // This method is a copy of JDK's sysGetLastErrorString
  2390 // from src/solaris/hpi/src/system_md.c
  2392 size_t os::lasterror(char *buf, size_t len) {
  2394   if (errno == 0)  return 0;
  2396   const char *s = ::strerror(errno);
  2397   size_t n = ::strlen(s);
  2398   if (n >= len) {
  2399     n = len - 1;
  2401   ::strncpy(buf, s, n);
  2402   buf[n] = '\0';
  2403   return n;
  2407 // sun.misc.Signal
  2409 extern "C" {
  2410   static void UserHandler(int sig, void *siginfo, void *context) {
  2411     // If Ctrl-C is pressed during error reporting, the error handler has
  2412     // likely failed to abort. Let the VM die immediately.
  2413     if (sig == SIGINT && is_error_reported()) {
  2414        os::die();
  2417     os::signal_notify(sig);
  2418     // We do not need to reinstate the signal handler each time...
  2422 void* os::user_handler() {
  2423   return CAST_FROM_FN_PTR(void*, UserHandler);
  2426 class Semaphore : public StackObj {
  2427   public:
  2428     Semaphore();
  2429     ~Semaphore();
  2430     void signal();
  2431     void wait();
  2432     bool trywait();
  2433     bool timedwait(unsigned int sec, int nsec);
  2434   private:
  2435     sema_t _semaphore;
  2436 };
  2439 Semaphore::Semaphore() {
  2440   sema_init(&_semaphore, 0, NULL, NULL);
  2443 Semaphore::~Semaphore() {
  2444   sema_destroy(&_semaphore);
  2447 void Semaphore::signal() {
  2448   sema_post(&_semaphore);
  2451 void Semaphore::wait() {
  2452   sema_wait(&_semaphore);
  2455 bool Semaphore::trywait() {
  2456   return sema_trywait(&_semaphore) == 0;
  2459 bool Semaphore::timedwait(unsigned int sec, int nsec) {
  2460   struct timespec ts;
  2461   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
  2463   while (1) {
  2464     int result = sema_timedwait(&_semaphore, &ts);
  2465     if (result == 0) {
  2466       return true;
  2467     } else if (errno == EINTR) {
  2468       continue;
  2469     } else if (errno == ETIME) {
  2470       return false;
  2471     } else {
  2472       return false;
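       // An illustrative sketch (guarded out of the build): handing one unit of
       // work through this stack-allocated Semaphore wrapper. The scenario is
       // hypothetical.
       #if 0
       static void example_semaphore_use(Semaphore* sem) {
         sem->signal();                        // producer side: sema_post
         bool posted = sem->timedwait(1, 0);   // consumer side: wait up to 1 second
         if (!posted) {
           posted = sem->trywait();            // or poll without blocking
         }
       }
       #endif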
  2477 extern "C" {
  2478   typedef void (*sa_handler_t)(int);
  2479   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
  2482 void* os::signal(int signal_number, void* handler) {
  2483   struct sigaction sigAct, oldSigAct;
  2484   sigfillset(&(sigAct.sa_mask));
  2485   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  2486   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
  2488   if (sigaction(signal_number, &sigAct, &oldSigAct))
  2489     // -1 means registration failed
  2490     return (void *)-1;
  2492   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
  2495 void os::signal_raise(int signal_number) {
  2496   raise(signal_number);
  2499 /*
  2500  * The following code was moved here from os.cpp to make it
  2501  * platform specific, which it is by its very nature.
  2502  */
  2504 // a counter for each possible signal value
  2505 static int Sigexit = 0;
  2506 static int Maxlibjsigsigs;
  2507 static jint *pending_signals = NULL;
  2508 static int *preinstalled_sigs = NULL;
  2509 static struct sigaction *chainedsigactions = NULL;
  2510 static sema_t sig_sem;
  2511 typedef int (*version_getting_t)();
  2512 version_getting_t os::Solaris::get_libjsig_version = NULL;
  2513 static int libjsigversion = 0;
  2515 int os::sigexitnum_pd() {
  2516   assert(Sigexit > 0, "signal memory not yet initialized");
  2517   return Sigexit;
  2520 void os::Solaris::init_signal_mem() {
  2521   // Initialize signal structures
  2522   Maxsignum = SIGRTMAX;
  2523   Sigexit = Maxsignum+1;
  2524   assert(Maxsignum >0, "Unable to obtain max signal number");
  2526   Maxlibjsigsigs = Maxsignum;
  2528   // pending_signals has one int per signal
  2529   // The additional signal is for SIGEXIT - exit signal to signal_thread
  2530   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
  2531   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
  2533   if (UseSignalChaining) {
  2534      chainedsigactions = (struct sigaction *)os::malloc(sizeof(struct sigaction)
  2535        * (Maxsignum + 1), mtInternal);
  2536      memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
  2537      preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
  2538      memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
  2540   ourSigFlags = (int*)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
  2541   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
  2544 void os::signal_init_pd() {
  2545   int ret;
  2547   ret = ::sema_init(&sig_sem, 0, NULL, NULL);
  2548   assert(ret == 0, "sema_init() failed");
  2551 void os::signal_notify(int signal_number) {
  2552   int ret;
  2554   Atomic::inc(&pending_signals[signal_number]);
  2555   ret = ::sema_post(&sig_sem);
  2556   assert(ret == 0, "sema_post() failed");
  2559 static int check_pending_signals(bool wait_for_signal) {
  2560   int ret;
  2561   while (true) {
  2562     for (int i = 0; i < Sigexit + 1; i++) {
  2563       jint n = pending_signals[i];
  2564       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
  2565         return i;
  2568     if (!wait_for_signal) {
  2569       return -1;
  2571     JavaThread *thread = JavaThread::current();
  2572     ThreadBlockInVM tbivm(thread);
  2574     bool threadIsSuspended;
  2575     do {
  2576       thread->set_suspend_equivalent();
  2577       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  2578       while((ret = ::sema_wait(&sig_sem)) == EINTR)
  2579           ;
  2580       assert(ret == 0, "sema_wait() failed");
  2582       // were we externally suspended while we were waiting?
  2583       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
  2584       if (threadIsSuspended) {
  2585         //
  2586         // The semaphore has been incremented, but while we were waiting
  2587         // another thread suspended us. We don't want to continue running
  2588         // while suspended because that would surprise the thread that
  2589         // suspended us.
  2590         //
  2591         ret = ::sema_post(&sig_sem);
  2592         assert(ret == 0, "sema_post() failed");
  2594         thread->java_suspend_self();
  2596     } while (threadIsSuspended);
  2600 int os::signal_lookup() {
  2601   return check_pending_signals(false);
  2604 int os::signal_wait() {
  2605   return check_pending_signals(true);
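       // An illustrative sketch (guarded out of the build): the intended pairing of
       // the routines above. A handler such as UserHandler records a signal and the
       // dedicated signal thread picks it up; SIGHUP is an arbitrary example.
       #if 0
       static void example_signal_flow() {
         os::signal_notify(SIGHUP);     // bump pending_signals[SIGHUP], post sig_sem
         int sig = os::signal_wait();   // block on sig_sem, return a pending signal
       }
       #endif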
  2608 ////////////////////////////////////////////////////////////////////////////////
  2609 // Virtual Memory
  2611 static int page_size = -1;
  2613 // The mmap MAP_ALIGN flag is supported on Solaris 9 and later.  init_2() will
  2614 // clear this var if support is not available.
  2615 static bool has_map_align = true;
  2617 int os::vm_page_size() {
  2618   assert(page_size != -1, "must call os::init");
  2619   return page_size;
  2622 // Solaris allocates memory by pages.
  2623 int os::vm_allocation_granularity() {
  2624   assert(page_size != -1, "must call os::init");
  2625   return page_size;
  2628 static bool recoverable_mmap_error(int err) {
  2629   // See if the error is one we can let the caller handle. This
  2630   // list of errno values comes from the Solaris mmap(2) man page.
  2631   switch (err) {
  2632   case EBADF:
  2633   case EINVAL:
  2634   case ENOTSUP:
  2635     // let the caller deal with these errors
  2636     return true;
  2638   default:
  2639     // Any remaining errors on this OS can cause our reserved mapping
  2640     // to be lost. That can cause confusion where different data
  2641     // structures think they have the same memory mapped. The worst
  2642     // scenario is if both the VM and a library think they have the
  2643     // same memory mapped.
  2644     return false;
  2648 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
  2649                                     int err) {
  2650   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
  2651           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
  2652           strerror(err), err);
  2655 static void warn_fail_commit_memory(char* addr, size_t bytes,
  2656                                     size_t alignment_hint, bool exec,
  2657                                     int err) {
  2658   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
  2659           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
  2660           alignment_hint, exec, strerror(err), err);
  2663 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  2664   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  2665   size_t size = bytes;
  2666   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  2667   if (res != NULL) {
  2668     if (UseNUMAInterleaving) {
  2669       numa_make_global(addr, bytes);
  2671     return 0;
  2674   int err = errno;  // save errno from mmap() call in mmap_chunk()
  2676   if (!recoverable_mmap_error(err)) {
  2677     warn_fail_commit_memory(addr, bytes, exec, err);
  2678     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  2681   return err;
  2684 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  2685   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
  2688 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
  2689                                   const char* mesg) {
  2690   assert(mesg != NULL, "mesg must be specified");
  2691   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  2692   if (err != 0) {
  2693     // the caller wants all commit errors to exit with the specified mesg:
  2694     warn_fail_commit_memory(addr, bytes, exec, err);
  2695     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  2699 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
  2700                                     size_t alignment_hint, bool exec) {
  2701   int err = Solaris::commit_memory_impl(addr, bytes, exec);
  2702   if (err == 0) {
  2703     if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
  2704       // If the large page size has been set and the VM
  2705       // is using large pages, use the large page size
  2706       // if it is smaller than the alignment hint. This is
  2707       // a case where the VM wants to use a larger alignment size
  2708       // for its own reasons but still wants to use large pages
  2709       // (which is what matters for setting the mpss range).
  2710       size_t page_size = 0;
  2711       if (large_page_size() < alignment_hint) {
  2712         assert(UseLargePages, "Expected to be here for large page use only");
  2713         page_size = large_page_size();
  2714       } else {
  2715         // If the alignment hint is less than the large page
  2716         // size, the VM wants a particular alignment (thus the hint)
  2717         // for internal reasons.  Try to set the mpss range using
  2718         // the alignment_hint.
  2719         page_size = alignment_hint;
  2721       // Since this is a hint, ignore any failures.
  2722       (void)Solaris::setup_large_pages(addr, bytes, page_size);
  2725   return err;
  2728 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
  2729                           bool exec) {
  2730   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
  2733 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
  2734                                   size_t alignment_hint, bool exec,
  2735                                   const char* mesg) {
  2736   assert(mesg != NULL, "mesg must be specified");
  2737   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  2738   if (err != 0) {
  2739     // the caller wants all commit errors to exit with the specified mesg:
  2740     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
  2741     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
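       // An illustrative sketch (guarded out of the build): the reserve/commit/
       // uncommit cycle these pd_ routines implement, on a hypothetical scratch
       // region of four pages.
       #if 0
       static void example_reserve_commit_cycle() {
         size_t sz = 4 * os::vm_page_size();
         char* base = os::pd_reserve_memory(sz, NULL, 0);       // PROT_NONE mapping
         if (base != NULL && os::pd_commit_memory(base, sz, !ExecMem)) {
           base[0] = 0;                                         // now readable/writable
           os::pd_uncommit_memory(base, sz);                    // back to PROT_NONE
         }
       }
       #endif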
  2745 // Uncommit the pages in a specified region.
  2746 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
  2747   if (madvise(addr, bytes, MADV_FREE) < 0) {
  2748     debug_only(warning("MADV_FREE failed."));
  2749     return;
  2753 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  2754   return os::commit_memory(addr, size, !ExecMem);
  2757 bool os::remove_stack_guard_pages(char* addr, size_t size) {
  2758   return os::uncommit_memory(addr, size);
  2761 // Change the page size in a given range.
  2762 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  2763   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
  2764   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
  2765   if (UseLargePages) {
  2766     Solaris::setup_large_pages(addr, bytes, alignment_hint);
  2770 // Tell the OS to make the range local to the first-touching LWP
  2771 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  2772   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  2773   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
  2774     debug_only(warning("MADV_ACCESS_LWP failed."));
  2778 // Tell the OS that this range would be accessed from different LWPs.
  2779 void os::numa_make_global(char *addr, size_t bytes) {
  2780   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
  2781   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
  2782     debug_only(warning("MADV_ACCESS_MANY failed."));
  2786 // Get the number of the locality groups.
  2787 size_t os::numa_get_groups_num() {
  2788   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
  2789   return n != -1 ? n : 1;
  2792 // Get a list of leaf locality groups. A leaf lgroup is a group that
  2793 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
  2794 // board. An LWP is assigned to one of these groups upon creation.
  2795 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  2796    if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
  2797      ids[0] = 0;
  2798      return 1;
  2800    int result_size = 0, top = 1, bottom = 0, cur = 0;
  2801    for (int k = 0; k < size; k++) {
  2802      int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
  2803                                     (Solaris::lgrp_id_t*)&ids[top], size - top);
  2804      if (r == -1) {
  2805        ids[0] = 0;
  2806        return 1;
  2808      if (!r) {
  2809        // That's a leaf node.
  2810        assert (bottom <= cur, "Sanity check");
  2811        // Check if the node has memory
  2812        if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
  2813                                    NULL, 0, LGRP_RSRC_MEM) > 0) {
  2814          ids[bottom++] = ids[cur];
  2817      top += r;
  2818      cur++;
  2820    if (bottom == 0) {
  2821      // Handle the situation when the OS reports no memory available.
  2822      // Assume UMA architecture.
  2823      ids[0] = 0;
  2824      return 1;
  2826    return bottom;
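       // An illustrative sketch (guarded out of the build): enumerating the leaf
       // lgroups found by the routine above.
       #if 0
       static void example_list_leaf_lgroups(outputStream* st) {
         size_t n = os::numa_get_groups_num();
         int* ids = NEW_C_HEAP_ARRAY(int, n, mtInternal);
         size_t leaves = os::numa_get_leaf_groups(ids, n);
         for (size_t i = 0; i < leaves; i++) {
           st->print_cr("leaf lgroup id: %d", ids[i]);
         }
         FREE_C_HEAP_ARRAY(int, ids, mtInternal);
       }
       #endif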
  2829 // Detect the topology change. Typically happens during CPU plugging-unplugging.
  2830 bool os::numa_topology_changed() {
  2831   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
  2832   if (is_stale != -1 && is_stale) {
  2833     Solaris::lgrp_fini(Solaris::lgrp_cookie());
  2834     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
  2835     assert(c != 0, "Failure to initialize LGRP API");
  2836     Solaris::set_lgrp_cookie(c);
  2837     return true;
  2839   return false;
  2842 // Get the group id of the current LWP.
  2843 int os::numa_get_group_id() {
  2844   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
  2845   if (lgrp_id == -1) {
  2846     return 0;
  2848   const int size = os::numa_get_groups_num();
  2849   int *ids = (int*)alloca(size * sizeof(int));
  2851   // Get the ids of all lgroups with memory; r is the count.
  2852   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
  2853                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  2854   if (r <= 0) {
  2855     return 0;
  2857   return ids[os::random() % r];
  2860 // Request information about the page.
  2861 bool os::get_page_info(char *start, page_info* info) {
  2862   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  2863   uint64_t addr = (uintptr_t)start;
  2864   uint64_t outdata[2];
  2865   uint_t validity = 0;
  2867   if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
  2868     return false;
  2871   info->size = 0;
  2872   info->lgrp_id = -1;
  2874   if ((validity & 1) != 0) {
  2875     if ((validity & 2) != 0) {
  2876       info->lgrp_id = outdata[0];
  2878     if ((validity & 4) != 0) {
  2879       info->size = outdata[1];
  2881     return true;
  2883   return false;
  2886 // Scan the pages from start to end until a page different than
  2887 // the one described in the info parameter is encountered.
  2888 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  2889   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  2890   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  2891   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
  2892   uint_t validity[MAX_MEMINFO_CNT];
  2894   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  2895   uint64_t p = (uint64_t)start;
  2896   while (p < (uint64_t)end) {
  2897     addrs[0] = p;
  2898     size_t addrs_count = 1;
  2899     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
  2900       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
  2901       addrs_count++;
  2904     if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
  2905       return NULL;
  2908     size_t i = 0;
  2909     for (; i < addrs_count; i++) {
  2910       if ((validity[i] & 1) != 0) {
  2911         if ((validity[i] & 4) != 0) {
  2912           if (outdata[types * i + 1] != page_expected->size) {
  2913             break;
  2915         } else
  2916           if (page_expected->size != 0) {
  2917             break;
  2920         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
  2921           if (outdata[types * i] != page_expected->lgrp_id) {
  2922             break;
  2925       } else {
  2926         return NULL;
  2930     if (i < addrs_count) {
  2931       if ((validity[i] & 2) != 0) {
  2932         page_found->lgrp_id = outdata[types * i];
  2933       } else {
  2934         page_found->lgrp_id = -1;
  2936       if ((validity[i] & 4) != 0) {
  2937         page_found->size = outdata[types * i + 1];
  2938       } else {
  2939         page_found->size = 0;
  2941       return (char*)addrs[i];
  2944     p = addrs[addrs_count - 1] + page_size;
  2946   return end;
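// Layout note for the batched meminfo(2) call above (it follows directly from
// the indexing in the loop): for request i, outdata[types * i] holds the
// lgroup id (MEMINFO_VLGRP) and outdata[types * i + 1] holds the page size
// (MEMINFO_VPAGESIZE), while validity[i] uses the same bit layout as in
// os::get_page_info().  A mismatch in either field stops the scan and the
// offending address is reported back through page_found.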
  2949 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  2950   size_t size = bytes;
  2951   // Map uncommitted pages PROT_NONE so we fail early if we touch an
  2952   // uncommitted page. Otherwise, the read/write might succeed if we
  2953   // have enough swap space to back the physical page.
  2954   return
  2955     NULL != Solaris::mmap_chunk(addr, size,
  2956                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
  2957                                 PROT_NONE);
  2960 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
  2961   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
  2963   if (b == MAP_FAILED) {
  2964     return NULL;
  2966   return b;
  2969 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
  2970   char* addr = requested_addr;
  2971   int flags = MAP_PRIVATE | MAP_NORESERVE;
  2973   assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
  2975   if (fixed) {
  2976     flags |= MAP_FIXED;
  2977   } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
  2978     flags |= MAP_ALIGN;
  2979     addr = (char*) alignment_hint;
  2982   // Map uncommitted pages PROT_NONE so we fail early if we touch an
  2983   // uncommitted page. Otherwise, the read/write might succeed if we
  2984   // have enough swap space to back the physical page.
  2985   return mmap_chunk(addr, bytes, flags, PROT_NONE);
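// Illustrative usage (hypothetical values): with MAP_ALIGN the addr argument
// of mmap() carries the requested alignment rather than a placement hint, so
//
//   char* p = os::Solaris::anon_mmap(NULL, 16 * M, 4 * M, false);
//
// asks for a 16M reservation aligned to 4M anywhere in the address space,
// while fixed == true turns the call into a MAP_FIXED mapping at
// requested_addr (and the assert above then requires alignment_hint == 0).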
  2988 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  2989   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
  2991   guarantee(requested_addr == NULL || requested_addr == addr,
  2992             "OS failed to return requested mmap address.");
  2993   return addr;
  2996 // Reserve memory at an arbitrary address, only if that area is
  2997 // available (and not reserved for something else).
  2999 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  3000   const int max_tries = 10;
  3001   char* base[max_tries];
  3002   size_t size[max_tries];
  3004   // Solaris adds a gap between mmap'ed regions.  The size of the gap
  3005   // is dependent on the requested size and the MMU.  Our initial gap
  3006   // value here is just a guess and will be corrected later.
  3007   bool had_top_overlap = false;
  3008   bool have_adjusted_gap = false;
  3009   size_t gap = 0x400000;
  3011   // Assert only that the size is a multiple of the page size, since
  3012   // that's all that mmap requires, and since that's all we really know
  3013   // about at this low abstraction level.  If we need higher alignment,
  3014   // we can either pass an alignment to this method or verify alignment
  3015   // in one of the methods further up the call chain.  See bug 5044738.
  3016   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
  3018   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  3019   // Give it a try, if the kernel honors the hint we can return immediately.
  3020   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
  3022   volatile int err = errno;
  3023   if (addr == requested_addr) {
  3024     return addr;
  3025   } else if (addr != NULL) {
  3026     pd_unmap_memory(addr, bytes);
  3029   if (PrintMiscellaneous && Verbose) {
  3030     char buf[256];
  3031     buf[0] = '\0';
  3032     if (addr == NULL) {
  3033       jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
  3035     warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
  3036             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
  3037             "%s", bytes, requested_addr, addr, buf);
  3040   // Address hint method didn't work.  Fall back to the old method.
  3041   // In theory, once SNV becomes our oldest supported platform, this
  3042   // code will no longer be needed.
  3043   //
  3044   // Repeatedly allocate blocks until the block is allocated at the
  3045   // right spot. Give up after max_tries.
  3046   int i;
  3047   for (i = 0; i < max_tries; ++i) {
  3048     base[i] = reserve_memory(bytes);
  3050     if (base[i] != NULL) {
  3051       // Is this the block we wanted?
  3052       if (base[i] == requested_addr) {
  3053         size[i] = bytes;
  3054         break;
  3057       // check that the gap value is right
  3058       if (had_top_overlap && !have_adjusted_gap) {
  3059         size_t actual_gap = base[i-1] - base[i] - bytes;
  3060         if (gap != actual_gap) {
  3061           // adjust the gap value and retry the last 2 allocations
  3062           assert(i > 0, "gap adjustment code problem");
  3063           have_adjusted_gap = true;  // adjust the gap only once, just in case
  3064           gap = actual_gap;
  3065           if (PrintMiscellaneous && Verbose) {
  3066             warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
  3068           unmap_memory(base[i], bytes);
  3069           unmap_memory(base[i-1], size[i-1]);
  3070           i-=2;
  3071           continue;
  3075       // Does this overlap the block we wanted? Give back the overlapped
  3076       // parts and try again.
  3077       //
  3078       // There is still a bug in this code: if top_overlap == bytes,
  3079       // the overlap is offset from requested region by the value of gap.
  3080       // In this case giving back the overlapped part will not work,
  3081       // because we'll give back the entire block at base[i] and
  3082       // therefore the subsequent allocation will not generate a new gap.
  3083       // This could be fixed with a new algorithm that used larger
  3084       // or variable size chunks to find the requested region -
  3085       // but such a change would introduce additional complications.
  3086       // It's rare enough that the planets align for this bug,
  3087       // so we'll just wait for a fix for 6204603/5003415 which
  3088       // will provide a mmap flag to allow us to avoid this business.
  3090       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
  3091       if (top_overlap >= 0 && top_overlap < bytes) {
  3092         had_top_overlap = true;
  3093         unmap_memory(base[i], top_overlap);
  3094         base[i] += top_overlap;
  3095         size[i] = bytes - top_overlap;
  3096       } else {
  3097         size_t bottom_overlap = base[i] + bytes - requested_addr;
  3098         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
  3099           if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
  3100             warning("attempt_reserve_memory_at: possible alignment bug");
  3102           unmap_memory(requested_addr, bottom_overlap);
  3103           size[i] = bytes - bottom_overlap;
  3104         } else {
  3105           size[i] = bytes;
  3111   // Give back the unused reserved pieces.
  3113   for (int j = 0; j < i; ++j) {
  3114     if (base[j] != NULL) {
  3115       unmap_memory(base[j], size[j]);
  3119   return (i < max_tries) ? requested_addr : NULL;
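// Worked example of the gap adjustment above (addresses are made up): with
// bytes == 0x800000, suppose two consecutive reservations come back at
// base[i-1] == 0x10000000 and base[i] == 0xF700000.  Then
//   actual_gap = base[i-1] - base[i] - bytes
//              = 0x10000000 - 0xF700000 - 0x800000 = 0x100000
// so the initial 0x400000 guess is replaced by 0x100000, the last two blocks
// are unmapped, and those two iterations are retried with the corrected gap.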
  3122 bool os::pd_release_memory(char* addr, size_t bytes) {
  3123   size_t size = bytes;
  3124   return munmap(addr, size) == 0;
  3127 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
  3128   assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
  3129          "addr must be page aligned");
  3130   int retVal = mprotect(addr, bytes, prot);
  3131   return retVal == 0;
  3134 // Protect memory (Used to pass readonly pages through
  3135 // JNI GetArray<type>Elements with empty arrays.)
  3136 // Also, used for serialization page and for compressed oops null pointer
  3137 // checking.
  3138 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
  3139                         bool is_committed) {
  3140   unsigned int p = 0;
  3141   switch (prot) {
  3142   case MEM_PROT_NONE: p = PROT_NONE; break;
  3143   case MEM_PROT_READ: p = PROT_READ; break;
  3144   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  3145   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  3146   default:
  3147     ShouldNotReachHere();
  3149   // is_committed is unused.
  3150   return solaris_mprotect(addr, bytes, p);
  3153 // guard_memory and unguard_memory only happen within stack guard pages.
  3154 // Since ISM pertains only to the heap, guard and unguard memory should not
  3155 // happen within an ISM region.
  3156 bool os::guard_memory(char* addr, size_t bytes) {
  3157   return solaris_mprotect(addr, bytes, PROT_NONE);
  3160 bool os::unguard_memory(char* addr, size_t bytes) {
  3161   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
  3164 // Large page support
  3165 static size_t _large_page_size = 0;
  3167 // Insertion sort for small arrays (descending order).
  3168 static void insertion_sort_descending(size_t* array, int len) {
  3169   for (int i = 0; i < len; i++) {
  3170     size_t val = array[i];
  3171     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
  3172       size_t tmp = array[key];
  3173       array[key] = array[key - 1];
  3174       array[key - 1] = tmp;
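// For example, insertion_sort_descending() turns { 8K, 64K, 4M, 256M }
// (len == 4) into { 256M, 4M, 64K, 8K }, i.e. the largest supported page
// size ends up at index 0, which is what mpss_sanity_check() below relies on.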
  3179 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
  3180   const unsigned int usable_count = VM_Version::page_size_count();
  3181   if (usable_count == 1) {
  3182     return false;
  3185   // Find the right getpagesizes interface.  When solaris 11 is the minimum
  3186   // build platform, getpagesizes() (without the '2') can be called directly.
  3187   typedef int (*gps_t)(size_t[], int);
  3188   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  3189   if (gps_func == NULL) {
  3190     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
  3191     if (gps_func == NULL) {
  3192       if (warn) {
  3193         warning("MPSS is not supported by the operating system.");
  3195       return false;
  3199   // Fill the array of page sizes.
  3200   int n = (*gps_func)(_page_sizes, page_sizes_max);
  3201   assert(n > 0, "Solaris bug?");
  3203   if (n == page_sizes_max) {
  3204     // Add a sentinel value (necessary only if the array was completely filled;
  3205     // otherwise the static, zero-initialized array already provides one).
  3206     _page_sizes[--n] = 0;
  3207     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  3209   assert(_page_sizes[n] == 0, "missing sentinel");
  3210   trace_page_sizes("available page sizes", _page_sizes, n);
  3212   if (n == 1) return false;     // Only one page size available.
  3214   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  3215   // select up to usable_count elements.  First sort the array, find the first
  3216   // acceptable value, then copy the usable sizes to the top of the array and
  3217   // trim the rest.  Make sure to include the default page size :-).
  3218   //
  3219   // A better policy could get rid of the 4M limit by taking the sizes of the
  3220   // important VM memory regions (java heap and possibly the code cache) into
  3221   // account.
  3222   insertion_sort_descending(_page_sizes, n);
  3223   const size_t size_limit =
  3224     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  3225   int beg;
  3226   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  3227   const int end = MIN2((int)usable_count, n) - 1;
  3228   for (int cur = 0; cur < end; ++cur, ++beg) {
  3229     _page_sizes[cur] = _page_sizes[beg];
  3231   _page_sizes[end] = vm_page_size();
  3232   _page_sizes[end + 1] = 0;
  3234   if (_page_sizes[end] > _page_sizes[end - 1]) {
  3235     // Default page size is not the smallest; sort again.
  3236     insertion_sort_descending(_page_sizes, end + 1);
  3238   *page_size = _page_sizes[0];
  3240   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  3241   return true;
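// Worked example (illustrative sizes): assume getpagesizes() reports
// { 8K, 64K, 4M, 256M }, usable_count == 3 and LargePageSizeInBytes is left
// at its default.  After the descending sort the array is { 256M, 4M, 64K, 8K };
// 256M is skipped because it exceeds the 4M size_limit, the remaining sizes
// are copied down, the default page size (vm_page_size(), 8K here) is forced
// into the last usable slot and a 0 sentinel follows, giving
// { 4M, 64K, 8K, 0 } and *page_size == 4M.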
  3244 void os::large_page_init() {
  3245   if (UseLargePages) {
  3246     // Print a warning if any large-page-related flag is specified on the command line.
  3247     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
  3248                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  3250     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
  3254 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
  3255   // Signal to the OS that we want large pages for addresses
  3256   // from start to start + bytes
  3257   struct memcntl_mha mpss_struct;
  3258   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
  3259   mpss_struct.mha_pagesize = align;
  3260   mpss_struct.mha_flags = 0;
  3261   // Upon successful completion, memcntl() returns 0
  3262   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
  3263     debug_only(warning("Attempt to use MPSS failed."));
  3264     return false;
  3266   return true;
  3269 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
  3270   fatal("os::reserve_memory_special should not be called on Solaris.");
  3271   return NULL;
  3274 bool os::release_memory_special(char* base, size_t bytes) {
  3275   fatal("os::release_memory_special should not be called on Solaris.");
  3276   return false;
  3279 size_t os::large_page_size() {
  3280   return _large_page_size;
  3283 // MPSS allows application to commit large page memory on demand; with ISM
  3284 // the entire memory region must be allocated as shared memory.
  3285 bool os::can_commit_large_page_memory() {
  3286   return true;
  3289 bool os::can_execute_large_page_memory() {
  3290   return true;
  3293 static int os_sleep(jlong millis, bool interruptible) {
  3294   const jlong limit = INT_MAX;
  3295   jlong prevtime;
  3296   int res;
  3298   while (millis > limit) {
  3299     if ((res = os_sleep(limit, interruptible)) != OS_OK)
  3300       return res;
  3301     millis -= limit;
  3304   // Restart interrupted polls with new parameters until the proper delay
  3305   // has been completed.
  3307   prevtime = getTimeMillis();
  3309   while (millis > 0) {
  3310     jlong newtime;
  3312     if (!interruptible) {
  3313       // Following assert fails for os::yield_all:
  3314       // assert(!thread->is_Java_thread(), "must not be java thread");
  3315       res = poll(NULL, 0, millis);
  3316     } else {
  3317       JavaThread *jt = JavaThread::current();
  3319       INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
  3320         os::Solaris::clear_interrupted);
  3323     // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
  3324     // thread.Interrupt.
  3326     // See c/r 6751923. Poll can return 0 before the requested time
  3327     // has elapsed if the time is set via clock_settime (as NTP does).
  3328     // res == 0 means poll timed out (see man poll RETURN VALUES).
  3329     // The logic below checks that we really did sleep for at least
  3330     // "millis"; if not, we sleep again for the remainder.
  3331     if ((res == 0) || ((res == OS_ERR) && (errno == EINTR))) {
  3332       newtime = getTimeMillis();
  3333       assert(newtime >= prevtime, "time moving backwards");
  3334     /* Doing prevtime and newtime in microseconds doesn't help precision,
  3335        and trying to round up to avoid lost milliseconds can result in a
  3336        too-short delay. */
  3337       millis -= newtime - prevtime;
  3338       if (millis <= 0)
  3339         return OS_OK;
  3340       prevtime = newtime;
  3341     } else
  3342       return res;
  3345   return OS_OK;
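// For example, if poll() was asked for 500 ms but returns 0 when only 200 ms
// have elapsed according to getTimeMillis(), the loop above computes
// millis -= (newtime - prevtime), i.e. 500 - 200 == 300, and polls again for
// the remaining 300 ms; OS_OK is returned only once the measured sleep covers
// the full request.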
  3348 // Read calls from inside the vm need to perform state transitions
  3349 size_t os::read(int fd, void *buf, unsigned int nBytes) {
  3350   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
  3353 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  3354   INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
  3357 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  3358   assert(thread == Thread::current(),  "thread consistency check");
  3360   // TODO-FIXME: this should be removed.
  3361   // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a livelock
  3362   // situation with a JavaThread being starved out of an lwp. The kernel doesn't seem to generate
  3363   // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  3364   // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  3365   // is fooled into believing that the system is making progress. In the code below we block the
  3366   // watcher thread while a safepoint is in progress so that it does not appear as though the
  3367   // system is making progress.
  3368   if (!Solaris::T2_libthread() &&
  3369       thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
  3370     // We now try to acquire the threads lock. Since this lock is held by the VM thread during
  3371     // the entire safepoint, the watcher thread will line up here during the safepoint.
  3372     Threads_lock->lock_without_safepoint_check();
  3373     Threads_lock->unlock();
  3376   if (thread->is_Java_thread()) {
  3377     // This is a JavaThread so we honor the _thread_blocked protocol
  3378     // even for sleeps of 0 milliseconds. This was originally done
  3379     // as a workaround for bug 4338139. However, now we also do it
  3380     // to honor the suspend-equivalent protocol.
  3382     JavaThread *jt = (JavaThread *) thread;
  3383     ThreadBlockInVM tbivm(jt);
  3385     jt->set_suspend_equivalent();
  3386     // cleared by handle_special_suspend_equivalent_condition() or
  3387     // java_suspend_self() via check_and_wait_while_suspended()
  3389     int ret_code;
  3390     if (millis <= 0) {
  3391       thr_yield();
  3392       ret_code = 0;
  3393     } else {
  3394       // The original sleep() implementation did not create an
  3395       // OSThreadWaitState helper for sleeps of 0 milliseconds.
  3396       // I'm preserving that decision for now.
  3397       OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
  3399       ret_code = os_sleep(millis, interruptible);
  3402     // were we externally suspended while we were waiting?
  3403     jt->check_and_wait_while_suspended();
  3405     return ret_code;
  3408   // non-JavaThread from this point on:
  3410   if (millis <= 0) {
  3411     thr_yield();
  3412     return 0;
  3415   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  3417   return os_sleep(millis, interruptible);
  3420 void os::naked_short_sleep(jlong ms) {
  3421   assert(ms < 1000, "Un-interruptible sleep, short time use only");
  3423   // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
  3424   // Solaris requires -lrt for this.
  3425   usleep((ms * 1000));
  3427   return;
  3430 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
  3431 void os::infinite_sleep() {
  3432   while (true) {    // sleep forever ...
  3433     ::sleep(100);   // ... 100 seconds at a time
  3437 // Used to convert frequent JVM_Yield() to nops
  3438 bool os::dont_yield() {
  3439   if (DontYieldALot) {
  3440     static hrtime_t last_time = 0;
  3441     hrtime_t diff = getTimeNanos() - last_time;
  3443     if (diff < DontYieldALotInterval * 1000000)
  3444       return true;
  3446     last_time += diff;
  3448     return false;
  3450   else {
  3451     return false;
  3455 // Caveat: Solaris os::yield() causes a thread-state transition whereas
  3456 // the linux and win32 implementations do not.  This should be checked.
  3458 void os::yield() {
  3459   // Yields to all threads with same or greater priority
  3460   os::sleep(Thread::current(), 0, false);
  3463 // Note that yield semantics are defined by the scheduling class to which
  3464 // the thread currently belongs.  Typically, yield will not yield to
  3465 // other equal or higher priority threads that reside on the dispatch queues
  3466 // of other CPUs.
  3468 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
  3471 // On Solaris we found that yield_all doesn't always yield to all other threads.
  3472 // There have been cases where there is a thread ready to execute but it doesn't
  3473 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
  3474 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
  3475 // SIGWAITING signal which will cause a new lwp to be created. So we count the
  3476 // number of times yield_all is called in the one loop and increase the sleep
  3477 // time after 8 attempts. If this fails too, we increase the concurrency level
  3478 // so that the starving thread can get an lwp.
  3480 void os::yield_all(int attempts) {
  3481   // Yields to all threads, including threads with lower priorities
  3482   if (attempts == 0) {
  3483     os::sleep(Thread::current(), 1, false);
  3484   } else {
  3485     int iterations = attempts % 30;
  3486     if (iterations == 0 && !os::Solaris::T2_libthread()) {
  3487       // thr_setconcurrency and _getconcurrency make sense only under T1.
  3488       int noofLWPS = thr_getconcurrency();
  3489       if (noofLWPS < (Threads::number_of_threads() + 2)) {
  3490         thr_setconcurrency(thr_getconcurrency() + 1);
  3492     } else if (iterations < 25) {
  3493       os::sleep(Thread::current(), 1, false);
  3494     } else {
  3495       os::sleep(Thread::current(), 10, false);
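// Schedule implied by the code above for attempts != 0, with
// iterations == attempts % 30:
//   iterations == 0 (T1 libthread only) -> possibly raise the concurrency level
//   iterations  < 25 (otherwise)        -> sleep 1 ms
//   iterations 25..29                   -> sleep 10 ms
// so a starving thread sees progressively longer waits as the caller keeps
// retrying.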
  3500 // Called from the tight loops to possibly influence time-sharing heuristics
  3501 void os::loop_breaker(int attempts) {
  3502   os::yield_all(attempts);
  3506 // Interface for setting lwp priorities.  If we are using T2 libthread,
  3507 // which forces the use of BoundThreads, or we manually set UseBoundThreads,
  3508 // all of our threads will be assigned to real lwp's.  Using the thr_setprio
  3509 // function is meaningless in this mode, so we must adjust the real lwp's priority.
  3510 // The routines below implement the getting and setting of lwp priorities.
  3511 //
  3512 // Note: There are three priority scales used on Solaris.  Java priorities,
  3513 //       which range from 1 to 10; the libthread "thr_setprio" scale, which ranges
  3514 //       from 0 to 127; and the current scheduling class of the process we
  3515 //       are running in, which is typically from -60 to +60.
  3516 //       The setting of the lwp priorities is done after a call to thr_setprio,
  3517 //       so Java priorities are mapped to libthread priorities and we map from
  3518 //       the latter to lwp priorities.  We don't keep priorities stored in
  3519 //       Java priorities since some of our worker threads want to set priorities
  3520 //       higher than all Java threads.
  3521 //
  3522 // For related information:
  3523 // (1)  man -s 2 priocntl
  3524 // (2)  man -s 4 priocntl
  3525 // (3)  man dispadmin
  3526 // =    librt.so
  3527 // =    libthread/common/rtsched.c - thrp_setlwpprio().
  3528 // =    ps -cL <pid> ... to validate priority.
  3529 // =    sched_get_priority_min and _max
  3530 //              pthread_create
  3531 //              sched_setparam
  3532 //              pthread_setschedparam
  3533 //
  3534 // Assumptions:
  3535 // +    We assume that all threads in the process belong to the same
  3536 //              scheduling class, i.e. a homogeneous process.
  3537 // +    Must be root or in the IA group to change the "interactive" attribute.
  3538 //              Priocntl() will fail silently.  The only indication of failure is when
  3539 //              we read back the value and notice that it hasn't changed.
  3540 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
  3541 // +    For RT, change timeslice as well.  Invariant:
  3542 //              constant "priority integral"
  3543 //              Konst == TimeSlice * (60-Priority)
  3544 //              Given a priority, compute appropriate timeslice.
  3545 // +    Higher numerical values have higher priority.
  3547 // sched class attributes
  3548 typedef struct {
  3549         int   schedPolicy;              // classID
  3550         int   maxPrio;
  3551         int   minPrio;
  3552 } SchedInfo;
  3555 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
  3557 #ifdef ASSERT
  3558 static int  ReadBackValidate = 1;
  3559 #endif
  3560 static int  myClass     = 0;
  3561 static int  myMin       = 0;
  3562 static int  myMax       = 0;
  3563 static int  myCur       = 0;
  3564 static bool priocntl_enable = false;
  3566 static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
  3567 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
  3570 // lwp_priocntl_init
  3571 //
  3572 // Try to determine the priority scale for our process.
  3573 //
  3574 // Return errno or 0 if OK.
  3575 //
  3576 static int lwp_priocntl_init () {
  3577   int rslt;
  3578   pcinfo_t ClassInfo;
  3579   pcparms_t ParmInfo;
  3580   int i;
  3582   if (!UseThreadPriorities) return 0;
  3584   // We are using Bound threads, we need to determine our priority ranges
  3585   if (os::Solaris::T2_libthread() || UseBoundThreads) {
  3586     // If ThreadPriorityPolicy is 1, switch tables
  3587     if (ThreadPriorityPolicy == 1) {
  3588       for (i = 0 ; i < CriticalPriority+1; i++)
  3589         os::java_to_os_priority[i] = prio_policy1[i];
  3591     if (UseCriticalJavaThreadPriority) {
  3592       // MaxPriority always maps to the FX scheduling class and criticalPrio.
  3593       // See set_native_priority() and set_lwp_class_and_priority().
  3594       // Save original MaxPriority mapping in case attempt to
  3595       // use critical priority fails.
  3596       java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
  3597       // Set negative to distinguish from other priorities
  3598       os::java_to_os_priority[MaxPriority] = -criticalPrio;
  3601   // Not using Bound Threads, set to ThreadPolicy 1
  3602   else {
  3603     for ( i = 0 ; i < CriticalPriority+1; i++ ) {
  3604       os::java_to_os_priority[i] = prio_policy1[i];
  3606     return 0;
  3609   // Get IDs for a set of well-known scheduling classes.
  3610 // TODO-FIXME: GETCLINFO returns the current # of classes in
  3611 // the system.  We should have a loop that iterates over the
  3612   // classID values, which are known to be "small" integers.
  3614   strcpy(ClassInfo.pc_clname, "TS");
  3615   ClassInfo.pc_cid = -1;
  3616   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3617   if (rslt < 0) return errno;
  3618   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  3619   tsLimits.schedPolicy = ClassInfo.pc_cid;
  3620   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  3621   tsLimits.minPrio = -tsLimits.maxPrio;
  3623   strcpy(ClassInfo.pc_clname, "IA");
  3624   ClassInfo.pc_cid = -1;
  3625   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3626   if (rslt < 0) return errno;
  3627   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  3628   iaLimits.schedPolicy = ClassInfo.pc_cid;
  3629   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  3630   iaLimits.minPrio = -iaLimits.maxPrio;
  3632   strcpy(ClassInfo.pc_clname, "RT");
  3633   ClassInfo.pc_cid = -1;
  3634   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3635   if (rslt < 0) return errno;
  3636   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  3637   rtLimits.schedPolicy = ClassInfo.pc_cid;
  3638   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  3639   rtLimits.minPrio = 0;
  3641   strcpy(ClassInfo.pc_clname, "FX");
  3642   ClassInfo.pc_cid = -1;
  3643   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  3644   if (rslt < 0) return errno;
  3645   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
  3646   fxLimits.schedPolicy = ClassInfo.pc_cid;
  3647   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
  3648   fxLimits.minPrio = 0;
  3650   // Query our "current" scheduling class.
  3651   // This will normally be IA, TS or, rarely, FX or RT.
  3652   memset(&ParmInfo, 0, sizeof(ParmInfo));
  3653   ParmInfo.pc_cid = PC_CLNULL;
  3654   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  3655   if (rslt < 0) return errno;
  3656   myClass = ParmInfo.pc_cid;
  3658   // We now know our scheduling classId, get specific information
  3659   // about the class.
  3660   ClassInfo.pc_cid = myClass;
  3661   ClassInfo.pc_clname[0] = 0;
  3662   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
  3663   if (rslt < 0) return errno;
  3665   if (ThreadPriorityVerbose) {
  3666     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
  3669   memset(&ParmInfo, 0, sizeof(pcparms_t));
  3670   ParmInfo.pc_cid = PC_CLNULL;
  3671   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  3672   if (rslt < 0) return errno;
  3674   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
  3675     myMin = rtLimits.minPrio;
  3676     myMax = rtLimits.maxPrio;
  3677   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
  3678     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
  3679     myMin = iaLimits.minPrio;
  3680     myMax = iaLimits.maxPrio;
  3681     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
  3682   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
  3683     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
  3684     myMin = tsLimits.minPrio;
  3685     myMax = tsLimits.maxPrio;
  3686     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
  3687   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
  3688     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
  3689     myMin = fxLimits.minPrio;
  3690     myMax = fxLimits.maxPrio;
  3691     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
  3692   } else {
  3693     // No clue - punt
  3694     if (ThreadPriorityVerbose)
  3695       tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
  3696     return EINVAL;      // no clue, punt
  3699   if (ThreadPriorityVerbose) {
  3700     tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
  3703   priocntl_enable = true;  // Enable changing priorities
  3704   return 0;
  3707 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
  3708 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
  3709 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
  3710 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
  3713 // scale_to_lwp_priority
  3714 //
  3715 // Convert from the libthread "thr_setprio" scale to our current
  3716 // lwp scheduling class scale.
  3717 //
  3718 static
  3719 int     scale_to_lwp_priority (int rMin, int rMax, int x)
  3721   int v;
  3723   if (x == 127) return rMax;            // avoid round-down
  3724   v = ((x * (rMax - rMin)) / 128) + rMin;
  3725   return v;
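// Worked examples, assuming the typical IA/TS range [-60, 60]
// (rMin == -60, rMax == 60):
//   x == 127 -> 60   (special-cased above to avoid rounding the top value down)
//   x ==  64 -> ((64 * 120) / 128) - 60 == 60 - 60 == 0
//   x ==   0 -> ((0 * 120) / 128) - 60  == -60
// i.e. the 0..127 libthread scale is mapped linearly onto the lwp class range.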
  3729 // set_lwp_class_and_priority
  3730 //
  3731 // Set the class and priority of the lwp.  This call should only
  3732 // be made when using bound threads (T2 threads are bound by default).
  3733 //
  3734 int set_lwp_class_and_priority(int ThreadID, int lwpid,
  3735                                int newPrio, int new_class, bool scale) {
  3736   int rslt;
  3737   int Actual, Expected, prv;
  3738   pcparms_t ParmInfo;                   // for GET-SET
  3739 #ifdef ASSERT
  3740   pcparms_t ReadBack;                   // for readback
  3741 #endif
  3743   // Set priority via PC_GETPARMS, update, PC_SETPARMS
  3744   // Query current values.
  3745   // TODO: accelerate this by eliminating the PC_GETPARMS call.
  3746   // Cache "pcparms_t" in global ParmCache.
  3747   // TODO: elide set-to-same-value
  3749   // If something went wrong on init, don't change priorities.
  3750   if ( !priocntl_enable ) {
  3751     if (ThreadPriorityVerbose)
  3752       tty->print_cr("Trying to set priority but init failed, ignoring");
  3753     return EINVAL;
  3756   // If the lwp hasn't started yet, just return;
  3757   // the _start routine will call us again.
  3758   if ( lwpid <= 0 ) {
  3759     if (ThreadPriorityVerbose) {
  3760       tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
  3761                      INTPTR_FORMAT " to %d, lwpid not set",
  3762                      ThreadID, newPrio);
  3764     return 0;
  3767   if (ThreadPriorityVerbose) {
  3768     tty->print_cr ("set_lwp_class_and_priority("
  3769                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
  3770                    ThreadID, lwpid, newPrio);
  3773   memset(&ParmInfo, 0, sizeof(pcparms_t));
  3774   ParmInfo.pc_cid = PC_CLNULL;
  3775   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  3776   if (rslt < 0) return errno;
  3778   int cur_class = ParmInfo.pc_cid;
  3779   ParmInfo.pc_cid = (id_t)new_class;
  3781   if (new_class == rtLimits.schedPolicy) {
  3782     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
  3783     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
  3784                                                        rtLimits.maxPrio, newPrio)
  3785                                : newPrio;
  3786     rtInfo->rt_tqsecs  = RT_NOCHANGE;
  3787     rtInfo->rt_tqnsecs = RT_NOCHANGE;
  3788     if (ThreadPriorityVerbose) {
  3789       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
  3791   } else if (new_class == iaLimits.schedPolicy) {
  3792     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
  3793     int maxClamped     = MIN2(iaLimits.maxPrio,
  3794                               cur_class == new_class
  3795                                 ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
  3796     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
  3797                                                        maxClamped, newPrio)
  3798                                : newPrio;
  3799     iaInfo->ia_uprilim = cur_class == new_class
  3800                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
  3801     iaInfo->ia_mode    = IA_NOCHANGE;
  3802     if (ThreadPriorityVerbose) {
  3803       tty->print_cr("IA: [%d...%d] %d->%d\n",
  3804                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
  3806   } else if (new_class == tsLimits.schedPolicy) {
  3807     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
  3808     int maxClamped     = MIN2(tsLimits.maxPrio,
  3809                               cur_class == new_class
  3810                                 ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
  3811     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
  3812                                                        maxClamped, newPrio)
  3813                                : newPrio;
  3814     tsInfo->ts_uprilim = cur_class == new_class
  3815                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
  3816     if (ThreadPriorityVerbose) {
  3817       tty->print_cr("TS: [%d...%d] %d->%d\n",
  3818                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
  3820   } else if (new_class == fxLimits.schedPolicy) {
  3821     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
  3822     int maxClamped     = MIN2(fxLimits.maxPrio,
  3823                               cur_class == new_class
  3824                                 ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
  3825     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
  3826                                                        maxClamped, newPrio)
  3827                                : newPrio;
  3828     fxInfo->fx_uprilim = cur_class == new_class
  3829                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
  3830     fxInfo->fx_tqsecs  = FX_NOCHANGE;
  3831     fxInfo->fx_tqnsecs = FX_NOCHANGE;
  3832     if (ThreadPriorityVerbose) {
  3833       tty->print_cr("FX: [%d...%d] %d->%d\n",
  3834                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
  3836   } else {
  3837     if (ThreadPriorityVerbose) {
  3838       tty->print_cr("Unknown new scheduling class %d\n", new_class);
  3840     return EINVAL;    // no clue, punt
  3843   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  3844   if (ThreadPriorityVerbose && rslt) {
  3845     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  3847   if (rslt < 0) return errno;
  3849 #ifdef ASSERT
  3850   // Sanity check: read back what we just attempted to set.
  3851   // In theory it could have changed in the interim ...
  3852   //
  3853   // The priocntl system call is tricky.
  3854   // Sometimes it'll validate the priority value argument and
  3855   // return EINVAL if unhappy.  At other times it fails silently.
  3856   // Readbacks are prudent.
  3858   if (!ReadBackValidate) return 0;
  3860   memset(&ReadBack, 0, sizeof(pcparms_t));
  3861   ReadBack.pc_cid = PC_CLNULL;
  3862   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  3863   assert(rslt >= 0, "priocntl failed");
  3864   Actual = Expected = 0xBAD;
  3865   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  3866   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
  3867     Actual   = RTPRI(ReadBack)->rt_pri;
  3868     Expected = RTPRI(ParmInfo)->rt_pri;
  3869   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
  3870     Actual   = IAPRI(ReadBack)->ia_upri;
  3871     Expected = IAPRI(ParmInfo)->ia_upri;
  3872   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
  3873     Actual   = TSPRI(ReadBack)->ts_upri;
  3874     Expected = TSPRI(ParmInfo)->ts_upri;
  3875   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
  3876     Actual   = FXPRI(ReadBack)->fx_upri;
  3877     Expected = FXPRI(ParmInfo)->fx_upri;
  3878   } else {
  3879     if (ThreadPriorityVerbose) {
  3880       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
  3881                     ParmInfo.pc_cid);
  3885   if (Actual != Expected) {
  3886     if (ThreadPriorityVerbose) {
  3887       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
  3888                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
  3891 #endif
  3893   return 0;
  3896 // Solaris only gives access to 128 real priorities at a time,
  3897 // so we expand Java's ten to fill this range.  This would be better
  3898 // if we dynamically adjusted relative priorities.
  3899 //
  3900 // The ThreadPriorityPolicy option allows us to select 2 different
  3901 // priority scales.
  3902 //
  3903 // ThreadPriorityPolicy=0
  3904 // Since the Solaris default priority is MaximumPriority, we do not
  3905 // set a priority lower than Max unless a priority lower than
  3906 // NormPriority is requested.
  3907 //
  3908 // ThreadPriorityPolicy=1
  3909 // This mode causes the priority table to get filled with
  3910 // linear values.  NormPriority gets mapped to 50% of the
  3911 // maximum priority and so on.  This will cause VM threads
  3912 // to get unfair treatment compared to other Solaris processes
  3913 // which do not explicitly alter their thread priorities.
  3914 //
  3916 int os::java_to_os_priority[CriticalPriority + 1] = {
  3917   -99999,         // 0 Entry should never be used
  3919   0,              // 1 MinPriority
  3920   32,             // 2
  3921   64,             // 3
  3923   96,             // 4
  3924   127,            // 5 NormPriority
  3925   127,            // 6
  3927   127,            // 7
  3928   127,            // 8
  3929   127,            // 9 NearMaxPriority
  3931   127,            // 10 MaxPriority
  3933   -criticalPrio   // 11 CriticalPriority
  3934 };
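// Reading the table above (the default ThreadPriorityPolicy == 0 mapping):
// Java priorities 5..10 all collapse onto thr_setprio 127, MinPriority..4 are
// spread across 0, 32, 64 and 96, and the CriticalPriority slot holds
// -criticalPrio (-60) as a sentinel that set_native_priority() below detects
// and turns into an FX/60 lwp request.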
  3936 OSReturn os::set_native_priority(Thread* thread, int newpri) {
  3937   OSThread* osthread = thread->osthread();
  3939   // Save requested priority in case the thread hasn't been started
  3940   osthread->set_native_priority(newpri);
  3942   // Check for critical priority request
  3943   bool fxcritical = false;
  3944   if (newpri == -criticalPrio) {
  3945     fxcritical = true;
  3946     newpri = criticalPrio;
  3949   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  3950   if (!UseThreadPriorities) return OS_OK;
  3952   int status = 0;
  3954   if (!fxcritical) {
  3955     // Use thr_setprio only if we have a priority that thr_setprio understands
  3956     status = thr_setprio(thread->osthread()->thread_id(), newpri);
  3959   if (os::Solaris::T2_libthread() ||
  3960       (UseBoundThreads && osthread->is_vm_created())) {
  3961     int lwp_status =
  3962       set_lwp_class_and_priority(osthread->thread_id(),
  3963                                  osthread->lwp_id(),
  3964                                  newpri,
  3965                                  fxcritical ? fxLimits.schedPolicy : myClass,
  3966                                  !fxcritical);
  3967     if (lwp_status != 0 && fxcritical) {
  3968       // Try again, this time without changing the scheduling class
  3969       newpri = java_MaxPriority_to_os_priority;
  3970       lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
  3971                                               osthread->lwp_id(),
  3972                                               newpri, myClass, false);
  3974     status |= lwp_status;
  3976   return (status == 0) ? OS_OK : OS_ERR;
  3980 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  3981   int p;
  3982   if ( !UseThreadPriorities ) {
  3983     *priority_ptr = NormalPriority;
  3984     return OS_OK;
  3986   int status = thr_getprio(thread->osthread()->thread_id(), &p);
  3987   if (status != 0) {
  3988     return OS_ERR;
  3990   *priority_ptr = p;
  3991   return OS_OK;
  3995 // Hint to the underlying OS that a task switch would not be good.
  3996 // Void return because it's a hint and can fail.
  3997 void os::hint_no_preempt() {
  3998   schedctl_start(schedctl_init());
  4001 static void resume_clear_context(OSThread *osthread) {
  4002   osthread->set_ucontext(NULL);
  4005 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
  4006   osthread->set_ucontext(context);
  4009 static Semaphore sr_semaphore;
  4011 void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
  4012   // Save and restore errno to avoid confusing native code with EINTR
  4013   // after sigsuspend.
  4014   int old_errno = errno;
  4016   OSThread* osthread = thread->osthread();
  4017   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
  4019   os::SuspendResume::State current = osthread->sr.state();
  4020   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
  4021     suspend_save_context(osthread, uc);
  4023     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
  4024     os::SuspendResume::State state = osthread->sr.suspended();
  4025     if (state == os::SuspendResume::SR_SUSPENDED) {
  4026       sigset_t suspend_set;  // signals for sigsuspend()
  4028       // get current set of blocked signals and unblock resume signal
  4029       thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
  4030       sigdelset(&suspend_set, os::Solaris::SIGasync());
  4032       sr_semaphore.signal();
  4033       // wait here until we are resumed
  4034       while (1) {
  4035         sigsuspend(&suspend_set);
  4037         os::SuspendResume::State result = osthread->sr.running();
  4038         if (result == os::SuspendResume::SR_RUNNING) {
  4039           sr_semaphore.signal();
  4040           break;
  4044     } else if (state == os::SuspendResume::SR_RUNNING) {
  4045       // request was cancelled, continue
  4046     } else {
  4047       ShouldNotReachHere();
  4050     resume_clear_context(osthread);
  4051   } else if (current == os::SuspendResume::SR_RUNNING) {
  4052     // request was cancelled, continue
  4053   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
  4054     // ignore
  4055   } else {
  4056     // ignore
  4059   errno = old_errno;
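// Summary of the handshake above: do_suspend() (below) moves sr to
// SR_SUSPEND_REQUEST and sends SIGasync; this handler saves the ucontext,
// switches the state to SR_SUSPENDED, posts sr_semaphore and parks in
// sigsuspend() with SIGasync unblocked.  do_resume() later switches the state
// to SR_WAKEUP_REQUEST and signals again; the handler returns from
// sigsuspend(), completes the transition to SR_RUNNING and posts the
// semaphore once more so the requester knows the thread is running again.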
  4063 void os::interrupt(Thread* thread) {
  4064   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
  4066   OSThread* osthread = thread->osthread();
  4068   int isInterrupted = osthread->interrupted();
  4069   if (!isInterrupted) {
  4070       osthread->set_interrupted(true);
  4071       OrderAccess::fence();
  4072       // os::sleep() is implemented either with poll(NULL, 0, timeout) or
  4073       // by parking on _SleepEvent.  If the former, thr_kill will unwedge
  4074       // the sleeper with SIGINTR; otherwise the unpark() will wake the sleeper.
  4075       ParkEvent * const slp = thread->_SleepEvent ;
  4076       if (slp != NULL) slp->unpark() ;
  4079   // For JSR166:  unpark after setting status but before thr_kill -dl
  4080   if (thread->is_Java_thread()) {
  4081     ((JavaThread*)thread)->parker()->unpark();
  4084   // Handle interruptible wait() ...
  4085   ParkEvent * const ev = thread->_ParkEvent ;
  4086   if (ev != NULL) ev->unpark() ;
  4088   // When events are used everywhere for os::sleep, then this thr_kill
  4089   // will only be needed if UseVMInterruptibleIO is true.
  4091   if (!isInterrupted) {
  4092     int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
  4093     assert_status(status == 0, status, "thr_kill");
  4095     // Bump thread interruption counter
  4096     RuntimeService::record_thread_interrupt_signaled_count();
  4101 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  4102   assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
  4104   OSThread* osthread = thread->osthread();
  4106   bool res = osthread->interrupted();
  4108   // NOTE that since there is no "lock" around these two operations,
  4109   // there is the possibility that the interrupted flag will be
  4110   // "false" but that the interrupt event will be set. This is
  4111   // intentional. The effect of this is that Object.wait() will appear
  4112   // to have a spurious wakeup, which is not harmful, and the
  4113   // possibility is so rare that it is not worth the added complexity
  4114   // to add yet another lock. It has also been recommended not to put
  4115   // the interrupted flag into the os::Solaris::Event structure,
  4116   // because it hides the issue.
  4117   if (res && clear_interrupted) {
  4118     osthread->set_interrupted(false);
  4120   return res;
  4124 void os::print_statistics() {
  4127 int os::message_box(const char* title, const char* message) {
  4128   int i;
  4129   fdStream err(defaultStream::error_fd());
  4130   for (i = 0; i < 78; i++) err.print_raw("=");
  4131   err.cr();
  4132   err.print_raw_cr(title);
  4133   for (i = 0; i < 78; i++) err.print_raw("-");
  4134   err.cr();
  4135   err.print_raw_cr(message);
  4136   for (i = 0; i < 78; i++) err.print_raw("=");
  4137   err.cr();
  4139   char buf[16];
  4140   // Prevent process from exiting upon "read error" without consuming all CPU
  4141   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
  4143   return buf[0] == 'y' || buf[0] == 'Y';
  4146 static int sr_notify(OSThread* osthread) {
  4147   int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  4148   assert_status(status == 0, status, "thr_kill");
  4149   return status;
  4152 // "Randomly" selected value for how long we want to spin
  4153 // before bailing out on suspending a thread, also how often
  4154 // we send a signal to a thread we want to resume
  4155 static const int RANDOMLY_LARGE_INTEGER = 1000000;
  4156 static const int RANDOMLY_LARGE_INTEGER2 = 100;
  4158 static bool do_suspend(OSThread* osthread) {
  4159   assert(osthread->sr.is_running(), "thread should be running");
  4160   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
  4162   // mark as suspended and send signal
  4163   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
  4164     // failed to switch, state wasn't running?
  4165     ShouldNotReachHere();
  4166     return false;
  4169   if (sr_notify(osthread) != 0) {
  4170     ShouldNotReachHere();
  4173   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  4174   while (true) {
  4175     if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
  4176       break;
  4177     } else {
  4178       // timeout
  4179       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
  4180       if (cancelled == os::SuspendResume::SR_RUNNING) {
  4181         return false;
  4182       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
  4183         // make sure that we consume the signal on the semaphore as well
  4184         sr_semaphore.wait();
  4185         break;
  4186       } else {
  4187         ShouldNotReachHere();
  4188         return false;
  4193   guarantee(osthread->sr.is_suspended(), "Must be suspended");
  4194   return true;
  4197 static void do_resume(OSThread* osthread) {
  4198   assert(osthread->sr.is_suspended(), "thread should be suspended");
  4199   assert(!sr_semaphore.trywait(), "invalid semaphore state");
  4201   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
  4202     // failed to switch to WAKEUP_REQUEST
  4203     ShouldNotReachHere();
  4204     return;
  4207   while (true) {
  4208     if (sr_notify(osthread) == 0) {
  4209       if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
  4210         if (osthread->sr.is_running()) {
  4211           return;
  4214     } else {
  4215       ShouldNotReachHere();
  4219   guarantee(osthread->sr.is_running(), "Must be running!");
  4222 void os::SuspendedThreadTask::internal_do_task() {
  4223   if (do_suspend(_thread->osthread())) {
  4224     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
  4225     do_task(context);
  4226     do_resume(_thread->osthread());
  4230 class PcFetcher : public os::SuspendedThreadTask {
  4231 public:
  4232   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  4233   ExtendedPC result();
  4234 protected:
  4235   void do_task(const os::SuspendedThreadTaskContext& context);
  4236 private:
  4237   ExtendedPC _epc;
  4238 };
  4240 ExtendedPC PcFetcher::result() {
  4241   guarantee(is_done(), "task is not done yet.");
  4242   return _epc;
  4245 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  4246   Thread* thread = context.thread();
  4247   OSThread* osthread = thread->osthread();
  4248   if (osthread->ucontext() != NULL) {
  4249     _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
  4250   } else {
  4251     // NULL context is unexpected, double-check this is the VMThread
  4252     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  4256 // A lightweight implementation that does not suspend the target thread and
  4257 // thus returns only a hint. Used for profiling only!
  4258 ExtendedPC os::get_thread_pc(Thread* thread) {
  4259   // Make sure that it is called by the watcher and the Threads lock is owned.
  4260   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  4261   // For now, is only used to profile the VM Thread
  4262   assert(thread->is_VM_thread(), "Can only be called for VMThread");
  4263   PcFetcher fetcher(thread);
  4264   fetcher.run();
  4265   return fetcher.result();
  4269 // This does not do anything on Solaris. This is basically a hook for being
  4270 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
  4271 void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  4272   f(value, method, args, thread);
  4275 // This routine may be used by user applications as a "hook" to catch signals.
  4276 // The user-defined signal handler must pass unrecognized signals to this
  4277 // routine, and if it returns true (non-zero), then the signal handler must
  4278 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
  4279 // routine will never return false (zero), but instead will execute a VM panic
  4280 // routine to kill the process.
  4281 //
  4282 // If this routine returns false, it is OK to call it again.  This allows
  4283 // the user-defined signal handler to perform checks either before or after
  4284 // the VM performs its own checks.  Naturally, the user code would be making
  4285 // a serious error if it tried to handle an exception (such as a null check
  4286 // or breakpoint) that the VM was generating for its own correct operation.
  4287 //
  4288 // This routine may recognize any of the following kinds of signals:
  4289 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
  4290 // os::Solaris::SIGasync
  4291 // It should be consulted by handlers for any of those signals.
  4292 // It explicitly does not recognize os::Solaris::SIGinterrupt
  4293 //
  4294 // The caller of this routine must pass in the three arguments supplied
  4295 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
  4296 // field of the structure passed to sigaction().  This routine assumes that
  4297 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
  4298 //
  4299 // Note that the VM will print warnings if it detects conflicting signal
  4300 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
  4301 //
  4302 extern "C" JNIEXPORT int
  4303 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
  4304                           int abort_if_unrecognized);
  4307 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  4308   int orig_errno = errno;  // Preserve errno value over signal handler.
  4309   JVM_handle_solaris_signal(sig, info, ucVoid, true);
  4310   errno = orig_errno;
  4313 /* Do not delete - if this guarantee is ever removed, a signal handler (even an
  4314    empty one) is needed to provoke threads blocked on IO to return with EINTR.
  4315    Note: this explicitly does NOT call JVM_handle_solaris_signal and
  4316    does NOT participate in signal chaining, due to the requirement of
  4317    NOT setting SA_RESTART, which is what makes EINTR work. */
  4318 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
  4319    if (UseSignalChaining) {
  4320       struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
  4321       if (actp && actp->sa_handler) {
  4322         vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
  4327 // This boolean allows users to forward their own non-matching signals
  4328 // to JVM_handle_solaris_signal, harmlessly.
  4329 bool os::Solaris::signal_handlers_are_installed = false;
  4331 // For signal-chaining
  4332 bool os::Solaris::libjsig_is_loaded = false;
  4333 typedef struct sigaction *(*get_signal_t)(int);
  4334 get_signal_t os::Solaris::get_signal_action = NULL;
  4336 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  4337   struct sigaction *actp = NULL;
  4339   if ((libjsig_is_loaded)  && (sig <= Maxlibjsigsigs)) {
  4340     // Retrieve the old signal handler from libjsig
  4341     actp = (*get_signal_action)(sig);
  4343   if (actp == NULL) {
  4344     // Retrieve the preinstalled signal handler from jvm
  4345     actp = get_preinstalled_handler(sig);
  4348   return actp;
  4351 static bool call_chained_handler(struct sigaction *actp, int sig,
  4352                                  siginfo_t *siginfo, void *context) {
  4353   // Call the old signal handler
  4354   if (actp->sa_handler == SIG_DFL) {
  4355     // It's more reasonable to let jvm treat it as an unexpected exception
  4356     // instead of taking the default action.
  4357     return false;
  4358   } else if (actp->sa_handler != SIG_IGN) {
  4359     if ((actp->sa_flags & SA_NODEFER) == 0) {
  4360       // automatically block the signal
  4361       sigaddset(&(actp->sa_mask), sig);
  4364     sa_handler_t hand;
  4365     sa_sigaction_t sa;
  4366     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
  4367     // retrieve the chained handler
  4368     if (siginfo_flag_set) {
  4369       sa = actp->sa_sigaction;
  4370     } else {
  4371       hand = actp->sa_handler;
  4374     if ((actp->sa_flags & SA_RESETHAND) != 0) {
  4375       actp->sa_handler = SIG_DFL;
  4378     // try to honor the signal mask
  4379     sigset_t oset;
  4380     thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
  4382     // call into the chained handler
  4383     if (siginfo_flag_set) {
  4384       (*sa)(sig, siginfo, context);
  4385     } else {
  4386       (*hand)(sig);
  4389     // restore the signal mask
  4390     thr_sigsetmask(SIG_SETMASK, &oset, 0);
  4392   // Tell jvm's signal handler the signal is taken care of.
  4393   return true;
  4396 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  4397   bool chained = false;
  4398   // signal-chaining
  4399   if (UseSignalChaining) {
  4400     struct sigaction *actp = get_chained_signal_action(sig);
  4401     if (actp != NULL) {
  4402       chained = call_chained_handler(actp, sig, siginfo, context);
  4405   return chained;
  4408 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
  4409   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  4410   if (preinstalled_sigs[sig] != 0) {
  4411     return &chainedsigactions[sig];
  4413   return NULL;
  4416 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  4418   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  4419   assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  4420   chainedsigactions[sig] = oldAct;
  4421   preinstalled_sigs[sig] = 1;
  4424 void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  4425   // Check for overwrite.
  4426   struct sigaction oldAct;
  4427   sigaction(sig, (struct sigaction*)NULL, &oldAct);
  4428   void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
  4429                                       : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
  4430   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
  4431       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
  4432       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
  4433     if (AllowUserSignalHandlers || !set_installed) {
  4434       // Do not overwrite; user takes responsibility to forward to us.
  4435       return;
  4436     } else if (UseSignalChaining) {
  4437       if (oktochain) {
  4438         // save the old handler in jvm
  4439         save_preinstalled_handler(sig, oldAct);
  4440       } else {
  4441         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
  4443       // libjsig also interposes the sigaction() call below and saves the
  4444       // old sigaction on its own.
  4445     } else {
  4446       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
  4447                     "%#lx for signal %d.", (long)oldhand, sig));
  4451   struct sigaction sigAct;
  4452   sigfillset(&(sigAct.sa_mask));
  4453   sigAct.sa_handler = SIG_DFL;
  4455   sigAct.sa_sigaction = signalHandler;
  4456   // Handle SIGSEGV on alternate signal stack if
  4457   // not using stack banging
  4458   if (!UseStackBanging && sig == SIGSEGV) {
  4459     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  4460   // Interruptible i/o requires SA_RESTART cleared so EINTR
  4461   // is returned instead of restarting system calls
  4462   } else if (sig == os::Solaris::SIGinterrupt()) {
  4463     sigemptyset(&sigAct.sa_mask);
  4464     sigAct.sa_handler = NULL;
  4465     sigAct.sa_flags = SA_SIGINFO;
  4466     sigAct.sa_sigaction = sigINTRHandler;
  4467   } else {
  4468     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  4470   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
  4472   sigaction(sig, &sigAct, &oldAct);
  4474   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
  4475                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  4476   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
  4480 #define DO_SIGNAL_CHECK(sig) \
  4481   if (!sigismember(&check_signal_done, sig)) \
  4482     os::Solaris::check_signal_handler(sig)
  4484 // This method is a periodic task to check for misbehaving JNI applications
  4485 // under CheckJNI; we can add any other periodic checks here.
  4487 void os::run_periodic_checks() {
  4488   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  4489   // thereby preventing NULL checks.
  4490   if(!check_addr0_done) check_addr0_done = check_addr0(tty);
  4492   if (check_signals == false) return;
  4494   // If SEGV or BUS is overridden, it could potentially prevent
  4495   // generation of the hs*.log in the event of a crash; debugging
  4496   // such a case can be very challenging, so we absolutely
  4497   // check the following for good measure:
  4498   DO_SIGNAL_CHECK(SIGSEGV);
  4499   DO_SIGNAL_CHECK(SIGILL);
  4500   DO_SIGNAL_CHECK(SIGFPE);
  4501   DO_SIGNAL_CHECK(SIGBUS);
  4502   DO_SIGNAL_CHECK(SIGPIPE);
  4503   DO_SIGNAL_CHECK(SIGXFSZ);
  4505   // ReduceSignalUsage allows the user to override these handlers
  4506   // see comments at the very top and jvm_solaris.h
  4507   if (!ReduceSignalUsage) {
  4508     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
  4509     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
  4510     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
  4511     DO_SIGNAL_CHECK(BREAK_SIGNAL);
  4514   // See comments above for using JVM1/JVM2 and UseAltSigs
  4515   DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  4516   DO_SIGNAL_CHECK(os::Solaris::SIGasync());
  4520 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
  4522 static os_sigaction_t os_sigaction = NULL;
  4524 void os::Solaris::check_signal_handler(int sig) {
  4525   char buf[O_BUFLEN];
  4526   address jvmHandler = NULL;
  4528   struct sigaction act;
  4529   if (os_sigaction == NULL) {
  4530     // only trust the default sigaction, in case it has been interposed
  4531     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
  4532     if (os_sigaction == NULL) return;
  4535   os_sigaction(sig, (struct sigaction*)NULL, &act);
  4537   address thisHandler = (act.sa_flags & SA_SIGINFO)
  4538     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
  4539     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
  4542   switch(sig) {
  4543     case SIGSEGV:
  4544     case SIGBUS:
  4545     case SIGFPE:
  4546     case SIGPIPE:
  4547     case SIGXFSZ:
  4548     case SIGILL:
  4549       jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
  4550       break;
  4552     case SHUTDOWN1_SIGNAL:
  4553     case SHUTDOWN2_SIGNAL:
  4554     case SHUTDOWN3_SIGNAL:
  4555     case BREAK_SIGNAL:
  4556       jvmHandler = (address)user_handler();
  4557       break;
  4559     default:
  4560       int intrsig = os::Solaris::SIGinterrupt();
  4561       int asynsig = os::Solaris::SIGasync();
  4563       if (sig == intrsig) {
  4564         jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
  4565       } else if (sig == asynsig) {
  4566         jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
  4567       } else {
  4568         return;
  4570       break;
  4574   if (thisHandler != jvmHandler) {
  4575     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
  4576     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
  4577     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
  4578     // No need to check this sig any longer
  4579     sigaddset(&check_signal_done, sig);
  4580   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
  4581     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
  4582     tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
  4583     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
  4584     // No need to check this sig any longer
  4585     sigaddset(&check_signal_done, sig);
  4588   // Print all the signal handler state
  4589   if (sigismember(&check_signal_done, sig)) {
  4590     print_signal_handlers(tty, buf, O_BUFLEN);
  4595 void os::Solaris::install_signal_handlers() {
  4596   bool libjsigdone = false;
  4597   signal_handlers_are_installed = true;
  4599   // signal-chaining
  4600   typedef void (*signal_setting_t)();
  4601   signal_setting_t begin_signal_setting = NULL;
  4602   signal_setting_t end_signal_setting = NULL;
  4603   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
  4604                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  4605   if (begin_signal_setting != NULL) {
  4606     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
  4607                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
  4608     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
  4609                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
  4610     get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
  4611                                          dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
  4612     libjsig_is_loaded = true;
  4613     if (os::Solaris::get_libjsig_version != NULL) {
  4614       libjsigversion =  (*os::Solaris::get_libjsig_version)();
  4616     assert(UseSignalChaining, "should enable signal-chaining");
  4618   if (libjsig_is_loaded) {
  4619     // Tell libjsig jvm is setting signal handlers
  4620     (*begin_signal_setting)();
  4623   set_signal_handler(SIGSEGV, true, true);
  4624   set_signal_handler(SIGPIPE, true, true);
  4625   set_signal_handler(SIGXFSZ, true, true);
  4626   set_signal_handler(SIGBUS, true, true);
  4627   set_signal_handler(SIGILL, true, true);
  4628   set_signal_handler(SIGFPE, true, true);
  4631   if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {
  4633     // Pre-1.4.1 libjsig is limited to chaining signals <= 32, so it
  4634     // cannot register overridable signals which might be > 32.
  4635     if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
  4636       // Tell libjsig jvm has finished setting signal handlers
  4637       (*end_signal_setting)();
  4638       libjsigdone = true;
  4642   // Never ok to chain our SIGinterrupt
  4643   set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  4644   set_signal_handler(os::Solaris::SIGasync(), true, true);
  4646   if (libjsig_is_loaded && !libjsigdone) {
  4647     // Tell libjsig the jvm has finished setting signal handlers
  4648     (*end_signal_setting)();
  4651   // We don't activate the signal checker if libjsig is in place; we trust ourselves,
  4652   // and if a user signal handler is installed all bets are off.
  4653   // Log that signal checking is off only if -verbose:jni is specified.
  4654   if (CheckJNICalls) {
  4655     if (libjsig_is_loaded) {
  4656       if (PrintJNIResolving) {
  4657         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
  4659       check_signals = false;
  4661     if (AllowUserSignalHandlers) {
  4662       if (PrintJNIResolving) {
  4663         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
  4665       check_signals = false;
  4671 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
  4673 const char * signames[] = {
  4674   "SIG0",
  4675   "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  4676   "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  4677   "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  4678   "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  4679   "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  4680   "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  4681   "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  4682   "SIGCANCEL", "SIGLOST"
  4683 };
  4685 const char* os::exception_name(int exception_code, char* buf, size_t size) {
  4686   if (0 < exception_code && exception_code <= SIGRTMAX) {
  4687     // signal
  4688     if (exception_code < sizeof(signames)/sizeof(const char*)) {
  4689        jio_snprintf(buf, size, "%s", signames[exception_code]);
  4690     } else {
  4691        jio_snprintf(buf, size, "SIG%d", exception_code);
  4693     return buf;
  4694   } else {
  4695     return NULL;
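// Illustrative sketch (not part of the VM build): exception_name() fills the
// caller-supplied buffer and returns it, e.g.
#if 0
static void exception_name_example() {
  char namebuf[O_BUFLEN];
  const char* n = os::exception_name(SIGSEGV, namebuf, sizeof(namebuf));
  // n points at namebuf, which now contains "SIGSEGV"
}
#endif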
  4699 // (Static) wrappers for the new libthread API
  4700 int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
  4701 int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
  4702 int_fnP_thread_t_i os::Solaris::_thr_setmutator;
  4703 int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
  4704 int_fnP_thread_t os::Solaris::_thr_continue_mutator;
  4706 // (Static) wrapper for getisax(2) call.
  4707 os::Solaris::getisax_func_t os::Solaris::_getisax = 0;
  4709 // (Static) wrappers for the liblgrp API
  4710 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
  4711 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
  4712 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
  4713 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
  4714 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
  4715 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
  4716 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
  4717 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
  4718 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
  4720 // (Static) wrapper for meminfo() call.
  4721 os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
  4723 static address resolve_symbol_lazy(const char* name) {
  4724   address addr = (address) dlsym(RTLD_DEFAULT, name);
  4725   if(addr == NULL) {
  4726     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
  4727     addr = (address) dlsym(RTLD_NEXT, name);
  4729   return addr;
  4732 static address resolve_symbol(const char* name) {
  4733   address addr = resolve_symbol_lazy(name);
  4734   if(addr == NULL) {
  4735     fatal(dlerror());
  4737   return addr;
  4742 // isT2_libthread()
  4743 //
  4744 // Routine to determine if we are currently using the new T2 libthread.
  4745 //
  4746 // We determine if we are using T2 by reading /proc/self/lstatus and
  4747 // looking for a thread with the ASLWP bit set.  If we find this status
  4748 // bit set, we must assume that we are NOT using T2.  The T2 team
  4749 // has approved this algorithm.
  4750 //
  4751 // We need to determine if we are running with the new T2 libthread
  4752 // since setting native thread priorities is handled differently
  4753 // when using this library.  All threads created using T2 are bound
  4754 // threads. Calling thr_setprio is meaningless in this case.
  4755 //
  4756 bool isT2_libthread() {
  4757   static prheader_t * lwpArray = NULL;
  4758   static int lwpSize = 0;
  4759   static int lwpFile = -1;
  4760   lwpstatus_t * that;
  4761   char lwpName [128];
  4762   bool isT2 = false;
  4764 #define ADR(x)  ((uintptr_t)(x))
  4765 #define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
  4767   lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  4768   if (lwpFile < 0) {
  4769       if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
  4770       return false;
  4772   lwpSize = 16*1024;
  4773   for (;;) {
  4774     ::lseek64 (lwpFile, 0, SEEK_SET);
  4775     lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
  4776     if (::read(lwpFile, lwpArray, lwpSize) < 0) {
  4777       if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
  4778       break;
  4780     if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
  4781        // We got a good snapshot - now iterate over the list.
  4782       int aslwpcount = 0;
  4783       for (int i = 0; i < lwpArray->pr_nent; i++ ) {
  4784         that = LWPINDEX(lwpArray,i);
  4785         if (that->pr_flags & PR_ASLWP) {
  4786           aslwpcount++;
  4789       if (aslwpcount == 0) isT2 = true;
  4790       break;
  4792     lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
  4793     FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
  4796   FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
  4797   ::close (lwpFile);
  4798   if (ThreadPriorityVerbose) {
  4799     if (isT2) tty->print_cr("We are running with a T2 libthread\n");
  4800     else tty->print_cr("We are not running with a T2 libthread\n");
  4802   return isT2;
  4806 void os::Solaris::libthread_init() {
  4807   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
  4809   // Determine if we are running with the new T2 libthread
  4810   os::Solaris::set_T2_libthread(isT2_libthread());
  4812   lwp_priocntl_init();
  4814   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  4815   if(func == NULL) {
  4816     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
  4817     // Guarantee that this VM is running on a new enough OS (5.6 or
  4818     // later) that it will have a new enough libthread.so.
  4819     guarantee(func != NULL, "libthread.so is too old.");
  4822   // Initialize the new libthread getstate API wrappers
  4823   func = resolve_symbol("thr_getstate");
  4824   os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));
  4826   func = resolve_symbol("thr_setstate");
  4827   os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));
  4829   func = resolve_symbol("thr_setmutator");
  4830   os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));
  4832   func = resolve_symbol("thr_suspend_mutator");
  4833   os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
  4835   func = resolve_symbol("thr_continue_mutator");
  4836   os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));
  4838   int size;
  4839   void (*handler_info_func)(address *, int *);
  4840   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  4841   handler_info_func(&handler_start, &size);
  4842   handler_end = handler_start + size;
  4846 int_fnP_mutex_tP os::Solaris::_mutex_lock;
  4847 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
  4848 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
  4849 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
  4850 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
  4851 int os::Solaris::_mutex_scope = USYNC_THREAD;
  4853 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
  4854 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
  4855 int_fnP_cond_tP os::Solaris::_cond_signal;
  4856 int_fnP_cond_tP os::Solaris::_cond_broadcast;
  4857 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
  4858 int_fnP_cond_tP os::Solaris::_cond_destroy;
  4859 int os::Solaris::_cond_scope = USYNC_THREAD;
  4861 void os::Solaris::synchronization_init() {
  4862   if(UseLWPSynchronization) {
  4863     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
  4864     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
  4865     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
  4866     os::Solaris::set_mutex_init(lwp_mutex_init);
  4867     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
  4868     os::Solaris::set_mutex_scope(USYNC_THREAD);
  4870     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
  4871     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
  4872     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
  4873     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
  4874     os::Solaris::set_cond_init(lwp_cond_init);
  4875     os::Solaris::set_cond_destroy(lwp_cond_destroy);
  4876     os::Solaris::set_cond_scope(USYNC_THREAD);
  4878   else {
  4879     os::Solaris::set_mutex_scope(USYNC_THREAD);
  4880     os::Solaris::set_cond_scope(USYNC_THREAD);
  4882     if(UsePthreads) {
  4883       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
  4884       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
  4885       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
  4886       os::Solaris::set_mutex_init(pthread_mutex_default_init);
  4887       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
  4889       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
  4890       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
  4891       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
  4892       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
  4893       os::Solaris::set_cond_init(pthread_cond_default_init);
  4894       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
  4896     else {
  4897       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
  4898       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
  4899       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
  4900       os::Solaris::set_mutex_init(::mutex_init);
  4901       os::Solaris::set_mutex_destroy(::mutex_destroy);
  4903       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
  4904       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
  4905       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
  4906       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
  4907       os::Solaris::set_cond_init(::cond_init);
  4908       os::Solaris::set_cond_destroy(::cond_destroy);
  4913 bool os::Solaris::liblgrp_init() {
  4914   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  4915   if (handle != NULL) {
  4916     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
  4917     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
  4918     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
  4919     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
  4920     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
  4921     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
  4922     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
  4923     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
  4924                                        dlsym(handle, "lgrp_cookie_stale")));
  4926     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
  4927     set_lgrp_cookie(c);
  4928     return true;
  4930   return false;
  4933 void os::Solaris::misc_sym_init() {
  4934   address func;
  4936   // getisax
  4937   func = resolve_symbol_lazy("getisax");
  4938   if (func != NULL) {
  4939     os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
  4942   // meminfo
  4943   func = resolve_symbol_lazy("meminfo");
  4944   if (func != NULL) {
  4945     os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
  4949 uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  4950   assert(_getisax != NULL, "_getisax not set");
  4951   return _getisax(array, n);
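// Illustrative sketch (not part of the VM build): typical use of the getisax(2)
// wrapper above, assuming misc_sym_init() has already resolved the symbol (the
// wrapper asserts that).  The AV_* bit names live in <sys/auxv.h> and are
// platform specific, so this sketch only dumps the raw capability words.
#if 0
static void print_instruction_set_extensions() {
  uint32_t av[2] = { 0, 0 };
  // Ask for up to two 32-bit words of AV_* capability bits.
  uint_t words = os::Solaris::getisax(av, 2);
  for (uint_t i = 0; i < words && i < 2; i++) {
    tty->print_cr("hwcap word %u: 0x%x", i, av[i]);
  }
}
#endif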
  4954 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
  4955 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
  4956 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
  4958 void init_pset_getloadavg_ptr(void) {
  4959   pset_getloadavg_ptr =
  4960     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
  4961   if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
  4962     warning("pset_getloadavg function not found");
  4966 int os::Solaris::_dev_zero_fd = -1;
  4968 // this is called _before_ the global arguments have been parsed
  4969 void os::init(void) {
  4970   _initial_pid = getpid();
  4972   max_hrtime = first_hrtime = gethrtime();
  4974   init_random(1234567);
  4976   page_size = sysconf(_SC_PAGESIZE);
  4977   if (page_size == -1)
  4978     fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
  4979                   strerror(errno)));
  4980   init_page_sizes((size_t) page_size);
  4982   Solaris::initialize_system_info();
  4984   // Initialize misc. symbols as soon as possible, so we can use them
  4985   // if we need them.
  4986   Solaris::misc_sym_init();
  4988   int fd = ::open("/dev/zero", O_RDWR);
  4989   if (fd < 0) {
  4990     fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  4991   } else {
  4992     Solaris::set_dev_zero_fd(fd);
  4994     // Close on exec, child won't inherit.
  4995     fcntl(fd, F_SETFD, FD_CLOEXEC);
  4998   clock_tics_per_sec = CLK_TCK;
  5000   // check if dladdr1() exists; dladdr1 can provide more information than
  5001   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  5002   // and is available on linker patches for 5.7 and 5.8.
  5003   // libdl.so must already have been loaded; this call is just an entry lookup.
  5004   void * hdl = dlopen("libdl.so", RTLD_NOW);
  5005   if (hdl)
  5006     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
  5008   // (Solaris only) this switches to calls that actually do locking.
  5009   ThreadCritical::initialize();
  5011   main_thread = thr_self();
  5013   // Constant minimum stack size allowed. It must be at least
  5014   // the minimum of what the OS supports (thr_min_stack()), and
  5015   // enough to allow the thread to get to user bytecode execution.
  5016   Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  5017   // If the pagesize of the VM is greater than 8K determine the appropriate
  5018   // number of initial guard pages.  The user can change this with the
  5019   // command line arguments, if needed.
  5020   if (vm_page_size() > 8*K) {
  5021     StackYellowPages = 1;
  5022     StackRedPages = 1;
  5023     StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  5027 // To install functions for atexit system call
  5028 extern "C" {
  5029   static void perfMemory_exit_helper() {
  5030     perfMemory_exit();
  5034 // this is called _after_ the global arguments have been parsed
  5035 jint os::init_2(void) {
  5036   // try to enable extended file IO ASAP, see 6431278
  5037   os::Solaris::try_enable_extended_io();
  5039   // Allocate a single page and mark it as readable for safepoint polling.  Also
  5040   // use this first mmap call to check support for MAP_ALIGN.
  5041   address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
  5042                                                       page_size,
  5043                                                       MAP_PRIVATE | MAP_ALIGN,
  5044                                                       PROT_READ);
  5045   if (polling_page == NULL) {
  5046     has_map_align = false;
  5047     polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
  5048                                                 PROT_READ);
  5051   os::set_polling_page(polling_page);
  5053 #ifndef PRODUCT
  5054   if( Verbose && PrintMiscellaneous )
  5055     tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
  5056 #endif
  5058   if (!UseMembar) {
  5059     address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
  5060     guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
  5061     os::set_memory_serialize_page( mem_serialize_page );
  5063 #ifndef PRODUCT
  5064     if(Verbose && PrintMiscellaneous)
  5065       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
  5066 #endif
  5069   // Check minimum allowable stack size for thread creation and to initialize
  5070   // the java system classes, including StackOverflowError - depends on page
  5071   // size.  Add a page for compiler2 recursion in main thread.
  5072   // Add in 2*BytesPerWord times page size to account for VM stack during
  5073   // class initialization depending on 32 or 64 bit VM.
  5074   os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
  5075             (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
  5076                     2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
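  // For illustration only (the values are hypothetical; the real ones come from
  // platform defaults and -XX flags): with 8K pages on a 64-bit VM and, say,
  // StackYellowPages=2, StackRedPages=1, StackShadowPages=20, the formula adds
  // (2 + 1 + 20 + 2*8 (+1 for compiler2)) * 8K = 40 * 8K = 320K on top of the
  // platform minimum.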
  5078   size_t threadStackSizeInBytes = ThreadStackSize * K;
  5079   if (threadStackSizeInBytes != 0 &&
  5080     threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
  5081     tty->print_cr("\nThe stack size specified is too small; specify at least %dk",
  5082                   os::Solaris::min_stack_allowed/K);
  5083     return JNI_ERR;
  5086   // For 64kb pages the usable default stack size is quite a bit
  5087   // less than with 8kb pages.  Increase the stack for 64kb (or any
  5088   // greater-than-8kb) pages; this increases virtual memory
  5089   // fragmentation (since we're not creating the stack on a
  5090   // power-of-2 boundary).  The real fix for this should be to fix
  5091   // the guard page mechanism.
  5093   if (vm_page_size() > 8*K) {
  5094       threadStackSizeInBytes = (threadStackSizeInBytes != 0)
  5095          ? threadStackSizeInBytes +
  5096            ((StackYellowPages + StackRedPages) * vm_page_size())
  5097          : 0;
  5098       ThreadStackSize = threadStackSizeInBytes/K;
  5101   // Make the stack size a multiple of the page size so that
  5102   // the yellow/red zones can be guarded.
  5103   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
  5104         vm_page_size()));
  5106   Solaris::libthread_init();
  5108   if (UseNUMA) {
  5109     if (!Solaris::liblgrp_init()) {
  5110       UseNUMA = false;
  5111     } else {
  5112       size_t lgrp_limit = os::numa_get_groups_num();
  5113       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
  5114       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
  5115       FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
  5116       if (lgrp_num < 2) {
  5117         // There's only one locality group, disable NUMA.
  5118         UseNUMA = false;
  5121     if (!UseNUMA && ForceNUMA) {
  5122       UseNUMA = true;
  5126   Solaris::signal_sets_init();
  5127   Solaris::init_signal_mem();
  5128   Solaris::install_signal_handlers();
  5130   if (libjsigversion < JSIG_VERSION_1_4_1) {
  5131     Maxlibjsigsigs = OLDMAXSIGNUM;
  5134   // initialize synchronization primitives to use either thread or
  5135   // lwp synchronization (controlled by UseLWPSynchronization)
  5136   Solaris::synchronization_init();
  5138   if (MaxFDLimit) {
  5139     // Set the number of file descriptors to the maximum.  Print an error
  5140     // if getrlimit/setrlimit fails, but continue regardless.
  5141     struct rlimit nbr_files;
  5142     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
  5143     if (status != 0) {
  5144       if (PrintMiscellaneous && (Verbose || WizardMode))
  5145         perror("os::init_2 getrlimit failed");
  5146     } else {
  5147       nbr_files.rlim_cur = nbr_files.rlim_max;
  5148       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
  5149       if (status != 0) {
  5150         if (PrintMiscellaneous && (Verbose || WizardMode))
  5151           perror("os::init_2 setrlimit failed");
  5156   // Calculate the theoretical max. number of threads to guard against
  5157   // artificial out-of-memory situations, where all available address
  5158   // space has been reserved by thread stacks. Default stack size is 1Mb.
  5159   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
  5160     JavaThread::stack_size_at_create() : (1*K*K);
  5161   assert(pre_thread_stack_size != 0, "Must have a stack");
  5162   // Solaris has a maximum of 4Gb of user address space. Calculate the thread limit
  5163   // at which we should start doing virtual memory banging; currently that is when
  5164   // the threads will have used all but 200Mb of space.
  5165   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  5166   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
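  // For illustration: with the default 1Mb stacks this works out to roughly
  // (4096Mb - 200Mb) / 1Mb ~= 3896 threads before the banging threshold.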
  5168   // at-exit methods are called in the reverse order of their registration.
  5169   // In Solaris 7 and earlier, atexit functions are called on return from
  5170   // main or as a result of a call to exit(3C). There can be only 32 of
  5171   // these functions registered and atexit() does not set errno. In Solaris
  5172   // 8 and later, there is no limit to the number of functions registered
  5173   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  5174   // functions are called upon dlclose(3DL) in addition to return from main
  5175   // and exit(3C).
  5177   if (PerfAllowAtExitRegistration) {
  5178     // only register atexit functions if PerfAllowAtExitRegistration is set.
  5179     // atexit functions can be delayed until process exit time, which
  5180     // can be problematic for embedded VM situations. Embedded VMs should
  5181     // call DestroyJavaVM() to assure that VM resources are released.
  5183     // note: perfMemory_exit_helper atexit function may be removed in
  5184     // the future if the appropriate cleanup code can be added to the
  5185     // VM_Exit VMOperation's doit method.
  5186     if (atexit(perfMemory_exit_helper) != 0) {
  5187       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
  5191   // Init pset_loadavg function pointer
  5192   init_pset_getloadavg_ptr();
  5194   return JNI_OK;
  5197 void os::init_3(void) {
  5198   return;
  5201 // Mark the polling page as unreadable
  5202 void os::make_polling_page_unreadable(void) {
  5203   if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
  5204     fatal("Could not disable polling page");
  5205 };
  5207 // Mark the polling page as readable
  5208 void os::make_polling_page_readable(void) {
  5209   if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
  5210     fatal("Could not enable polling page");
  5211 };
  5213 // OS interface.
  5215 bool os::check_heap(bool force) { return true; }
  5217 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
  5218 static vsnprintf_t sol_vsnprintf = NULL;
  5220 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  5221   if (!sol_vsnprintf) {
  5222     //search  for the named symbol in the objects that were loaded after libjvm
  5223     void* where = RTLD_NEXT;
  5224     if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
  5225         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
  5226     if (!sol_vsnprintf){
  5227       //search  for the named symbol in the objects that were loaded before libjvm
  5228       where = RTLD_DEFAULT;
  5229       if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
  5230         sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
  5231       assert(sol_vsnprintf != NULL, "vsnprintf not found");
  5234   return (*sol_vsnprintf)(buf, count, fmt, argptr);
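// Illustrative sketch (not part of the VM build): local_vsnprintf() takes a
// va_list, so a printf-style convenience wrapper around it would look like the
// following (the wrapper name is hypothetical; assumes <stdarg.h>).
#if 0
static int local_snprintf_example(char* buf, size_t count, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int result = local_vsnprintf(buf, count, fmt, args);
  va_end(args);
  return result;
}
#endif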
  5238 // Is a (classpath) directory empty?
  5239 bool os::dir_is_empty(const char* path) {
  5240   DIR *dir = NULL;
  5241   struct dirent *ptr;
  5243   dir = opendir(path);
  5244   if (dir == NULL) return true;
  5246   /* Scan the directory */
  5247   bool result = true;
  5248   char buf[sizeof(struct dirent) + MAX_PATH];
  5249   struct dirent *dbuf = (struct dirent *) buf;
  5250   while (result && (ptr = readdir(dir, dbuf)) != NULL) {
  5251     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
  5252       result = false;
  5255   closedir(dir);
  5256   return result;
  5259 // This code originates from JDK's sysOpen and open64_w
  5260 // from src/solaris/hpi/src/system_md.c
  5262 #ifndef O_DELETE
  5263 #define O_DELETE 0x10000
  5264 #endif
  5266 // Open a file. Unlink the file immediately after open returns
  5267 // if the specified oflag has the O_DELETE flag set.
  5268 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
  5270 int os::open(const char *path, int oflag, int mode) {
  5271   if (strlen(path) > MAX_PATH - 1) {
  5272     errno = ENAMETOOLONG;
  5273     return -1;
  5275   int fd;
  5276   int o_delete = (oflag & O_DELETE);
  5277   oflag = oflag & ~O_DELETE;
  5279   fd = ::open64(path, oflag, mode);
  5280   if (fd == -1) return -1;
  5282   // If the open succeeded, the file might still be a directory.
  5284     struct stat64 buf64;
  5285     int ret = ::fstat64(fd, &buf64);
  5286     int st_mode = buf64.st_mode;
  5288     if (ret != -1) {
  5289       if ((st_mode & S_IFMT) == S_IFDIR) {
  5290         errno = EISDIR;
  5291         ::close(fd);
  5292         return -1;
  5294     } else {
  5295       ::close(fd);
  5296       return -1;
  5299     /*
  5300      * 32-bit Solaris systems suffer from:
  5302      * - an historical default soft limit of 256 per-process file
  5303      *   descriptors that is too low for many Java programs.
  5305      * - a design flaw where file descriptors created using stdio
  5306      *   fopen must be less than 256, _even_ when the first limit above
  5307      *   has been raised.  This can cause calls to fopen (but not calls to
  5308      *   open, for example) to fail mysteriously, perhaps in 3rd party
  5309      *   native code (although the JDK itself uses fopen).  One can hardly
  5310      *   criticize them for using this most standard of all functions.
  5312      * We attempt to make everything work anyways by:
  5314      * - raising the soft limit on per-process file descriptors beyond
  5315      *   256
  5317      * - As of Solaris 10u4, we can request that Solaris raise the 256
  5318      *   stdio fopen limit by calling function enable_extended_FILE_stdio.
  5319      *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
  5321      * - If we are stuck on an old (pre 10u4) Solaris system, we can
  5322      *   work around the bug by remapping non-stdio file descriptors below
  5323      *   256 to ones beyond 256, which is done below.
  5325      * See:
  5326      * 1085341: 32-bit stdio routines should support file descriptors >255
  5327      * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
  5328      * 6431278: Netbeans crash on 32 bit Solaris: need to call
  5329      *          enable_extended_FILE_stdio() in VM initialisation
  5330      * Giri Mandalika's blog
  5331      * http://technopark02.blogspot.com/2005_05_01_archive.html
  5332      */
  5333 #ifndef  _LP64
  5334      if ((!enabled_extended_FILE_stdio) && fd < 256) {
  5335          int newfd = ::fcntl(fd, F_DUPFD, 256);
  5336          if (newfd != -1) {
  5337              ::close(fd);
  5338              fd = newfd;
  5341 #endif // 32-bit Solaris
  5342     /*
  5343      * All file descriptors that are opened in the JVM and not
  5344      * specifically destined for a subprocess should have the
  5345      * close-on-exec flag set.  If we don't set it, then careless 3rd
  5346      * party native code might fork and exec without closing all
  5347      * appropriate file descriptors (e.g. as we do in closeDescriptors in
  5348      * UNIXProcess.c), and this in turn might:
  5350      * - cause end-of-file to fail to be detected on some file
  5351      *   descriptors, resulting in mysterious hangs, or
  5353      * - might cause an fopen in the subprocess to fail on a system
  5354      *   suffering from bug 1085341.
  5356      * (Yes, the default setting of the close-on-exec flag is a Unix
  5357      * design flaw)
  5359      * See:
  5360      * 1085341: 32-bit stdio routines should support file descriptors >255
  5361      * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  5362      * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
  5363      */
  5364 #ifdef FD_CLOEXEC
  5366         int flags = ::fcntl(fd, F_GETFD);
  5367         if (flags != -1)
  5368             ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  5370 #endif
  5372   if (o_delete != 0) {
  5373     ::unlink(path);
  5375   return fd;
  5378 // create binary file, rewriting existing file if required
  5379 int os::create_binary_file(const char* path, bool rewrite_existing) {
  5380   int oflags = O_WRONLY | O_CREAT;
  5381   if (!rewrite_existing) {
  5382     oflags |= O_EXCL;
  5384   return ::open64(path, oflags, S_IREAD | S_IWRITE);
  5387 // return current position of file pointer
  5388 jlong os::current_file_offset(int fd) {
  5389   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
  5392 // move file pointer to the specified offset
  5393 jlong os::seek_to_file_offset(int fd, jlong offset) {
  5394   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
  5397 jlong os::lseek(int fd, jlong offset, int whence) {
  5398   return (jlong) ::lseek64(fd, offset, whence);
  5401 char * os::native_path(char *path) {
  5402   return path;
  5405 int os::ftruncate(int fd, jlong length) {
  5406   return ::ftruncate64(fd, length);
  5409 int os::fsync(int fd)  {
  5410   RESTARTABLE_RETURN_INT(::fsync(fd));
  5413 int os::available(int fd, jlong *bytes) {
  5414   jlong cur, end;
  5415   int mode;
  5416   struct stat64 buf64;
  5418   if (::fstat64(fd, &buf64) >= 0) {
  5419     mode = buf64.st_mode;
  5420     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
  5421       /*
  5422       * XXX: is the following call interruptible? If so, this might
  5423       * need to go through the INTERRUPT_IO() wrapper as for other
  5424       * blocking, interruptible calls in this file.
  5425       */
  5426       int n,ioctl_return;
  5428       INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
  5429       if (ioctl_return>= 0) {
  5430           *bytes = n;
  5431         return 1;
  5435   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
  5436     return 0;
  5437   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
  5438     return 0;
  5439   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
  5440     return 0;
  5442   *bytes = end - cur;
  5443   return 1;
  5446 // Map a block of memory.
  5447 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
  5448                      char *addr, size_t bytes, bool read_only,
  5449                      bool allow_exec) {
  5450   int prot;
  5451   int flags;
  5453   if (read_only) {
  5454     prot = PROT_READ;
  5455     flags = MAP_SHARED;
  5456   } else {
  5457     prot = PROT_READ | PROT_WRITE;
  5458     flags = MAP_PRIVATE;
  5461   if (allow_exec) {
  5462     prot |= PROT_EXEC;
  5465   if (addr != NULL) {
  5466     flags |= MAP_FIXED;
  5469   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
  5470                                      fd, file_offset);
  5471   if (mapped_address == MAP_FAILED) {
  5472     return NULL;
  5474   return mapped_address;
  5478 // Remap a block of memory.
  5479 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
  5480                        char *addr, size_t bytes, bool read_only,
  5481                        bool allow_exec) {
  5482   // same as map_memory() on this OS
  5483   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
  5484                         allow_exec);
  5488 // Unmap a block of memory.
  5489 bool os::pd_unmap_memory(char* addr, size_t bytes) {
  5490   return munmap(addr, bytes) == 0;
  5493 void os::pause() {
  5494   char filename[MAX_PATH];
  5495   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
  5496     jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
  5497   } else {
  5498     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  5501   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  5502   if (fd != -1) {
  5503     struct stat buf;
  5504     ::close(fd);
  5505     while (::stat(filename, &buf) == 0) {
  5506       (void)::poll(NULL, 0, 100);
  5508   } else {
  5509     jio_fprintf(stderr,
  5510       "Could not open pause file '%s', continuing immediately.\n", filename);
  5514 #ifndef PRODUCT
  5515 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
  5516 // Turn this on if you need to trace synch operations.
  5517 // Set RECORD_SYNCH_LIMIT to a large-enough value,
  5518 // and call record_synch_enable and record_synch_disable
  5519 // around the computation of interest.
  5521 void record_synch(char* name, bool returning);  // defined below
  5523 class RecordSynch {
  5524   char* _name;
  5525  public:
  5526   RecordSynch(char* name) :_name(name)
  5527                  { record_synch(_name, false); }
  5528   ~RecordSynch() { record_synch(_name,   true);  }
  5529 };
  5531 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
  5532 extern "C" ret name params {                                    \
  5533   typedef ret name##_t params;                                  \
  5534   static name##_t* implem = NULL;                               \
  5535   static int callcount = 0;                                     \
  5536   if (implem == NULL) {                                         \
  5537     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
  5538     if (implem == NULL)  fatal(dlerror());                      \
  5539   }                                                             \
  5540   ++callcount;                                                  \
  5541   RecordSynch _rs(#name);                                       \
  5542   inner;                                                        \
  5543   return implem args;                                           \
  5545 // in dbx, examine callcounts this way:
  5546 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
  5548 #define CHECK_POINTER_OK(p) \
  5549   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
  5550 #define CHECK_MU \
  5551   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
  5552 #define CHECK_CV \
  5553   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
  5554 #define CHECK_P(p) \
  5555   if (!CHECK_POINTER_OK(p))  fatal(false,  "Pointer must be in C heap only.");
  5557 #define CHECK_MUTEX(mutex_op) \
  5558 CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
  5560 CHECK_MUTEX(   mutex_lock)
  5561 CHECK_MUTEX(  _mutex_lock)
  5562 CHECK_MUTEX( mutex_unlock)
  5563 CHECK_MUTEX(_mutex_unlock)
  5564 CHECK_MUTEX( mutex_trylock)
  5565 CHECK_MUTEX(_mutex_trylock)
  5567 #define CHECK_COND(cond_op) \
  5568 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);
  5570 CHECK_COND( cond_wait);
  5571 CHECK_COND(_cond_wait);
  5572 CHECK_COND(_cond_wait_cancel);
  5574 #define CHECK_COND2(cond_op) \
  5575 CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);
  5577 CHECK_COND2( cond_timedwait);
  5578 CHECK_COND2(_cond_timedwait);
  5579 CHECK_COND2(_cond_timedwait_cancel);
  5581 // do the _lwp_* versions too
  5582 #define mutex_t lwp_mutex_t
  5583 #define cond_t  lwp_cond_t
  5584 CHECK_MUTEX(  _lwp_mutex_lock)
  5585 CHECK_MUTEX(  _lwp_mutex_unlock)
  5586 CHECK_MUTEX(  _lwp_mutex_trylock)
  5587 CHECK_MUTEX( __lwp_mutex_lock)
  5588 CHECK_MUTEX( __lwp_mutex_unlock)
  5589 CHECK_MUTEX( __lwp_mutex_trylock)
  5590 CHECK_MUTEX(___lwp_mutex_lock)
  5591 CHECK_MUTEX(___lwp_mutex_unlock)
  5593 CHECK_COND(  _lwp_cond_wait);
  5594 CHECK_COND( __lwp_cond_wait);
  5595 CHECK_COND(___lwp_cond_wait);
  5597 CHECK_COND2(  _lwp_cond_timedwait);
  5598 CHECK_COND2( __lwp_cond_timedwait);
  5599 #undef mutex_t
  5600 #undef cond_t
  5602 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
  5603 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
  5604 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
  5605 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
  5606 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
  5607 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
  5608 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
  5609 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
  5612 // recording machinery:
  5614 enum { RECORD_SYNCH_LIMIT = 200 };
  5615 char* record_synch_name[RECORD_SYNCH_LIMIT];
  5616 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
  5617 bool record_synch_returning[RECORD_SYNCH_LIMIT];
  5618 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
  5619 int record_synch_count = 0;
  5620 bool record_synch_enabled = false;
  5622 // in dbx, examine recorded data this way:
  5623 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
  5625 void record_synch(char* name, bool returning) {
  5626   if (record_synch_enabled) {
  5627     if (record_synch_count < RECORD_SYNCH_LIMIT) {
  5628       record_synch_name[record_synch_count] = name;
  5629       record_synch_returning[record_synch_count] = returning;
  5630       record_synch_thread[record_synch_count] = thr_self();
  5631       record_synch_arg0ptr[record_synch_count] = &name;
  5632       record_synch_count++;
  5634     // put more checking code here:
  5635     // ...
  5639 void record_synch_enable() {
  5640   // start collecting trace data, if not already doing so
  5641   if (!record_synch_enabled)  record_synch_count = 0;
  5642   record_synch_enabled = true;
  5645 void record_synch_disable() {
  5646   // stop collecting trace data
  5647   record_synch_enabled = false;
  5650 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
  5651 #endif // PRODUCT
  5653 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
  5654 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
  5655                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
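// The two constants above are just field offsets within prusage_t computed via
// the classic null-pointer cast.  An equivalent, more conventional spelling
// would be (sketch only, assuming <stddef.h>):
#if 0
const intptr_t thr_time_off_alt  = (intptr_t) offsetof(prusage_t, pr_utime);
const intptr_t thr_time_size_alt = (intptr_t) offsetof(prusage_t, pr_ttime) -
                                   (intptr_t) offsetof(prusage_t, pr_utime);
#endif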
  5658 // JVMTI & JVM monitoring and management support
  5659 // The thread_cpu_time() and current_thread_cpu_time() are only
  5660 // supported if is_thread_cpu_time_supported() returns true.
  5661 // They are not supported on Solaris T1.
  5663 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
  5664 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
  5665 // of a thread.
  5666 //
  5667 // current_thread_cpu_time() and thread_cpu_time(Thread *)
  5668 // return the fast estimate available on the platform.
  5670 // The hrtime_t value returned by gethrvtime() includes
  5671 // user time but does not include system time.
  5672 jlong os::current_thread_cpu_time() {
  5673   return (jlong) gethrvtime();
  5676 jlong os::thread_cpu_time(Thread *thread) {
  5677   // return user level CPU time only to be consistent with
  5678   // what current_thread_cpu_time returns.
  5679   // thread_cpu_time_info() must be changed if this changes
  5680   return os::thread_cpu_time(thread, false /* user time only */);
  5683 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  5684   if (user_sys_cpu_time) {
  5685     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  5686   } else {
  5687     return os::current_thread_cpu_time();
  5691 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  5692   char proc_name[64];
  5693   int count;
  5694   prusage_t prusage;
  5695   jlong lwp_time;
  5696   int fd;
  5698   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
  5699                      getpid(),
  5700                      thread->osthread()->lwp_id());
  5701   fd = ::open(proc_name, O_RDONLY);
  5702   if ( fd == -1 ) return -1;
  5704   do {
  5705     count = ::pread(fd,
  5706                   (void *)&prusage.pr_utime,
  5707                   thr_time_size,
  5708                   thr_time_off);
  5709   } while (count < 0 && errno == EINTR);
  5710   ::close(fd);
  5711   if ( count < 0 ) return -1;
  5713   if (user_sys_cpu_time) {
  5714     // user + system CPU time
  5715     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
  5716                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
  5717                  (jlong)prusage.pr_stime.tv_nsec +
  5718                  (jlong)prusage.pr_utime.tv_nsec;
  5719   } else {
  5720     // user level CPU time only
  5721     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
  5722                 (jlong)prusage.pr_utime.tv_nsec;
  5725   return(lwp_time);
  5728 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  5729   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  5730   info_ptr->may_skip_backward = false;    // elapsed time not wall time
  5731   info_ptr->may_skip_forward = false;     // elapsed time not wall time
  5732   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
  5735 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  5736   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  5737   info_ptr->may_skip_backward = false;    // elapsed time not wall time
  5738   info_ptr->may_skip_forward = false;     // elapsed time not wall time
  5739   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
  5742 bool os::is_thread_cpu_time_supported() {
  5743   if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
  5744     return true;
  5745   } else {
  5746     return false;
  5750 // System loadavg support.  Returns -1 if load average cannot be obtained.
  5751 // Return the load average for our processor set if the primitive exists
  5752 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
  5753 int os::loadavg(double loadavg[], int nelem) {
  5754   if (pset_getloadavg_ptr != NULL) {
  5755     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
  5756   } else {
  5757     return ::getloadavg(loadavg, nelem);
  5758   }
  5759 }
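// A caller-side sketch (illustration only): query up to three samples and
// treat a negative return as "load average unavailable".
// print_load_averages_example is a hypothetical helper.
static void print_load_averages_example() {
  double avgs[3];
  int n = os::loadavg(avgs, 3);          // 1-, 5- and 15-minute averages, if available
  if (n < 0) {
    tty->print_cr("load average not available");
  } else {
    for (int i = 0; i < n; i++) {
      tty->print_cr("loadavg[%d] = %.2f", i, avgs[i]);
    }
  }
}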
  5761 //---------------------------------------------------------------------------------
  5763 bool os::find(address addr, outputStream* st) {
  5764   Dl_info dlinfo;
  5765   memset(&dlinfo, 0, sizeof(dlinfo));
  5766   if (dladdr(addr, &dlinfo) != 0) {
  5767     st->print(PTR_FORMAT ": ", addr);
  5768     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
  5769       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
  5770     } else if (dlinfo.dli_fbase != NULL)
  5771       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
  5772     else
  5773       st->print("<absolute address>");
  5774     if (dlinfo.dli_fname != NULL) {
  5775       st->print(" in %s", dlinfo.dli_fname);
  5777     if (dlinfo.dli_fbase != NULL) {
  5778       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
  5780     st->cr();
  5782     if (Verbose) {
  5783       // decode some bytes around the PC
  5784       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
  5785       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
  5786       address       lowest = (address) dlinfo.dli_sname;
  5787       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
  5788       if (begin < lowest)  begin = lowest;
  5789       Dl_info dlinfo2;
  5790       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
  5791           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
  5792         end = (address) dlinfo2.dli_saddr;
  5793       Disassembler::decode(begin, end, st);
  5795     return true;
  5797   return false;
  5800 // The following function has been added to support HotSparc's libjvm.so running
  5801 // under the Solaris production JDK 1.2.2 / 1.3.0.  These came from
  5802 // src/solaris/hpi/native_threads in the EVM codebase.
  5803 //
  5804 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
  5805 // libraries and should thus be removed. We will leave it behind for a while
  5806 // until we no longer want to be able to run on top of the 1.3.0 Solaris production
  5807 // JDK. See 4341971.
  5809 #define STACK_SLACK 0x800
  5811 extern "C" {
  5812   intptr_t sysThreadAvailableStackWithSlack() {
  5813     stack_t st;
  5814     intptr_t retval, stack_top;
  5815     retval = thr_stksegment(&st);
  5816     assert(retval == 0, "incorrect return value from thr_stksegment");
  5817     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  5818     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  5819     stack_top=(intptr_t)st.ss_sp-st.ss_size;
  5820     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  5824 // ObjectMonitor park-unpark infrastructure ...
  5825 //
  5826 // We implement Solaris and Linux PlatformEvents with the
  5827 // obvious condvar-mutex-flag triple.
  5828 // Another alternative that works quite well is pipes:
  5829 // Each PlatformEvent consists of a pipe-pair.
  5830 // The thread associated with the PlatformEvent
  5831 // calls park(), which reads from the input end of the pipe.
  5832 // Unpark() writes into the other end of the pipe.
  5833 // The write-side of the pipe must be set NDELAY.
  5834 // Unfortunately pipes consume a large # of handles.
  5835 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
  5836 // Using pipes for the 1st few threads might be workable, however.
  5837 //
  5838 // park() is permitted to return spuriously.
  5839 // Callers of park() should wrap the call to park() in
  5840 // an appropriate loop.  A litmus test for the correct
  5841 // usage of park is the following: if park() were modified
  5842 // to immediately return 0 your code should still work,
  5843 // albeit degenerating to a spin loop (see the sketch after this comment block).
  5844 //
  5845 // An interesting optimization for park() is to use a trylock()
  5846 // to attempt to acquire the mutex.  If the trylock() fails
  5847 // then we know that a concurrent unpark() operation is in-progress.
  5848 // In that case the park() code could simply set _count to 0
  5849 // and return immediately.  The subsequent park() operation *might*
  5850 // return immediately.  That's harmless as the caller of park() is
  5851 // expected to loop.  By using trylock() we will have avoided a
  5852 // context switch caused by contention on the per-thread mutex.
  5853 //
  5854 // TODO-FIXME:
  5855 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the
  5856 //     objectmonitor implementation.
  5857 // 2.  Collapse the JSR166 parker event, and the
  5858 //     objectmonitor ParkEvent into a single "Event" construct.
  5859 // 3.  In park() and unpark() add:
  5860 //     assert (Thread::current() == AssociatedWith).
  5861 // 4.  add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
  5862 //     1-out-of-N park() operations will return immediately.
  5863 //
  5864 // _Event transitions in park()
  5865 //   -1 => -1 : illegal
  5866 //    1 =>  0 : pass - return immediately
  5867 //    0 => -1 : block
  5868 //
  5869 // _Event serves as a restricted-range semaphore.
  5870 //
  5871 // Another possible encoding of _Event would be with
  5872 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
  5873 //
  5874 // TODO-FIXME: add DTRACE probes for:
  5875 // 1.   Tx parks
  5876 // 2.   Ty unparks Tx
  5877 // 3.   Tx resumes from park
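// A caller-loop sketch for the litmus test above (illustration only:
// spin_until_ready_example and `ready' are hypothetical, and `ev' is assumed
// to be the PlatformEvent owned by the calling thread).
static void spin_until_ready_example(volatile bool* ready, os::PlatformEvent* ev) {
  while (!*ready) {      // always re-check the caller's own condition ...
    ev->park();          // ... because park() is allowed to return spuriously
  }
}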
  5880 // value determined through experimentation
  5881 #define ROUNDINGFIX 11
  5883 // utility to compute the abstime argument to timedwait.
  5884 // TODO-FIXME: switch from compute_abstime() to unpackTime().
  5886 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  5887   // millis is the relative timeout time
  5888   // abstime will be the absolute timeout time
  5889   if (millis < 0)  millis = 0;
  5890   struct timeval now;
  5891   int status = gettimeofday(&now, NULL);
  5892   assert(status == 0, "gettimeofday");
  5893   jlong seconds = millis / 1000;
  5894   jlong max_wait_period;
  5896   if (UseLWPSynchronization) {
  5897     // forward port of fix for 4275818 (not sleeping long enough)
  5898     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
  5899     // _lwp_cond_timedwait() used a round_down algorithm rather
  5900     // than a round_up. For millis less than our roundfactor
  5901     // it rounded down to 0 which doesn't meet the spec.
  5902     // For millis > roundfactor we may return a bit sooner, but
  5903     // since we can not accurately identify the patch level and
  5904     // this has already been fixed in Solaris 9 and 8 we will
  5905     // leave it alone rather than always rounding down.
  5907     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
  5908     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
  5909     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
  5910     max_wait_period = 21000000;
  5911   } else {
  5912     max_wait_period = 50000000;
  5913   }
  5914   millis %= 1000;
  5915   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
  5916      seconds = max_wait_period;
  5917   }
  5918   abstime->tv_sec = now.tv_sec  + seconds;
  5919   long       usec = now.tv_usec + millis * 1000;
  5920   if (usec >= 1000000) {
  5921     abstime->tv_sec += 1;
  5922     usec -= 1000000;
  5923   }
  5924   abstime->tv_nsec = usec * 1000;
  5925   return abstime;
  5926 }
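// A worked example of the arithmetic above (illustrative values only): with
// now = { tv_sec = 1000, tv_usec = 999500 } and millis = 1500,
//   seconds = 1 and millis becomes 500,
//   usec    = 999500 + 500 * 1000 = 1499500  ->  carry: tv_sec += 1, usec = 499500,
// so abstime = { tv_sec = 1002, tv_nsec = 499500000 }.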
  5928 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
  5929 // Conceptually TryPark() should be equivalent to park(0).
  5931 int os::PlatformEvent::TryPark() {
  5932   for (;;) {
  5933     const int v = _Event ;
  5934     guarantee ((v == 0) || (v == 1), "invariant") ;
  5935     if (Atomic::cmpxchg (0, &_Event, v) == v) return v  ;
  5939 void os::PlatformEvent::park() {           // AKA: down()
  5940   // Invariant: Only the thread associated with the Event/PlatformEvent
  5941   // may call park().
  5942   int v ;
  5943   for (;;) {
  5944       v = _Event ;
  5945       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  5947   guarantee (v >= 0, "invariant") ;
  5948   if (v == 0) {
  5949      // Do this the hard way by blocking ...
  5950      // See http://monaco.sfbay/detail.jsf?cr=5094058.
  5951      // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  5952      // Only for SPARC >= V8PlusA
  5953 #if defined(__sparc) && defined(COMPILER2)
  5954      if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  5955 #endif
  5956      int status = os::Solaris::mutex_lock(_mutex);
  5957      assert_status(status == 0, status,  "mutex_lock");
  5958      guarantee (_nParked == 0, "invariant") ;
  5959      ++ _nParked ;
  5960      while (_Event < 0) {
  5961         // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
  5962         // Treat this the same as if the wait was interrupted
  5963         // With usr/lib/lwp going to kernel, always handle ETIME
  5964         status = os::Solaris::cond_wait(_cond, _mutex);
  5965         if (status == ETIME) status = EINTR ;
  5966         assert_status(status == 0 || status == EINTR, status, "cond_wait");
  5968      -- _nParked ;
  5969      _Event = 0 ;
  5970      status = os::Solaris::mutex_unlock(_mutex);
  5971      assert_status(status == 0, status, "mutex_unlock");
  5972     // Paranoia to ensure our locked and lock-free paths interact
  5973     // correctly with each other.
  5974     OrderAccess::fence();
  5978 int os::PlatformEvent::park(jlong millis) {
  5979   guarantee (_nParked == 0, "invariant") ;
  5980   int v ;
  5981   for (;;) {
  5982       v = _Event ;
  5983       if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  5985   guarantee (v >= 0, "invariant") ;
  5986   if (v != 0) return OS_OK ;
  5988   int ret = OS_TIMEOUT;
  5989   timestruc_t abst;
  5990   compute_abstime (&abst, millis);
  5992   // See http://monaco.sfbay/detail.jsf?cr=5094058.
  5993   // For Solaris SPARC set fprs.FEF=0 prior to parking.
  5994   // Only for SPARC >= V8PlusA
  5995 #if defined(__sparc) && defined(COMPILER2)
  5996  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  5997 #endif
  5998   int status = os::Solaris::mutex_lock(_mutex);
  5999   assert_status(status == 0, status, "mutex_lock");
  6000   guarantee (_nParked == 0, "invariant") ;
  6001   ++ _nParked ;
  6002   while (_Event < 0) {
  6003      int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
  6004      assert_status(status == 0 || status == EINTR ||
  6005                    status == ETIME || status == ETIMEDOUT,
  6006                    status, "cond_timedwait");
  6007      if (!FilterSpuriousWakeups) break ;                // previous semantics
  6008      if (status == ETIME || status == ETIMEDOUT) break ;
  6009      // We consume and ignore EINTR and spurious wakeups.
  6011   -- _nParked ;
  6012   if (_Event >= 0) ret = OS_OK ;
  6013   _Event = 0 ;
  6014   status = os::Solaris::mutex_unlock(_mutex);
  6015   assert_status(status == 0, status, "mutex_unlock");
  6016   // Paranoia to ensure our locked and lock-free paths interact
  6017   // correctly with each other.
  6018   OrderAccess::fence();
  6019   return ret;
  6022 void os::PlatformEvent::unpark() {
  6023   // Transitions for _Event:
  6024   //    0 :=> 1
  6025   //    1 :=> 1
  6026   //   -1 :=> either 0 or 1; must signal target thread
  6027   //          That is, we can safely transition _Event from -1 to either
  6028   //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  6029   //          unpark() calls.
  6030   // See also: "Semaphores in Plan 9" by Mullender & Cox
  6031   //
  6032   // Note: Forcing a transition from "-1" to "1" on an unpark() means
  6033   // that it will take two back-to-back park() calls for the owning
  6034   // thread to block. This has the benefit of forcing a spurious return
  6035   // from the first park() call after an unpark() call which will help
  6036   // shake out uses of park() and unpark() without condition variables.
  6038   if (Atomic::xchg(1, &_Event) >= 0) return;
  6040   // If the thread associated with the event was parked, wake it.
  6041   // Wait for the thread assoc with the PlatformEvent to vacate.
  6042   int status = os::Solaris::mutex_lock(_mutex);
  6043   assert_status(status == 0, status, "mutex_lock");
  6044   int AnyWaiters = _nParked;
  6045   status = os::Solaris::mutex_unlock(_mutex);
  6046   assert_status(status == 0, status, "mutex_unlock");
  6047   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  6048   if (AnyWaiters != 0) {
  6049     // We intentionally signal *after* dropping the lock
  6050     // to avoid a common class of futile wakeups.
  6051     status = os::Solaris::cond_signal(_cond);
  6052     assert_status(status == 0, status, "cond_signal");
  6056 // JSR166
  6057 // -------------------------------------------------------
  6059 /*
  6060  * The Solaris and Linux implementations of park/unpark are fairly
  6061  * conservative for now, but can be improved. They currently use a
  6062  * mutex/condvar pair, plus _counter.
  6063  * Park decrements _counter if > 0, else does a condvar wait.  Unpark
  6064  * sets count to 1 and signals condvar.  Only one thread ever waits
  6065  * on the condvar. Contention seen when trying to park implies that someone
  6066  * is unparking you, so don't wait. And spurious returns are fine, so there
  6067  * is no need to track notifications.
  6068  */
  6070 #define MAX_SECS 100000000
  6071 /*
  6072  * This code is common to Linux and Solaris and will be moved to a
  6073  * common place in Dolphin.
  6075  * The passed-in time value is either a relative time in nanoseconds
  6076  * or an absolute time in milliseconds. Either way it has to be unpacked
  6077  * into suitable seconds and nanoseconds components and stored in the
  6078  * given timespec structure.
  6079  * Since the given time is a 64-bit value and the time_t used in the timespec is
  6080  * only a signed 32-bit value (except on 64-bit Linux), we have to watch for
  6081  * overflow if times far in the future are given. Further, on Solaris versions
  6082  * prior to 10 there is a restriction (see cond_timedwait) that the specified
  6083  * number of seconds, in abstime, is less than current_time  + 100,000,000.
  6084  * As it will be 28 years before "now + 100000000" will overflow we can
  6085  * ignore overflow and just impose a hard-limit on seconds using the value
  6086  * of "now + 100,000,000". This places a limit on the timeout of about 3.17
  6087  * years from "now".
  6088  */
  6089 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  6090   assert (time > 0, "convertTime");
  6092   struct timeval now;
  6093   int status = gettimeofday(&now, NULL);
  6094   assert(status == 0, "gettimeofday");
  6096   time_t max_secs = now.tv_sec + MAX_SECS;
  6098   if (isAbsolute) {
  6099     jlong secs = time / 1000;
  6100     if (secs > max_secs) {
  6101       absTime->tv_sec = max_secs;
  6102     }
  6103     else {
  6104       absTime->tv_sec = secs;
  6105     }
  6106     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  6107   }
  6108   else {
  6109     jlong secs = time / NANOSECS_PER_SEC;
  6110     if (secs >= MAX_SECS) {
  6111       absTime->tv_sec = max_secs;
  6112       absTime->tv_nsec = 0;
  6113     }
  6114     else {
  6115       absTime->tv_sec = now.tv_sec + secs;
  6116       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
  6117       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
  6118         absTime->tv_nsec -= NANOSECS_PER_SEC;
  6119         ++absTime->tv_sec; // note: this must be <= max_secs
  6120       }
  6121     }
  6122   }
  6123   assert(absTime->tv_sec >= 0, "tv_sec < 0");
  6124   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  6125   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  6126   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
  6127 }
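// Two illustrative calls matching the conventions described above (a sketch
// only; unpackTime_examples and the 250 ms deadline are arbitrary).
static void unpackTime_examples() {
  timespec deadline;
  unpackTime(&deadline, false, 250 * NANOSECS_PER_MILLISEC);   // relative: 250 ms from now
  jlong now_ms = os::javaTimeMillis();
  unpackTime(&deadline, true, now_ms + 250);                   // absolute: wall-clock millis
}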
  6129 void Parker::park(bool isAbsolute, jlong time) {
  6130   // Ideally we'd do something useful while spinning, such
  6131   // as calling unpackTime().
  6133   // Optional fast-path check:
  6134   // Return immediately if a permit is available.
  6135   // We depend on Atomic::xchg() having full barrier semantics
  6136   // since we are doing a lock-free update to _counter.
  6137   if (Atomic::xchg(0, &_counter) > 0) return;
  6139   // Optional fast-exit: Check interrupt before trying to wait
  6140   Thread* thread = Thread::current();
  6141   assert(thread->is_Java_thread(), "Must be JavaThread");
  6142   JavaThread *jt = (JavaThread *)thread;
  6143   if (Thread::is_interrupted(thread, false)) {
  6144     return;
  6147   // First, demultiplex/decode time arguments
  6148   timespec absTime;
  6149   if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
  6150     return;
  6152   if (time > 0) {
  6153     // Warning: this code might be exposed to the old Solaris time
  6154   // round-down bugs.  Grep "ROUNDINGFIX" for details.
  6155     unpackTime(&absTime, isAbsolute, time);
  6158   // Enter safepoint region
  6159   // Beware of deadlocks such as 6317397.
  6160   // The per-thread Parker:: _mutex is a classic leaf-lock.
  6161   // In particular a thread must never block on the Threads_lock while
  6162   // holding the Parker:: mutex.  If safepoints are pending, both
  6163   // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  6164   ThreadBlockInVM tbivm(jt);
  6166   // Don't wait if we cannot get the lock, since interference arises from
  6167   // unblocking.  Also, check for interrupt before trying to wait.
  6168   if (Thread::is_interrupted(thread, false) ||
  6169       os::Solaris::mutex_trylock(_mutex) != 0) {
  6170     return;
  6173   int status ;
  6175   if (_counter > 0)  { // no wait needed
  6176     _counter = 0;
  6177     status = os::Solaris::mutex_unlock(_mutex);
  6178     assert (status == 0, "invariant") ;
  6179     // Paranoia to ensure our locked and lock-free paths interact
  6180     // correctly with each other and Java-level accesses.
  6181     OrderAccess::fence();
  6182     return;
  6185 #ifdef ASSERT
  6186   // Don't catch signals while blocked; let the running threads have the signals.
  6187   // (This allows a debugger to break into the running thread.)
  6188   sigset_t oldsigs;
  6189   sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  6190   thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
  6191 #endif
  6193   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  6194   jt->set_suspend_equivalent();
  6195   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  6197   // Do this the hard way by blocking ...
  6198   // See http://monaco.sfbay/detail.jsf?cr=5094058.
  6199   // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  6200   // Only for SPARC >= V8PlusA
  6201 #if defined(__sparc) && defined(COMPILER2)
  6202   if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
  6203 #endif
  6205   if (time == 0) {
  6206     status = os::Solaris::cond_wait (_cond, _mutex) ;
  6207   } else {
  6208     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  6210   // Note that an untimed cond_wait() can sometimes return ETIME on older
  6211   // versions of Solaris.
  6212   assert_status(status == 0 || status == EINTR ||
  6213                 status == ETIME || status == ETIMEDOUT,
  6214                 status, "cond_timedwait");
  6216 #ifdef ASSERT
  6217   thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
  6218 #endif
  6219   _counter = 0 ;
  6220   status = os::Solaris::mutex_unlock(_mutex);
  6221   assert_status(status == 0, status, "mutex_unlock") ;
  6222   // Paranoia to ensure our locked and lock-free paths interact
  6223   // correctly with each other and Java-level accesses.
  6224   OrderAccess::fence();
  6226   // If externally suspended while waiting, re-suspend
  6227   if (jt->handle_special_suspend_equivalent_condition()) {
  6228     jt->java_suspend_self();
  6232 void Parker::unpark() {
  6233   int s, status ;
  6234   status = os::Solaris::mutex_lock (_mutex) ;
  6235   assert (status == 0, "invariant") ;
  6236   s = _counter;
  6237   _counter = 1;
  6238   status = os::Solaris::mutex_unlock (_mutex) ;
  6239   assert (status == 0, "invariant") ;
  6241   if (s < 1) {
  6242     status = os::Solaris::cond_signal (_cond) ;
  6243     assert (status == 0, "invariant") ;
  6247 extern char** environ;
  6249 // Run the specified command in a separate process. Return its exit value,
  6250 // or -1 on failure (e.g. can't fork a new process).
  6251 // Unlike system(), this function can be called from a signal handler. It
  6252 // doesn't block SIGINT et al.
  6253 int os::fork_and_exec(char* cmd) {
  6254   char * argv[4];
  6255   argv[0] = (char *)"sh";
  6256   argv[1] = (char *)"-c";
  6257   argv[2] = cmd;
  6258   argv[3] = NULL;
  6260   // fork() is async-signal-safe; fork1() is not, so it can't be used in a signal handler
  6261   pid_t pid;
  6262   Thread* t = ThreadLocalStorage::get_thread_slow();
  6263   if (t != NULL && t->is_inside_signal_handler()) {
  6264     pid = fork();
  6265   } else {
  6266     pid = fork1();
  6269   if (pid < 0) {
  6270     // fork failed
  6271     warning("fork failed: %s", strerror(errno));
  6272     return -1;
  6274   } else if (pid == 0) {
  6275     // child process
  6277     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
  6278     execve("/usr/bin/sh", argv, environ);
  6280     // execve failed
  6281     _exit(-1);
  6283   } else  {
  6284     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
  6285     // care about the actual exit code, for now.
  6287     int status;
  6289     // Wait for the child process to exit.  This returns immediately if
  6290     // the child has already exited.
  6291     while (waitpid(pid, &status, 0) < 0) {
  6292         switch (errno) {
  6293         case ECHILD: return 0;
  6294         case EINTR: break;
  6295         default: return -1;
  6296         }
  6297     }
  6299     if (WIFEXITED(status)) {
  6300        // The child exited normally; get its exit code.
  6301        return WEXITSTATUS(status);
  6302     } else if (WIFSIGNALED(status)) {
  6303        // The child exited because of a signal
  6304        // The best value to return is 0x80 + signal number,
  6305        // because that is what all Unix shells do, and because
  6306        // it allows callers to distinguish between process exit and
  6307        // process death by signal.
  6308        return 0x80 + WTERMSIG(status);
  6309     } else {
  6310        // Unknown exit code; pass it through
  6311        return status;
  6312     }
  6313   }
  6314 }
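// A caller sketch for the helper above (a sketch only; the command string and
// run_shell_command_example are arbitrary).
static void run_shell_command_example() {
  char cmd[] = "ls /tmp > /dev/null";
  int rc = os::fork_and_exec(cmd);
  if (rc < 0) {
    warning("could not run command: %s", strerror(errno));
  } else if (rc >= 0x80) {
    tty->print_cr("command killed by signal %d", rc - 0x80);
  } else {
    tty->print_cr("command exited with status %d", rc);
  }
}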
  6316 // is_headless_jre()
  6317 //
  6318 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
  6319 // in order to report whether we are running in a headless JRE.
  6320 //
  6321 // Since JDK 8, xawt/libmawt.so has been moved into the same directory
  6322 // as libawt.so and renamed libawt_xawt.so.
  6323 //
  6324 bool os::is_headless_jre() {
  6325     struct stat statbuf;
  6326     char buf[MAXPATHLEN];
  6327     char libmawtpath[MAXPATHLEN];
  6328     const char *xawtstr  = "/xawt/libmawt.so";
  6329     const char *new_xawtstr = "/libawt_xawt.so";
  6330     char *p;
  6332     // Get path to libjvm.so
  6333     os::jvm_path(buf, sizeof(buf));
  6335     // Get rid of libjvm.so
  6336     p = strrchr(buf, '/');
  6337     if (p == NULL) return false;
  6338     else *p = '\0';
  6340     // Get rid of client or server
  6341     p = strrchr(buf, '/');
  6342     if (p == NULL) return false;
  6343     else *p = '\0';
  6345     // check xawt/libmawt.so
  6346     strcpy(libmawtpath, buf);
  6347     strcat(libmawtpath, xawtstr);
  6348     if (::stat(libmawtpath, &statbuf) == 0) return false;
  6350     // check libawt_xawt.so
  6351     strcpy(libmawtpath, buf);
  6352     strcat(libmawtpath, new_xawtstr);
  6353     if (::stat(libmawtpath, &statbuf) == 0) return false;
  6355     return true;
  6356 }
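// A worked example of the path stripping above (the path is illustrative):
//   libjvm.so path:   .../jre/lib/sparcv9/server/libjvm.so
//   after first cut:  .../jre/lib/sparcv9/server
//   after second cut: .../jre/lib/sparcv9
//   then probe:       .../jre/lib/sparcv9/xawt/libmawt.so
//                     and .../jre/lib/sparcv9/libawt_xawt.so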
  6358 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  6359   INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
  6362 int os::close(int fd) {
  6363   return ::close(fd);
  6366 int os::socket_close(int fd) {
  6367   return ::close(fd);
  6370 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  6371   INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
  6374 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  6375   INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
  6378 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  6379   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
  6382 // As both poll and select can be interrupted by signals, we have to be
  6383 // prepared to restart the system call after updating the timeout, unless
  6384 // a poll() is done with timeout == -1, in which case we repeat with this
  6385 // "wait forever" value.
  6387 int os::timeout(int fd, long timeout) {
  6388   int res;
  6389   struct timeval t;
  6390   julong prevtime, newtime;
  6391   static const char* aNull = 0;
  6392   struct pollfd pfd;
  6393   pfd.fd = fd;
  6394   pfd.events = POLLIN;
  6396   gettimeofday(&t, &aNull);
  6397   prevtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec / 1000;
  6399   for(;;) {
  6400     INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
  6401     if(res == OS_ERR && errno == EINTR) {
  6402         if(timeout != -1) {
  6403           gettimeofday(&t, &aNull);
  6404           newtime = ((julong)t.tv_sec * 1000)  +  t.tv_usec /1000;
  6405           timeout -= newtime - prevtime;
  6406           if(timeout <= 0)
  6407             return OS_OK;
  6408           prevtime = newtime;
  6409         }
  6410     } else return res;
  6411   }
  6412 }
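// A caller-side sketch (a sketch only; the 2000 ms interval and
// wait_for_input_example are arbitrary): block until fd is readable or the
// timeout elapses.
static bool wait_for_input_example(int fd) {
  int res = os::timeout(fd, 2000);   // poll fd for POLLIN for up to 2000 ms
  return res > 0;                    // > 0: readable; otherwise timed out or error
}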
  6414 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
  6415   int _result;
  6416   INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
  6417                           os::Solaris::clear_interrupted);
  6419   // Depending on when thread interruption is reset, _result could be
  6420   // one of two values when errno == EINTR
  6422   if (((_result == OS_INTRPT) || (_result == OS_ERR))
  6423       && (errno == EINTR)) {
  6424      /* restarting a connect() changes its errno semantics */
  6425      INTERRUPTIBLE(::connect(fd, him, len), _result,\
  6426                    os::Solaris::clear_interrupted);
  6427      /* undo these changes */
  6428      if (_result == OS_ERR) {
  6429        if (errno == EALREADY) {
  6430          errno = EINPROGRESS; /* fall through */
  6431        } else if (errno == EISCONN) {
  6432          errno = 0;
  6433          return OS_OK;
  6437    return _result;
  6440 int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  6441   if (fd < 0) {
  6442     return OS_ERR;
  6444   INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
  6445                            os::Solaris::clear_interrupted);
  6448 int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
  6449                  sockaddr* from, socklen_t* fromlen) {
  6450   INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
  6451                            os::Solaris::clear_interrupted);
  6454 int os::sendto(int fd, char* buf, size_t len, uint flags,
  6455                struct sockaddr* to, socklen_t tolen) {
  6456   INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
  6457                            os::Solaris::clear_interrupted);
  6460 int os::socket_available(int fd, jint *pbytes) {
  6461   if (fd < 0) {
  6462     return OS_OK;
  6464   int ret;
  6465   RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  6466   // Note: ioctl() can return 0 when successful; JVM_SocketAvailable
  6467   // is expected to return 0 on failure and 1 on success to the JDK.
  6468   return (ret == OS_ERR) ? 0 : 1;
  6471 int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  6472    INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
  6473                                       os::Solaris::clear_interrupted);
  6476 // Get the default path to the core file
  6477 // Returns the length of the string
  6478 int os::get_core_path(char* buffer, size_t bufferSize) {
  6479   const char* p = get_current_directory(buffer, bufferSize);
  6481   if (p == NULL) {
  6482     assert(p != NULL, "failed to get current directory");
  6483     return 0;
  6486   return strlen(buffer);
  6489 #ifndef PRODUCT
  6490 void TestReserveMemorySpecial_test() {
  6491   // No tests available for this platform
  6493 #endif
